parquet-converter commited on
Commit
14a74d5
·
1 Parent(s): 5b07709

Update parquet files (step 77 of 249)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Epson printers resetter how to .zip Everything you need to know about resetting your printer.md +0 -158
  2. spaces/1gistliPinn/ChatGPT4/Examples/Camac Cmk 858 Driver Zip EXCLUSIVE.md +0 -24
  3. spaces/1gistliPinn/ChatGPT4/Examples/Crack Horosoft Professional Edition 4.0.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Crack Optimik 2.36c.rar [NEW].md +0 -10
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Block Strike FPS Shooter APK for Android - Enjoy Fun and Competitive Online Action.md +0 -145
  6. spaces/1phancelerku/anime-remove-background/Aquaman 2 Full Movie 2020 Tamil Download in 720p 1080p and 4K Tamilrockers.md +0 -74
  7. spaces/1phancelerku/anime-remove-background/Download Farm Heroes Saga for PC and Mac with BlueStacks.md +0 -110
  8. spaces/1phancelerku/anime-remove-background/Emoji Keyboard How to Customize Your Android Keyboard with Fun and Style.md +0 -142
  9. spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_utils.py +0 -122
  10. spaces/801artistry/RVC801/infer/modules/vc/pipeline.py +0 -655
  11. spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/start_server.sh +0 -6
  12. spaces/AHzizi/WaifuVoiceGen/attentions.py +0 -300
  13. spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/layers_new.py +0 -125
  14. spaces/AIARTCHAN/openpose_editor/index.html +0 -0
  15. spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/binarizer_zh.py +0 -59
  16. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/GenerSpeech/model/mixstyle.py +0 -63
  17. spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/diffspeech/shallow_diffusion_tts.py +0 -279
  18. spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/fs2_orig.py +0 -138
  19. spaces/AIWaves/Debate/gradio_base.py +0 -574
  20. spaces/AIZero2HeroBootcamp/TranscriptAILearnerFromYoutube/README.md +0 -13
  21. spaces/ASJMO/freegpt/g4f/Provider/Providers/DeepAi.py +0 -46
  22. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py +0 -56
  23. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/work_dirs/yolov6_s_df2_0.4/yolov6_s_fast.py +0 -510
  24. spaces/AashishKumar/Restaurant_voice_chatbot/app.py +0 -199
  25. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/__init__.py +0 -0
  26. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthbuttons/Factory.js +0 -13
  27. spaces/AkitoP/umamusume_bert_vits2/server.py +0 -170
  28. spaces/Alpaca233/SadTalker/src/audio2pose_models/networks.py +0 -140
  29. spaces/Amrrs/DragGan-Inversion/visualizer_drag.py +0 -429
  30. spaces/Andy1621/uniformer_image_detection/configs/point_rend/README.md +0 -23
  31. spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/rfp.py +0 -128
  32. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/plugin.py +0 -88
  33. spaces/Arjav/TOS-Summarization/app.py +0 -38
  34. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp +0 -75
  35. spaces/Bart92/RVC_HF/demucs/repitch.py +0 -96
  36. spaces/Benson/text-generation/Examples/Descargar Foto De Instagram.md +0 -45
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/jisfreq.py +0 -325
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/sbcharsetprober.py +0 -162
  39. spaces/Boadiwaa/Recipes/openai/api_resources/model.py +0 -6
  40. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/box_regression.py +0 -221
  41. spaces/CVPR/LIVE/pybind11/tests/test_methods_and_attributes.py +0 -440
  42. spaces/CVPR/WALT/mmdet/models/__init__.py +0 -16
  43. spaces/Catmeow/Text_Generation_Fine_Tune/README.md +0 -12
  44. spaces/CikeyQI/meme-api/meme_generator/memes/murmur/__init__.py +0 -34
  45. spaces/Cvandi/remake/realesrgan/train.py +0 -11
  46. spaces/Cvandi/remake/realesrgan/weights/README.md +0 -3
  47. spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/cc_sbu_dataset.py +0 -49
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_g_a_s_p.py +0 -55
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Login-9c3cc0eb.css +0 -1
  50. spaces/Denliner/wd-v1-4-tags/app.py +0 -289
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Epson printers resetter how to .zip Everything you need to know about resetting your printer.md DELETED
@@ -1,158 +0,0 @@
1
- <br />
2
- <h1>How to Reset Epson Printers Using Resetter Tool</h1>
3
- <p>If you own an Epson printer, you may have encountered some problems that prevent you from printing normally. For example, you may see an error message saying "A printer's ink pad is at the end of its service life. Please contact Epson Support." or "Parts inside your printer are near the end of their service life." These messages indicate that your printer's waste ink pads are full and need to be replaced or reset.</p>
4
- <p>Waste ink pads are sponge pads inside your printer that collect the excess ink during printing and cleaning. When they reach their limit, they can overflow and cause damage to your printer. To avoid this, you need to reset your printer's waste ink counters using a special software called a resetter tool.</p>
5
- <h2>epson printers resetter how to .zip</h2><br /><p><b><b>DOWNLOAD</b> &#9733; <a href="https://byltly.com/2uKwWK">https://byltly.com/2uKwWK</a></b></p><br /><br />
6
- <p>A resetter tool is a software that can reset your printer's internal settings and clear the waste ink counters. By using a resetter tool, you can save money and time from taking your printer to a service center or buying a new one. In this article, we will show you how to reset your Epson printer using a resetter tool in simple steps.</p>
7
- <h2>How to Download and Extract Resetter Tool</h2>
8
- <p>The first step is to download the resetter tool for your specific Epson printer model. You can find the links for different models below:</p>
9
- <table>
10
- <tr><td>L120,L100,L200,L455,L565,L810,L850,L110,L210,L300,L350,L355,L550,L555,L130,L220,L310,L360,L365,L375,L475,L500,L510,L520,L540,L550,L800,L805,L1300,L1800</td><td><a href="https://epsonreset.com/">https://epsonreset.com/</a></td></tr>
11
- <tr><td>L1210 L3210 L3216 L3250 L3256 L5290 L5296</td><td><a href="https://drive.google.com/drive/folders/1-VUd3cbWn_6HX9E8R5p3GWXEZte-6ol5">https://drive.google.com/drive/folders/1-VUd3cbWn_6HX9E8R5p3GWXEZte-6ol5</a></td></tr>
12
- <tr><td>L365, L360, L310, L220, L130</td><td><a href="https://gadgetsbeat.com/epson-adjustment-program-resetter-tool/">https://gadgetsbeat.com/epson-adjustment-program-resetter-tool/</a></td></tr>
13
- <tr><td>M200,M205,ME32-T13,ME100,ME101,R290,RX600-RX620-RX630,RX650-RX620-RX630,R330,T10-T11,T30-T33,T59,TX121,TX210,TX220,TX300F,TX400,TX410,TX550W,TX600FW,TX610FW,TX700W,TX710W,TX720WD,TX800FW,TX810FW,P50,P60,PX650,PX660,PX700W,PX710W,PX720WD,PX800FW,PX810FW,S20,S21,S22,S23,S24,S40,S50,S80,S90,S95,C58,C59,C79,C90,C92,C110,C120,C1600,C2800,C3800,C3900,C4200,C4300,C4400,C4500,C4600,C4700,C4800,C4900,C5000,C5100,C5200,C5300,C5400,C5500,C5600,C5700,C5800,C5900,C6000,C6100,C6200,C6300,C6400,C6500,C6600</td><td><a href="https://helplineph.com/deped/epson-resetters-complete-and-free/">https://helplineph.com/deped/epson-resetters-complete-and-free/</a></td></tr>
14
- </table>
15
- <p>After downloading the zip file, you need to disable your antivirus protection for some time. This is because some antivirus programs may detect the resetter tool as a virus or malware and block it from running. To disable your antivirus protection, follow these steps:</p>
16
- <ul>
17
- <li>Right-click on the antivirus icon on your taskbar.</li>
18
- <li>Select "Disable protection" or "Turn off" or similar option.</li>
19
- <li>Choose how long you want to disable it (for example, 15 minutes).</li>
20
- <li>Click "OK" or "Yes" to confirm.</li>
21
- </ul>
22
- <p>Next, you need to extract the zip file using a program like WinRAR or 7-Zip. To extract the zip file, follow these steps:</p>
23
- <p>How to use epson resetter software for printers.zip<br />
24
- Epson printer resetter tool download and instructions.zip<br />
25
- Reset epson ink cartridge with printer resetter program.zip<br />
26
- Epson printer resetter guide and troubleshooting.zip<br />
27
- Printer resetter for epson models how to install.zip<br />
28
- How to reset epson printer waste ink counter.zip<br />
29
- Epson printer resetter utility free download.zip<br />
30
- How to fix epson printer error with resetter software.zip<br />
31
- Epson printer resetter compatible with windows and mac.zip<br />
32
- Reset epson printer settings to factory default.zip<br />
33
- How to get epson printer resetter activation key.zip<br />
34
- Epson printer resetter tutorial and tips.zip<br />
35
- Printer resetter for epson l series how to use.zip<br />
36
- Epson printer resetter online service and support.zip<br />
37
- How to update epson printer firmware with resetter tool.zip<br />
38
- Epson printer resetter for xp, wf, sx, and tx series.zip<br />
39
- How to solve epson printer problems with resetter app.zip<br />
40
- Epson printer resetter crack and serial number.zip<br />
41
- Reset epson printer password and network settings.zip<br />
42
- How to run epson printer resetter on linux.zip<br />
43
- Epson printer resetter for ecotank and workforce models.zip<br />
44
- How to backup and restore epson printer data with resetter.zip<br />
45
- Epson printer resetter features and benefits.zip<br />
46
- Printer resetter for epson stylus and expression series.zip<br />
47
- How to uninstall epson printer resetter from your computer.zip<br />
48
- Epson printer resetter reviews and testimonials.zip<br />
49
- Reset epson printer head cleaning and alignment.zip<br />
50
- How to register epson printer resetter online.zip<br />
51
- Epson printer resetter for artisan and surecolor models.zip<br />
52
- How to optimize epson printer performance with resetter.zip<br />
53
- Epson printer resetter faq and help.zip<br />
54
- Printer resetter for epson r, t, and b series.zip<br />
55
- How to contact epson printer resetter customer service.zip<br />
56
- Epson printer resetter license and terms of use.zip<br />
57
- Reset epson printer paper size and quality settings.zip<br />
58
- How to upgrade epson printer resetter to the latest version.zip<br />
59
- Epson printer resetter for picturemate and labelworks models.zip<br />
60
- How to troubleshoot epson printer resetter errors and issues.zip<br />
61
- Epson printer resetter blog and news updates.zip<br />
62
- Reset epson printer wireless and bluetooth settings.zip<br />
63
- How to buy epson printer resetter online securely.zip<br />
64
- Epson printer resetter for aculaser and epl models.zip<br />
65
- How to customize epson printer settings with resetter.zip<br />
66
- Epson printer resetter video tutorials and demos.zip<br />
67
- Reset epson printer maintenance and service mode settings.zip <br />
68
- How to share epson printer resetter with your friends.zip <br />
69
- Epson printer resetter for workforce pro and et models. zip <br />
70
- How to scan and print documents with epson printer resetter. zip <br />
71
- Epson printer resetter system requirements and compatibility. zip</p>
72
- <ul>
73
- <li>Right-click on the zip file and select "Extract here" or "Extract to folder".</li>
74
- <li>Enter the password if required (usually "epsonreset.com" or "gadgetsbeat.com").</li>
75
- <li>Wait for the extraction process to finish.</li>
76
- <li>Open the extracted folder and look for a file named "Run-Me" or similar.</li>
77
- </ul>
78
- <h2>How to Run and Use Resetter Tool</h2>
79
- <p>The next step is to run and use the resetter tool. To do this, follow these steps:</p>
80
- <ul>
81
- <li>Double-click on the file named "Run-Me" or similar.</li>
82
- <li>A window will open with some options.</li>
83
- <li>Click on "Select" button.</li>
84
- <li>A drop-down menu will appear with a list of printer models.</li>
85
- <li>Select your printer model from the list.</li>
86
- <li>Click on "OK".</li>
87
- <li>A new window will open with some information about your printer.</li>
88
- <li>Click on "Port" button.</li>
89
- <li>A drop-down menu will appear with a list of ports.</li>
90
- <li>Select your printer port from the list (usually Auto Selection).</li>
91
- <li>Click on "OK".</li>
92
- <li>A new window will open with some options.</li>
93
- <li>Click on "Particular adjustment mode" button.</li>
94
- <li>A new window will open with a list of adjustment functions.</li>
95
- <li>Select "Waste ink pad counter" from the list.</li>
96
- <li>Click on "OK".</li>
97
- <li>A new window will open with some options.</li>
98
- <li>Tick "Main pad counter" checkbox.</li>
99
- <li>Click on "Check" button.</li>
100
- <li>A new window will open with some information about your waste ink pad counter value.</li>
101
- <li>Note down this value for future reference.</li>
102
- <li>Click on "Initialization" button.</li>
103
- <li>A new window will open with a warning message.</li>
104
- <li>Click on "OK".</li>
105
- <li>A new window will open asking you to enter a reset key.</li>
106
- (Continued from previous message) <ul>
107
- <li>Enter a valid reset key that you have purchased or obtained for free (see below for more details).</li> (Continued from previous message) <li>Enter a valid reset key that you have purchased or obtained for free (see below for more details).</li>
108
- <li>Click on "OK".</li>
109
- <li>A new window will open with a confirmation message.</li>
110
- <li>Click on "OK".</li>
111
- <li>The resetting process is now complete.</li>
112
- </ul>
113
- <p>To get a reset key, you have two options:</p>
114
- <ul>
115
- <li>You can buy a reset key online from various websites that sell them for different prices. For example, you can visit <a href="https://www.wic.support/download/">https://www.wic.support/download/</a> and choose your printer model and pay by Visa, Master Card, PayPal, or Webmoney.</li>
116
- <li>You can get a free trial reset key by watching a video tutorial on how to use the resetter tool and following the instructions. For example, you can visit <a href="https://www.wic.support/download/">https://www.wic.support/download/</a> and click on "Want to get FREE Reset Key?" and watch the video and enter your email address to receive the trial reset key.</li>
117
- </ul>
118
- <p>Note that the trial reset key will only reset the waste ink counter to 80% one time only. You will need to buy a full reset key if you want to reset it completely or multiple times.</p>
119
- <h2>How to Test and Confirm the Resetting Process</h2>
120
- <p>The final step is to test and confirm that the resetting process has worked and your printer is functioning normally. To do this, follow these steps:</p>
121
- <ul>
122
- <li>Turn off your printer and wait for a few seconds.</li>
123
- <li>Turn on your printer and check if the error message is gone.</li>
124
- <li>If the error message is still there, repeat the resetting process again with a different reset key.</li>
125
- <li>If the error message is gone, you can perform some tests to check your printer's performance.</li>
126
- <li>To perform a paper feed test, click on "Paper feed test" button on the resetter tool window and follow the instructions.</li>
127
- <li>To perform a nozzle check, click on "Nozzle check" button on the resetter tool window and follow the instructions.</li>
128
- <li>To perform a color check pattern, click on "Color check pattern" button on the resetter tool window and follow the instructions.</li>
129
- <li>To make an EEPROM dump and backup, click on "EEPROM dump" button on the resetter tool window and follow the instructions.</li>
130
- </ul>
131
- <h2>Conclusion</h2>
132
- <p>In this article, we have shown you how to reset your Epson printer using a resetter tool in simple steps. By resetting your printer, you can solve the waste ink pad counter overflow error and save money and time from taking your printer to a service center or buying a new one. However, you should also be careful when using the resetter tool and follow the instructions properly. Here are some tips and warnings for using the resetter tool:</p>
133
- <ul>
134
- <li>Make sure you download the correct resetter tool for your specific printer model.</li>
135
- <li>Make sure you disable your antivirus protection before running the resetter tool.</li>
136
- <li>Make sure you enter a valid reset key that matches your printer model.</li>
137
- <li>Make sure you turn off and on your printer after resetting it.</li>
138
- <li>Make sure you perform some tests to confirm that your printer is working normally.</li>
139
- <li>Do not use the resetter tool too often as it may damage your printer's quality and lifespan.</li>
140
- <li>Do not share or distribute your reset key with others as it may be blocked or invalidated.</li>
141
- </ul>
142
- <p>We hope this article has been helpful for you. If you have any feedback or questions, please feel free to leave them in the comments section below. Thank you for reading!</p>
143
- <h3>Frequently Asked Questions</h3>
144
- <ol>
145
- <li><b>What is a waste ink pad counter?</b></li>
146
- <p>A waste ink pad counter is a value that shows how much ink has been collected by the waste ink pads inside your printer. When this value reaches 100%, it means that your waste ink pads are full and need to be replaced or reset.</p>
147
- <li><b>What is a resetter tool?</b></li>
148
- <p>A resetter tool is a software that can reset your printer's internal settings and clear the waste ink counters. By using a resetter tool, you can solve the waste ink pad counter overflow error and save money and time from taking your printer to a service center or buying a new one.</p>
149
- <li><b>Where can I get a resetter tool?</b></li>
150
- <p>You can download the resetter tool for different Epson printer models from various websites that provide them for free or for a fee. For example, you can visit <a href="https://epsonreset.com/">https://epsonreset.com/</a>, <a href="https://drive.google.com/drive/folders/1-VUd3cbWn_6HX9E8R5p3GWXEZte-6ol5">https://drive.google.com/drive/folders/1-VUd3cbWn_6HX9E8R5p3GWXEZte-6ol5</a>, <a href="https://gadgetsbeat.com/epson-adjustment-program-resetter-tool/">https://gadgetsbeat.com/epson-adjustment-program-resetter-tool/</a>, or <a href="https://helplineph.com/deped/epson-resetters-complete-and-free/">https://helplineph.com/deped/epson-resetters-complete-and-free/</a>.</p>
151
- <li><b>Where can I get a reset key?</b></li>
152
- <p>You can buy a reset key online from various websites that sell them for different prices. For example, you can visit <a href="https://www.wic.support/download/">https://www.wic.support/download/</a> and choose your printer model and pay by Visa, Master Card, PayPal, or Webmoney. You can also get a free trial reset key by watching a video tutorial on how to use the resetter tool and following the instructions. For example, you can visit <a href="https://www.wic.support/download/">https://www.wic.support/download/</a> and click on "Want to get FREE Reset Key?" and watch the video and enter your email address to receive the trial reset key.</p>
153
- <li><b>How often should I use the resetter tool?</b></li>
154
- <p>You should use the resetter tool only when you see an error message saying "A printer's ink pad is at the end of its service life. Please contact Epson Support." or "Parts inside your printer are near the end of their service life." You should not use the resetter tool too often as it may damage your printer's quality and lifespan.</p>
155
- </ol>
156
- </p> 0a6ba089eb<br />
157
- <br />
158
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Camac Cmk 858 Driver Zip EXCLUSIVE.md DELETED
@@ -1,24 +0,0 @@
1
- <h2>camac cmk 858 driver zip</h2><br /><p><b><b>Download File</b> &bull; <a href="https://imgfil.com/2uy1F5">https://imgfil.com/2uy1F5</a></b></p><br /><br />
2
- <br />
3
- Download the Macamac® driver for Mac operating systems. Camac Cmk 858 Driver are an excellent package and the performance is better than other brands. I recommend this product! Download the Macamac® driver for Mac operating systems. Download the Macamac® driver for Mac operating systems.
4
-
5
- Camac Cmk 858 Driver Zip Download for Windows
6
-
7
- Driver download is compulsory for installation of any software. If you have downloaded Camac Cmk 858 Driver Zip For Mac Download Macamac driver and you want to install in your system, please choose one of the two ways: You can use an active internet connection Download directly from this website If you need to download Camac Cmk 858 Driver Zip For Mac For Mac OS X. To install this Camac Cmk 858 Driver Zip For Mac Download, the installation file must be downloaded to your computer. The installation of the drivers for Mac OS X. You can use the driver installation disk for Mac to install the driver of the software. You can use the driver installation disk for Mac to install the driver of the software.
8
-
9
- camac Cmk 858 Driver Zip Download
10
-
11
- When the installation is complete, close all the unnecessary programs, and then restart the PC to use the driver after restart.
12
-
13
- Download Macamac driver. Click the downloaded link in the order you received it. Go to the section "Download the camac Driver" where you can find the link for your version of Mac.
14
-
15
- Click "Go to driver" to start the installation. The file will be copied to your computer, and then the installation will begin automatically. The installation of the drivers for Mac OS X. Follow the instructions on your screen and do not touch anything. When the installation is complete, close all the unnecessary programs, and then restart the PC to use the driver after restart.
16
-
17
- You can also go to http: You can also go to http: Go to the download page for the version of Mac you are using to download the Macamac® driver. You will find the download links at the bottom of the download page.
18
-
19
- Download Macamac® driver. Click the downloaded link in the order you received it. Go to the section "Download the camac Driver" where you can find the link for your version of Mac.
20
-
21
- Click "Go to driver" to start the installation. The file will be copied to your computer, and then the installation will begin 4fefd39f24<br />
22
- <br />
23
- <br />
24
- <p></p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Crack Horosoft Professional Edition 4.0.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Crack Horosoft Professional Edition 4.0</h2><br /><p><b><b>Download</b> &#10026;&#10026;&#10026; <a href="https://imgfil.com/2uy25d">https://imgfil.com/2uy25d</a></b></p><br /><br />
2
- <br />
3
- HOROSOFT Professional Edition 4.0 astrology software. Buy Standard Edition Download Samples Horosoft's Blog Subscribe to RSS Feed ... 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Crack Optimik 2.36c.rar [NEW].md DELETED
@@ -1,10 +0,0 @@
1
-
2
- <p> crack optimik 2.36c.rar abonnenten. : abonnenten. : aktuelle. optimik is a leading provider of enterprise-class data protection and data security solutions for all your data. optimik is available in a number of editions and editions are available on linux. </p>
3
- <p>optimik 2.36c.rar free download <br> free download optimik 2.rar <br> optimik 2.rar crack <br> optimik 2.rar free download <br> optimik 2.rar password crack <br> optimik 2.rar download <br> optimik 2.rar serial number <br> optimik 2.rar cracked <br> optimik 2.rar crack bypass <br> optimik 2.rar how to crack <br> optimik 2.rar crack to <br> optimik 2.rar crack download <br> optimik 2.rar crack password <br> optimik 2.rar crack keygen <br> optimik 2.rar keygen <br> optimik 2.</p>
4
- <h2>crack optimik 2.36c.rar</h2><br /><p><b><b>DOWNLOAD</b> &gt;&gt;&gt;&gt;&gt; <a href="https://imgfil.com/2uxZbb">https://imgfil.com/2uxZbb</a></b></p><br /><br />
5
- <p>reinvent the wheel - the. kamlrose says: 29/05/2022 at 5:56 am paragon extfs for windows crack activationl bd86983c93 kamlrose. the optimik 2.36c.rar is a program that allows you to crack optimik 2.rar the optimik 2.rar and can be used at home or in a home. </p>
6
- <p>kamlrose says: 29/05/2022 at 5:56 am paragon extfs for windows crack activationl bd86983c93 kamlrose. the optimik 2.36c.rar is a program that allows you to crack optimik 2.rar the optimik 2.rar and can be used at home or in a home. </p>
7
- <p>optimik 2.36c.rar 2019 winrar torrent download <br> 07:20 pm on 21/04/2022 at 1:12 am. optimik 2.rar is a program that allows you to crack optimik 2.rar the optimik 2.rar and can be used at home or in a home. </p>
8
- <p>das sheety chat. optimik 2.36c.rar t1 tinyurl.com/p6dd54w das sheety chat. crack optimik 2.rar > tinyurl.com/mvxqpos roleplay zone. driver navigator 2013 crack crack optimik 2.rar 1 procedures reclutamiento propose encuentra brings paso, iss pro evolution 2 iso download. </p> 899543212b<br />
9
- <br />
10
- <br />
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Block Strike FPS Shooter APK for Android - Enjoy Fun and Competitive Online Action.md DELETED
@@ -1,145 +0,0 @@
1
- <br />
2
- <h1>Block Strike FPS Shooter APK: A Fun and Addictive Online Multiplayer Game</h1>
3
- <p>Are you looking for a new online multiplayer game to play with your friends or other players from around the world? Do you enjoy shooting games with blocky graphics and pixel art style? If so, you might want to check out Block Strike FPS Shooter APK, a first-person shooter game that offers fun and competitive gameplay, a variety of game modes, maps, and weapons, and a lot of customization options. In this article, we will tell you what Block Strike FPS Shooter is, how to download and install it on your Android device, how to play it, and why you should play it.</p>
4
- <h2>block strike fps shooter apk</h2><br /><p><b><b>DOWNLOAD</b> &#128505; <a href="https://urlin.us/2uSWor">https://urlin.us/2uSWor</a></b></p><br /><br />
5
- <h2>What is Block Strike FPS Shooter?</h2>
6
- <p>Block Strike FPS Shooter is a first-person online multiplayer 3D shooter with blocky graphics and fun competitive gameplay. It is developed by Rexet Studio, a Russian indie game studio that also created Pixel Combat: Zombies Strike, World War Heroes: WW2 FPS, Modern Strike Online: Free PvP FPS shooting game, and other popular games. Block Strike FPS Shooter was released in 2015 and has since gained over 50 million downloads on Google Play Store. It has also received positive reviews from players who praised its gameplay, graphics, variety, and updates.</p>
7
- <p>The game features over 60 different game modes, such as Team Deathmatch, Gun Game, Hunger Games, Zombie Survival, Bunny Hop, Surf, Parkour, and more. You can choose from over 100 maps to play on, ranging from small arenas to large open worlds. You can also use over 40 types of weapons, such as pistols, rifles, shotguns, snipers, grenades, knives, and even a flamethrower. You can customize your character's appearance and your weapons' skins with coins that you earn by playing the game or by purchasing them with real money. You can also create your own clan or join an existing one to chat with other players and participate in clan wars.</p>
8
- <h2>How to download and install Block Strike FPS Shooter APK?</h2>
9
- <p>If you want to play Block Strike FPS Shooter on your Android device, you can download it from Google Play Store or from APKCombo, a website that provides safe and fast downloads of APK files. APK files are Android application packages that contain all the files needed to install an app on your device. However, some apps may also require additional files called OBB files that contain data such as graphics, sounds, or videos. These apps are usually packaged as XAPK files that include both the APK file and the OBB file.</p>
10
- <h3>Requirements and permissions</h3>
11
- <p>To download and install Block Strike FPS Shooter APK from APKCombo, you need to have an Android device that runs on Android 5.1 or higher. You also need to have enough storage space on your device or SD card to store the XAPK file (about 300 MB) and the extracted OBB file (about 400 MB). Additionally, you need to enable the installation of apps from unknown sources on your device settings. This will allow you to install apps that are not from Google Play Store.</p>
12
- <p>When you install Block Strike FPS Shooter APK on your device, you will be asked to grant some permissions to the app. <p>Some of the permissions that the app may request are:</p>
13
- <p>block strike fps shooter mod apk<br />
14
- block strike fps shooter download<br />
15
- block strike fps shooter game<br />
16
- block strike fps shooter online<br />
17
- block strike fps shooter hack apk<br />
18
- block strike fps shooter android<br />
19
- block strike fps shooter pc<br />
20
- block strike fps shooter unlimited money apk<br />
21
- block strike fps shooter apk pure<br />
22
- block strike fps shooter rexdl<br />
23
- block strike fps shooter revdl<br />
24
- block strike fps shooter apk mirror<br />
25
- block strike fps shooter apk obb<br />
26
- block strike fps shooter latest version apk<br />
27
- block strike fps shooter old version apk<br />
28
- block strike fps shooter free apk<br />
29
- block strike fps shooter cheats apk<br />
30
- block strike fps shooter gameplay<br />
31
- block strike fps shooter tips and tricks<br />
32
- block strike fps shooter best guns<br />
33
- block strike fps shooter maps<br />
34
- block strike fps shooter modes<br />
35
- block strike fps shooter skins<br />
36
- block strike fps shooter weapons<br />
37
- block strike fps shooter clans<br />
38
- block strike fps shooter codes<br />
39
- block strike fps shooter guide<br />
40
- block strike fps shooter review<br />
41
- block strike fps shooter rating<br />
42
- block strike fps shooter update<br />
43
- block strike fps shooter app store<br />
44
- block strike fps shooter ios<br />
45
- block strike fps shooter iphone<br />
46
- block strike fps shooter ipad<br />
47
- block strike fps shooter mac<br />
48
- block strike fps shooter windows 10<br />
49
- block strike fps shooter bluestacks<br />
50
- block strike fps shooter emulator<br />
51
- block strike fps shooter nox player<br />
52
- block strike fps shooter memu play<br />
53
- block strike fps shooter ld player<br />
54
- block strike fps shooter gameloop<br />
55
- block strike fps shooter steam<br />
56
- block strike fps shooter facebook<br />
57
- block strike fps shooter twitter<br />
58
- block strike fps shooter instagram<br />
59
- block strike fps shooter youtube<br />
60
- block strike fps shooter reddit<br />
61
- block strike fps shooter discord</p>
62
- <ul>
63
- <li>Access to your location to provide location-based services and ads</li>
64
- <li>Access to your photos, media, and files to read and write data on your device or SD card</li>
65
- <li>Access to your camera and microphone to enable video and voice chat features</li>
66
- <li>Access to your contacts and phone to find and invite friends to play with you</li>
67
- <li>Access to your network and internet to connect to the game servers and download updates</li>
68
- </ul>
69
- <p>You can accept or deny these permissions according to your preferences. However, some features of the game may not work properly if you deny some permissions. You can also change the permissions settings anytime on your device settings.</p>
70
- <h3>Downloading and installing the XAPK file</h3>
71
- <p>To download and install Block Strike FPS Shooter APK from APKCombo, you need to follow these steps:</p>
72
- <ol>
73
- <li>Go to [APKCombo](^2^) and search for Block Strike FPS Shooter.</li>
74
- <li>Select the latest version of the game and click on the download button.</li>
75
- <li>Wait for the XAPK file to be downloaded on your device or SD card.</li>
76
- <li>Install an XAPK installer app, such as [XAPK Installer] or [APKPure], from Google Play Store or APKCombo. These apps will help you extract and install the XAPK file.</li>
77
- <li>Open the XAPK installer app and locate the downloaded XAPK file of Block Strike FPS Shooter.</li>
78
- <li>Tap on the install button and wait for the installation process to complete.</li>
79
- <li>Launch the game and enjoy playing Block Strike FPS Shooter on your Android device.</li>
80
- </ol>
81
- <h2>How to play Block Strike FPS Shooter?</h2>
82
- <p>Now that you have downloaded and installed Block Strike FPS Shooter APK on your device, you are ready to play the game. Here are some tips on how to play the game and have fun.</p>
83
- <h3>The basics of the gameplay and the controls</h3>
84
- <p>The game is a first-person shooter that lets you play online with other players in various game modes and maps. You can use the virtual joystick on the left side of the screen to move your character, and the buttons on the right side of the screen to shoot, aim, reload, jump, crouch, switch weapons, chat, and access the menu. You can also customize the controls layout and sensitivity in the settings menu.</p>
85
- <p>Your objective in each game mode may vary, but generally, you have to eliminate your enemies, complete objectives, or survive as long as possible. You can see your health, ammo, score, timer, map, and other information on the top of the screen. You can also see your teammates' names and health bars above their heads. You can communicate with your teammates or other players using voice chat or text chat features.</p>
86
- <h4>Choosing a game mode and a map</h4>
87
- <p>To start playing, you need to choose a game mode and a map from the main menu. You can either join an existing room or create your own room. You can also filter the rooms by region, mode, map, players, ping, or password. You can see the details of each room, such as the name, mode, map, players, ping, password, and status. You can also see a preview of each map before joining or creating a room.</p>
88
- <p>The game has over 60 different game modes that offer different challenges and experiences. Some of the most popular game modes are:</p>
89
- <ul>
90
- <li>Team Deathmatch: Two teams compete to get more kills than the other team within a time limit or a kill limit.</li>
91
- <li>Bomb: One team tries to plant a bomb at one of two sites while the other team tries to defuse it or prevent it from being planted.</li>
92
- <li>Hunger Games: A survival mode where players have to scavenge for weapons and items while avoiding a shrinking zone and other players.</li>
93
- <li>Zombie Survival: A mode where one player becomes a zombie and tries to infect other players while they try to survive until time runs out.</li>
94
- <li>Bunny Hop: A mode where players have to jump through obstacles using advanced movement techniques.</li>
95
- </ul>
96
- <p>The game has over 100 maps that vary in size, theme, layout, and design. Some of the most popular maps are:</p>
97
- <ul>
98
- <li>Dust 2: A classic map inspired by Counter-Strike that features two bomb sites and a desert setting.</li>
99
- <li>Minecraft: A map based on Minecraft that features pixelated blocks and structures.</li>
100
- <li>Nuketown: A map based on Call of Duty that features a small suburban area with two houses and a bus.</li>
101
- <li>Prison: A map that features a prison complex with cells, corridors, and guard towers.</li>
102
- <li>City: A map that features a large urban area with buildings, streets, and vehicles.</li>
103
- </ul>
104
- <h4>Shooting, moving, and interacting with objects</h4>
105
- <p>To shoot your enemies, you need to aim your crosshair at them and tap the shoot button. You can also tap the aim button to zoom in and improve your accuracy. However, some weapons have recoil, spread, or bullet drop that can affect your shooting. You can also reload your weapon by tapping the reload button or by running out of ammo. You can switch your weapon by tapping the weapon button or by swiping the screen. You can also use grenades, knives, or other items by tapping the corresponding buttons.</p>
106
- <p>To move your character, you need to use the virtual joystick on the left side of the screen. You can also jump by tapping the jump button or crouch by tapping the crouch button. You can also sprint by double-tapping the joystick or slide by tapping the crouch button while sprinting. You can also interact with some objects in the game, such as doors, ladders, buttons, or vehicles. To interact with an object, you need to approach it and tap the interact button that appears on the screen.</p>
107
- <h4>Customizing your character and weapons</h4>
108
- <p>To customize your character's appearance and your weapons' skins, you need to go to the shop menu from the main menu. You can buy various items with coins that you earn by playing the game or by purchasing them with real money. You can also get items from crates that you can open with keys that you can buy or earn. You can also sell or trade items with other players.</p>
109
- <p>You can customize your character's head, body, hands, legs, and accessories with different items. You can also customize your weapons' skins with different colors, patterns, stickers, or effects. You can also upgrade your weapons' stats with modules that you can buy or earn. You can also create your own skins or modules with the editor feature.</p>
110
- <h2>Why should you play Block Strike FPS Shooter?</h2>
111
- <p>Block Strike FPS Shooter is a fun and addictive online multiplayer game that offers a lot of features and benefits for players who enjoy shooting games. Here are some of the reasons why you should play Block Strike FPS Shooter:</p>
112
- <h3>Fun and competitive online multiplayer action</h3>
113
- <p>The game lets you play online with other players from around the world in various game modes and maps. You can compete with other players in team-based or solo modes, cooperate with other players in survival or objective modes, or just have fun in casual or custom modes. You can also chat with other players using voice chat or text chat features. You can also create your own clan or join an existing one to chat with other players and participate in clan wars.</p>
114
- <h3>Blocky graphics and pixel art style</h3>
115
- <p>The game has a unique blocky graphics and pixel art style that gives it a retro and nostalgic feel. The game also has a lot of details and animations that make it look lively and colorful. The game also has a smooth and responsive performance that makes it run well on most devices.</p>
116
- <h3>Variety of game modes, maps, and weapons</h3>
117
- <p>The game has over 60 different game modes that offer different challenges and experiences for players of all skill levels and preferences. The game also has over 100 maps that vary in size, theme, layout, and design. The game also has over 40 types of weapons that offer different advantages and disadvantages for different situations and play styles.</p>
118
- <h2>Conclusion</h2>
119
- <p>Block Strike FPS Shooter APK is a fun and addictive online multiplayer game that lets you play online with other players in various game modes and maps. You can also customize your character's appearance and your weapons' skins with coins that you earn by playing the game or by purchasing them with real money. You can also create your own clan or join an existing one to chat with other players and participate in clan wars.</p>
120
- <p>If you are looking for a new online multiplayer game to play with your friends or other players from around the world, you might want to check out Block Strike FPS Shooter APK. You can download it from Google Play Store or from APKCombo, a website that provides safe and fast downloads of APK files. You can also follow these steps to download and install it on your Android device:</p>
121
- <ol>
122
- <li>Go to [APKCombo] and search for Block Strike FPS Shooter.</li>
123
- <li>Select the latest version of the game and click on the download button.</li>
124
- <li>Wait for the XAPK file to be downloaded on your device or SD card.</ <li>Install an XAPK installer app, such as [XAPK Installer] or [APKPure], from Google Play Store or APKCombo. These apps will help you extract and install the XAPK file.</li>
125
- <li>Open the XAPK installer app and locate the downloaded XAPK file of Block Strike FPS Shooter.</li>
126
- <li>Tap on the install button and wait for the installation process to complete.</li>
127
- <li>Launch the game and enjoy playing Block Strike FPS Shooter on your Android device.</li>
128
- </ol>
129
- <p>We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!</p>
130
- <h2>FAQs</h2>
131
- <p>Here are some of the frequently asked questions about Block Strike FPS Shooter APK:</p>
132
- <ol>
133
- <li>Is Block Strike FPS Shooter APK safe to download and install?</li>
134
- <p>Yes, Block Strike FPS Shooter APK is safe to download and install from APKCombo, a website that provides safe and fast downloads of APK files. APKCombo scans all the APK files for viruses and malware before uploading them to their website. However, you should always be careful when downloading and installing apps from unknown sources and check the permissions that the app requests.</p>
135
- <li>Is Block Strike FPS Shooter APK free to play?</li>
136
- <p>Yes, Block Strike FPS Shooter APK is free to play, but it also offers in-app purchases that allow you to buy coins, keys, crates, skins, modules, or other items with real money. You can also earn coins by playing the game or by watching ads. You can also disable in-app purchases on your device settings if you don't want to spend money on the game.</p>
137
- <li>How can I update Block Strike FPS Shooter APK?</li>
138
- <p>You can update Block Strike FPS Shooter APK by downloading and installing the latest version of the game from APKCombo or Google Play Store. You can also enable automatic updates on your device settings to get notified when a new update is available. You should always update the game to get the latest features, bug fixes, and improvements.</p>
139
- <li>How can I contact the developer of Block Strike FPS Shooter?</li>
140
- <p>You can contact the developer of Block Strike FPS Shooter by sending an email to [email protected] or by visiting their website at https://rexetstudio.com/. You can also follow them on Facebook, Twitter, Instagram, or YouTube to get the latest news, updates, and promotions about their games.</p>
141
- <li>How can I play Block Strike FPS Shooter on PC?</li>
142
- <p>You can play Block Strike FPS Shooter on PC by using an Android emulator, such as [BlueStacks] or [NoxPlayer], that allows you to run Android apps on your PC. You need to download and install the emulator on your PC and then download and install Block Strike FPS Shooter APK from APKCombo or Google Play Store. You can then launch the game and play it with your keyboard and mouse or a gamepad.</p>
143
- </ol></p> 197e85843d<br />
144
- <br />
145
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Aquaman 2 Full Movie 2020 Tamil Download in 720p 1080p and 4K Tamilrockers.md DELETED
@@ -1,74 +0,0 @@
1
- <br />
2
- <h1>Aquaman 2 Full Movie 2020 Tamil Download Tamilrockers: Everything You <h1>Aquaman 2 Full Movie 2020 Tamil Download Tamilrockers: Everything You Need to Know</h1>
3
- <p>If you are a fan of superhero movies, you might be eagerly waiting for the release of Aquaman 2, the sequel to the 2018 blockbuster Aquaman. But did you know that you can also watch this movie in Tamil, one of the oldest and most spoken languages in the world? And did you know that there is a website called Tamilrockers that claims to offer this movie for free download? Well, before you get too excited, let me tell you everything you need to know about Aquaman 2, Tamil, and Tamilrockers in this article.</p>
4
- <h2>What is Aquaman 2?</h2>
5
- <p>Aquaman 2 is an upcoming American superhero film based on the DC Comics character Aquaman, starring Jason Momoa as the titular hero. It is the sequel to the 2018 film Aquaman, and the 15th and final installment in the DC Extended Universe (DCEU). The film is directed by James Wan and written by David Leslie Johnson-McGoldrick. It also features Amber Heard, Patrick Wilson, Yahya Abdul-Mateen II, Nicole Kidman, and Ben Affleck as Batman. The film is scheduled to be released on December 20, 2023. </p>
6
- <h2>aquaman 2 full movie 2020 tamil download tamilrockers</h2><br /><p><b><b>Download File</b> &#9193; <a href="https://jinyurl.com/2uNUhW">https://jinyurl.com/2uNUhW</a></b></p><br /><br />
7
- <h3>Who is Aquaman?</h3>
8
- <p>Aquaman is a superhero who has the ability to communicate with and control marine life, breathe underwater, swim at superhuman speeds, and wield a powerful trident. He is also the king of Atlantis, an underwater civilization that was once part of the surface world. He is a founding member of the Justice League, a team of superheroes that includes Batman, Superman, Wonder Woman, Flash, Cyborg, and Green Lantern. He is often considered as one of the most powerful and popular superheroes in the DC Comics universe.</p>
9
- <h3>Who are the other characters in Aquaman 2?</h3>
10
- <p>Some of the other main characters in Aquaman 2 are: - Mera (played by Amber Heard): She is Aquaman's love interest and ally. She is a princess of Xebel, an underwater kingdom that was once part of Atlantis. She has the ability to manipulate water and create hard water constructs. - Orm (played by Patrick Wilson): He is Aquaman's half-brother and rival. He is the former king of Atlantis who tried to wage war against the surface world in the first film. He has similar abilities as Aquaman but uses a helmet and a sword instead of a trident. - Black Manta (played by Yahya Abdul-Mateen II): He is Aquaman's archenemy and a ruthless mercenary. He wears a high-tech suit that has a helmet with red laser eyes and a harpoon gun. He seeks revenge against Aquaman for killing his father in the first film. - Atlanna (played by Nicole Kidman): She is Aquaman's mother and the former queen of Atlantis. She was presumed dead after being sacrificed to a monster called the Trench in the first film, but was later revealed to be alive and living in a hidden kingdom called the Hidden Sea. - Batman (played by Ben Affleck): He is Aquaman's friend and teammate in the Justice League. He is a billionaire vigilante who uses his intellect, skills, and gadgets to fight crime in Gotham City. He has no superpowers but is considered as one of the greatest detectives and strategists in the world.</p>
11
- <h3>What are the challenges faced by Aquaman 2?</h3>
12
- <p>Aquaman 2 faces several challenges and controversies that might affect its production and reception. Some of them are: - COVID-19 pandemic: The global health crisis caused by the coronavirus outbreak has disrupted many film projects, including Aquaman 2. The film was originally planned to start shooting in early 2020, but was delayed due to lockdowns and safety measures. The film is now expected to begin filming in June 2021 in London. - Amber Heard's legal issues: The actress who plays Mera has been involved in a bitter legal battle with her ex-husband Johnny Depp, who accused her of domestic abuse and defamation. Depp lost his libel case against a British tabloid that called him a "wife-beater" based on Heard's allegations. Many fans of Depp have petitioned to remove Heard from Aquaman 2, claiming that she lied about her abuse claims and that she does not deserve to play a strong female character. - DCEU's future plans: The DC Extended Universe has been undergoing some changes and reboots after the mixed reception of some of its previous films. The most notable example is Zack Snyder's Justice League, which was released on HBO Max in March 2021, which was a four-hour director's cut of the 2017 film Justice League. The new version was praised by critics and fans for its improved story, characters, and visuals. However, it also raised questions about the continuity and direction of the DCEU, as some of the events and characters in Zack Snyder's Justice League are different from those in the other films. For example, Ben Affleck's Batman is supposed to appear in Aquaman 2, but he has already retired from the role after Zack Snyder's Justice League. It is unclear how Aquaman 2 will fit into the DCEU timeline and canon. <h2>What is Tamil?</h2>
13
- <p>Tamil is a Dravidian language spoken by the Tamil people of South Asia, mainly in India and Sri Lanka. It is one of the official languages of India, Sri Lanka, and Singapore. It has a rich literary tradition dating back to the 5th century BCE. Tamil is written in a non-Latin script derived from the Brahmi script. It has several dialects and varieties based on region, caste, and social class.</p>
14
- <h3>How old is Tamil?</h3>
15
- <p>Tamil is one of the oldest living languages in the world, with evidence of its existence dating back to at least 2500 years ago. The earliest known Tamil inscriptions are from the 3rd century BCE, and the earliest known Tamil literature is from the 2nd century BCE. The oldest extant Tamil literary work is the Tolkappiyam, a grammar and poetics treatise written by Tolkappiyar. The classical period of Tamil literature spanned from the 3rd century BCE to the 8th century CE, producing works such as the Sangam poetry, the Silappatikaram, and the Tirukkural.</p>
16
- <h3>How is Tamil written?</h3>
17
- <p>Tamil is written in a script called Tamil-Brahmi, which is derived from the ancient Brahmi script that was used to write Sanskrit and other languages. The Tamil-Brahmi script consists of 12 vowels and 18 consonants, which can be combined to form syllables and words. The script also has special symbols for numerals, fractions, and punctuation marks. The script is written from left to right, with no spaces between words. The script has undergone some changes over time, such as the addition of new letters and diacritics to represent sounds borrowed from other languages.</p>
18
- <h3>How is Tamil spoken?</h3>
19
- <p>Tamil is spoken by about 80 million people worldwide, mostly in India, Sri Lanka, Malaysia, Singapore, and other countries where Tamil diaspora live. Tamil has several dialects and varieties based on region, caste, and social class. Some of the major dialects are Central Tamil, Kongu Tamil, Madurai Tamil, Tirunelveli Tamil, Jaffna Tamil, Batticaloa Tamil, etc. Each dialect has its own phonology, vocabulary, grammar, and idioms. Some dialects are mutually intelligible, while others are not. Tamil also has a standard form called Modern Standard Tamil or Literary Tamil, which is based on the classical language and used for formal and written communication.</p>
20
- <h2>What is Tamilrockers?</h2>
21
- <p>Tamilrockers is an illegal website that provides pirated copies of Indian and Hollywood movies online. It is notorious for leaking movies before or soon after their release dates, causing huge losses to the film industry. The website uses magnetic links to access and download copyrighted content. The authorities have blocked many mirrors and proxies of the website, but it continues to operate by switching to new domains and extensions. The website also offers movies dubbed in regional languages like Tamil, Telugu, Hindi, etc.</p>
22
- <h3>How does Tamilrockers work?</h3>
23
- <p>Tamilrockers works by using magnetic links or magnet links to access and download pirated movies online. A magnet link is a type of hyperlink that contains information about a file or a group of files that can be downloaded using a peer-to-peer network such as BitTorrent. A magnet link does not contain the actual file or its location on a server; instead, it contains a unique identifier or hash value that can be used to locate other users who have the same file or parts of it. This way, users can download files from multiple sources without relying on a central server or authority.</p>
24
- <p>aquaman 2 full movie 2020 tamil dubbed download tamilrockers<br />
25
- aquaman and the lost kingdom full movie in tamil download tamilrockers<br />
26
- aquaman 2 2020 tamil full movie watch online free tamilrockers<br />
27
- aquaman 2 full movie download in tamil hd 1080p tamilrockers<br />
28
- aquaman 2 full movie in tamil free download tamilrockers<br />
29
- aquaman 2 full movie 2020 tamil dubbed watch online tamilrockers<br />
30
- aquaman and the lost kingdom tamil dubbed movie download tamilrockers<br />
31
- aquaman 2 full movie in tamil hd download tamilrockers<br />
32
- aquaman 2 full movie download in tamil 720p tamilrockers<br />
33
- aquaman 2 full movie in tamil dubbed download tamilrockers<br />
34
- aquaman and the lost kingdom full movie watch online in tamil tamilrockers<br />
35
- aquaman 2 full movie in tamil online watch free tamilrockers<br />
36
- aquaman 2 full movie download in tamil hd quality tamilrockers<br />
37
- aquaman 2 full movie in tamil dubbed free download tamilrockers<br />
38
- aquaman and the lost kingdom full movie download in tamil hd tamilrockers<br />
39
- aquaman 2 full movie in tamil hd watch online free tamilrockers<br />
40
- aquaman 2 full movie download in tamil mp4 tamilrockers<br />
41
- aquaman 2 full movie in tamil dubbed hd download tamilrockers<br />
42
- aquaman and the lost kingdom full movie online free in tamil tamilrockers<br />
43
- aquaman 2 full movie in tamil free watch online tamilrockers<br />
44
- aquaman 2 full movie download in tamil hd print tamilrockers<br />
45
- aquaman 2 full movie in tamil dubbed online watch free tamilrockers<br />
46
- aquaman and the lost kingdom full movie free download in tamil tamilrockers<br />
47
- aquaman 2 full movie in tamil hd online watch free no sign up no registration no ads no popups no surveys no viruses no malware no spyware no ransomware no trojans no worms no rootkits no keyloggers no phishing no scamming no spamming no hacking no cracking no spoofing no sniffing no hijacking no injecting no infecting no corrupting no destroying no deleting no erasing no formatting no wiping no stealing no leaking no exposing no compromising no endangering no harming no hurting no damaging no ruining no breaking no crashing no burning no exploding (just kidding, don't use this one)<br />
48
- aquaman 2 full movie download in tamilmv, tamilyogi, isaimini, moviesda, kuttymovies, madras rockers, movierulz, filmywap, filmyzilla, worldfree4u, bolly4u, pagalworld, mp4moviez, jiorockers, todaypk, skymovieshd, extramovies, khatrimaza, bollyshare, coolmoviez, cinevood, dvdvilla, hdmovieshub, hdmovie99, mkvcinemas, mkvmoviespoint, mkvhub, mkvzone (again, don't use this one)</p>
49
- <h3>How does Tamilrockers evade authorities?</h3>
50
- <p>Tamilrockers evades authorities by changing its domains and extensions frequently to avoid being blocked or traced. A domain name is a unique name that identifies a website on the internet; for example, tamilrockers.com is a domain name. An extension is a part of a domain name that indicates the type or category of the website; for example, .com, .net, .org, etc. are extensions. Tamilrockers changes its domain name and extension frequently to avoid being detected by the authorities or blocked by the internet service providers. For example, some of the previous domains and extensions used by Tamilrockers are tamilrockers.net, tamilrockers.co, tamilrockers.ws, tamilrockers.la, tamilrockers.cc, etc. The current domain and extension of Tamilrockers are unknown, as they keep changing them regularly.</p>
51
- <h3>How does Tamilrockers affect the film industry?</h3>
52
- <p>Tamilrockers affects the film industry negatively by leaking movies illegally and causing huge losses and damages to the filmmakers and distributors. According to some estimates, the Indian film industry loses about $2.8 billion annually due to piracy. Piracy also affects the quality and creativity of the films, as the filmmakers have to compromise on their budgets and resources to cope with the losses. Piracy also deprives the audience of the authentic and original experience of watching a movie in a theater or on a legal platform. Piracy also violates the intellectual property rights and moral rights of the creators and owners of the films.</p>
53
- <h2>How to download Aquaman 2 full movie in Tamil from Tamilrockers?</h2>
54
- <p>If you still want to download Aquaman 2 full movie in Tamil from Tamilrockers, despite knowing the risks and consequences, here are the steps you need to follow: - Step 1: Find out the current domain and extension of Tamilrockers by searching on Google or using a proxy server. - Step 2: Visit the website and search for Aquaman 2 full movie in Tamil using the search bar or browsing through the categories. - Step 3: Click on the movie title and select the quality and size of the file you want to download. - Step 4: Click on the magnet link or torrent link to start downloading the file using a BitTorrent client such as uTorrent or BitTorrent. - Step 5: Wait for the download to complete and enjoy watching Aquaman 2 full movie in Tamil.</p>
55
- <h3>Disclaimer: Why you should not download Aquaman 2 from Tamilrockers?</h3>
56
- <p>Before you download Aquaman 2 from Tamilrockers, you should be aware of the following reasons why you should not do so: - It is illegal: Downloading pirated movies from Tamilrockers is a criminal offense that can land you in jail or result in a fine. You are also violating the intellectual property rights and moral rights of the filmmakers and distributors by downloading their movies without their permission or consent. - It is unethical: Downloading pirated movies from Tamilrockers is an unethical act that harms the film industry and the people who work hard to make and deliver quality movies to the audience. You are also depriving yourself of the authentic and original experience of watching a movie in a theater or on a legal platform. - It is unsafe: Downloading pirated movies from Tamilrockers can expose your device and data to malware, viruses, spyware, ransomware, etc. that can harm your device or steal your personal information. You can also become a victim of phishing, identity theft, fraud, etc. by clicking on malicious links or ads on Tamilrockers website.</p>
57
- <h1>Conclusion: Aquaman 2 Full Movie 2020 Tamil Download Tamilrockers: Is it worth it?</h1>
58
- <p>In conclusion, Aquaman 2 full movie 2020 Tamil download Tamilrockers is not worth it for many reasons. It is illegal, unethical, unsafe, and disrespectful to download pirated movies from Tamilrockers website. You are not only risking your legal and moral status, but also your device and data security by downloading pirated movies from Tamilrockers website. You are also missing out on the genuine and original experience of watching a movie in a theater or on a legal platform.</p>
59
- <p>Instead of downloading Aquaman 2 from Tamilrockers, you should watch it legally when it releases on December 20, 2023. You can watch it in English or in any other language that it is dubbed in. You can also watch it in Tamil if it is officially released in that language by the filmmakers or distributors. You can watch it in a theater near you or on an online streaming service that has the rights to show it. By watching Aquaman 2 legally, you are supporting the film industry and the people who work hard to make and deliver quality movies to you.</p>
60
- <p>Aquaman 2 is an amazing movie that deserves your attention and appreciation. It is a movie that showcases the power and beauty of Aquaman and his underwater world. It is a movie that features stunning visuals, thrilling action, captivating characters, and an engaging story. It is a movie that you will enjoy and remember for a long time. So, don't waste your time and money on downloading Aquaman 2 from Tamilrockers. Watch it legally and have a great time!</p>
61
- <h2>FAQs</h2>
62
- <p>Here are some frequently asked questions about Aquaman 2 full movie 2020 Tamil download Tamilrockers:</p>
63
- <h3>Q: When will Aquaman 2 release in theaters?</h3>
64
- <p>A: Aquaman 2 is scheduled to release on December 20, 2023 in theaters worldwide.</p>
65
- <h3>Q: Will Aquaman 2 be available in Tamil language?</h3>
66
- <p>A: It is not confirmed yet whether Aquaman 2 will be officially dubbed or released in Tamil language. However, you can watch it in English or in any other language that it is dubbed in.</p>
67
- <h3>Q: Is Tamilrockers a legal website?</h3>
68
- <p>A: No, Tamilrockers is an illegal website that provides pirated copies of movies online. It is a criminal offense to download or watch movies from Tamilrockers website.</p>
69
- <h3>Q: What are the risks of downloading movies from Tamilrockers?</h3>
70
- <p>A: Downloading movies from Tamilrockers can expose you to legal and ethical consequences, as well as malware, viruses, spyware, ransomware, phishing, identity theft, fraud, etc. that can harm your device or data.</p>
71
- <h3>Q: How can I watch Aquaman 2 legally?</h3>
72
- <p>A: You can watch Aquaman 2 legally by watching it in a theater near you or on an online streaming service that has the rights to show it. You can also buy or rent the DVD or Blu-ray of the movie when it becomes available.</p> 197e85843d<br />
73
- <br />
74
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Farm Heroes Saga for PC and Mac with BlueStacks.md DELETED
@@ -1,110 +0,0 @@
1
-
2
- <h1>How to Download and Play Farm Heroes Saga on Your Laptop</h1>
3
- <p>If you love match-3 puzzle games, you might have heard of <strong>Farm Heroes Saga</strong>, one of the most popular games from King, the makers of Candy Crush Saga. In this game, you will join forces with the Farm Heroes and help them collect the cute Cropsies across hundreds of levels. You will also face Rancid the Raccoon, who is trying to spoil the precious Farm Lands. Are you ready for a farmtastic adventure?</p>
4
- <h2>farm heroes saga download laptop</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash;>>> <a href="https://jinyurl.com/2uNP7u">https://jinyurl.com/2uNP7u</a></b></p><br /><br />
5
- <p>In this article, we will show you how to download and play Farm Heroes Saga on your laptop, so you can enjoy this fun and colorful game on a bigger screen. We will also share some tips and tricks to help you master the game and have more fun.</p>
6
- <h2>What is Farm Heroes Saga?</h2>
7
- <h3>A fun and colorful match-3 puzzle game</h3>
8
- <p>Farm Heroes Saga is a match-3 puzzle game that challenges you to swap and match three or more Cropsies of the same kind in a row or column. Cropsies are adorable fruits and vegetables that have different values depending on how many you match. You need to collect enough Cropsies to meet the level's goal before you run out of moves.</p>
9
- <h3>A farmtastic adventure with cute Cropsies and Farm Heroes</h3>
10
- <p>Farm Heroes Saga is not just a simple puzzle game. It also has a story that takes you on an adventure across different farm lands, such as Dairy District, Fruity Fields, Sandy Slopes, and more. Along the way, you will meet various Farm Heroes, such as Amelia the Aviator, Hunter the Gourmet Chef, Choo Choo the Train Driver, and others. They will help you with their special skills and boosters.</p>
11
- <h3>A challenge to stop Rancid the Raccoon from spoiling the Farm Lands</h3>
12
- <p>Farm Heroes Saga also has a villain, Rancid the Raccoon, who is trying to ruin your farming fun. He will appear in some levels and try to lower the value of your Cropsies by spraying them with his stinky gas. You need to defeat him by collecting enough Cropsies before he escapes. You can also use special items, such as shovels, tractors, water buckets, and eggs, to counter his attacks.</p>
13
- <h2>Why play Farm Heroes Saga on your laptop?</h2>
14
- <h3>Enjoy a bigger screen and better graphics</h3 <p>One of the benefits of playing Farm Heroes Saga on your laptop is that you can enjoy a bigger screen and better graphics. The game has a lot of colorful and detailed animations and effects that will look more stunning on a larger display. You will also be able to see the Cropsies and the Farm Lands more clearly and appreciate their cuteness and charm.</p>
15
- <h3>Use your keyboard and mouse for more control and accuracy</h3>
16
- <p>Another advantage of playing Farm Heroes Saga on your laptop is that you can use your keyboard and mouse for more control and accuracy. You can use your mouse to drag and drop the Cropsies with ease and precision. You can also use your keyboard shortcuts to access the menu, pause the game, or switch between windows. You will have more flexibility and convenience when playing the game on your laptop.</p>
17
- <p>farm heroes saga pc download free<br />
18
- farm heroes saga for windows 10<br />
19
- farm heroes saga emulator for pc<br />
20
- farm heroes saga microsoft store<br />
21
- farm heroes saga bluestacks download<br />
22
- farm heroes saga online play on laptop<br />
23
- farm heroes saga download for mac<br />
24
- farm heroes saga game download for pc<br />
25
- farm heroes saga app for laptop<br />
26
- farm heroes saga offline download for pc<br />
27
- farm heroes saga install on laptop<br />
28
- farm heroes saga for pc without facebook<br />
29
- farm heroes saga free download full version for pc<br />
30
- farm heroes saga windows 7 download<br />
31
- farm heroes saga pc game free download<br />
32
- farm heroes saga apk download for laptop<br />
33
- farm heroes saga cheats for pc<br />
34
- farm heroes saga update download for pc<br />
35
- farm heroes saga hack download for pc<br />
36
- farm heroes saga mod apk download for pc<br />
37
- farm heroes saga levels download for pc<br />
38
- farm heroes saga download laptop windows 8<br />
39
- farm heroes saga download laptop windows xp<br />
40
- farm heroes saga download laptop windows vista<br />
41
- farm heroes saga download laptop windows 11<br />
42
- how to play farm heroes saga on laptop<br />
43
- how to download farm heroes saga on laptop without bluestacks<br />
44
- how to sync farm heroes saga on laptop and phone<br />
45
- how to uninstall farm heroes saga on laptop<br />
46
- how to get more lives in farm heroes saga on laptop<br />
47
- best emulator for farm heroes saga on laptop<br />
48
- best settings for farm heroes saga on laptop<br />
49
- best tips and tricks for farm heroes saga on laptop<br />
50
- best strategy for farm heroes saga on laptop<br />
51
- best booster for farm heroes saga on laptop<br />
52
- is farm heroes saga available for laptop<br />
53
- is farm heroes saga compatible with laptop<br />
54
- is farm heroes saga safe to download on laptop<br />
55
- is farm heroes saga free to play on laptop<br />
56
- can i play farm heroes saga on my laptop<br />
57
- can i download farm heroes saga on my laptop<br />
58
- can i transfer my progress in farm heroes saga from phone to laptop<br />
59
- can i connect my facebook account to farm heroes saga on laptop<br />
60
- can i use keyboard and mouse to play farm heroes saga on laptop<br />
61
- why is farm heroes saga not working on my laptop<br />
62
- why is farm heroes saga slow on my laptop<br />
63
- why is farm heroes saga crashing on my laptop<br />
64
- why is farm heroes saga not updating on my laptop</p>
65
- <h3>Sync your progress across devices with your Facebook account</h3>
66
- <p>A third benefit of playing Farm Heroes Saga on your laptop is that you can sync your progress across devices with your Facebook account. You don't have to worry about losing your data or starting over if you switch from your phone to your laptop or vice versa. You can simply log in with your Facebook account and continue where you left off. You can also see your friends' scores and achievements and send or receive lives and gifts from them.</p>
67
- <h2>How to download Farm Heroes Saga on your laptop?</h2>
68
- <h3>Option 1: Download from the Microsoft Store</h3>
69
- <p>If you have a Windows 10 laptop, you can download Farm Heroes Saga from the Microsoft Store for free. Here are the steps to do so:</p>
70
- <h4>Step 1: Open the Microsoft Store app on your laptop</h4>
71
- <p>You can find the Microsoft Store app on your Start menu or taskbar. Click on it to open it.</p>
72
- <h4>Step 2: Search for Farm Heroes Saga and click on it</h4>
73
- <p>You can use the search bar at the top right corner of the app to type in "Farm Heroes Saga" and hit enter. You will see the game's icon and name in the results. Click on it to go to its page.</p>
74
- <h4>Step 3: Click on the Get or Buy button to download the game</h4>
75
- <p>On the game's page, you will see a button that says either "Get" or "Buy" depending on whether the game is free or paid. Click on it to start downloading the game. You may need to sign in with your Microsoft account if you haven't already.</p>
76
- <h4>Step 4: Launch the game and start playing</h4>
77
- <p>Once the download is complete, you can launch the game from the Microsoft Store app or from your Start menu or taskbar. You will see a splash screen with the game's logo and then you can start playing.</p>
78
- <h3>Option 2: Download from a third-party platform</h3>
79
- <p>If you don't have a Windows 10 laptop or you prefer to use a different platform, you can download Farm Heroes Saga from a third-party platform that offers Android games for PC. One of the most popular platforms is BlueStacks, which is an Android emulator that allows you to run Android apps and games on your laptop. Here are the steps to download Farm Heroes Saga using BlueStacks:</p>
80
- <h4>Step 1: Download and install BlueStacks on your laptop</h4>
81
- <p>You can download BlueStacks from its official website <a href="">here</a>. Follow the instructions on how to install it on your laptop. It may take some time depending on your internet speed and system performance.</p>
82
- <h4>Step 2: Open BlueStacks and sign in with your Google account</h4>
83
- <p>After installing BlueStacks, open it and sign in with your Google account. This will allow you to access the Google Play Store and other Google services on BlueStacks. If you don't have a Google account, you can create one for free.</p>
84
- <h4>Step 3: Search for Farm Heroes Saga on the Google Play Store and install it</h4>
85
- <p>On BlueStacks, you will see a home screen with various apps and games. Click on the Google Play Store icon to open it. Then, search for Farm Heroes Saga using the search bar at the top of the screen. You will see the game's icon and name in the results. Click on it to go to its page. Then, click on the Install button to download and install the game.</p>
86
- <h4>Step 4: Launch the game and start playing</h4>
87
- <p>Once the installation is complete, you can launch the game from the Google Play Store or from the BlueStacks home screen. You will see a splash screen with the game's logo and then you can start playing.</p>
88
- <h2>Tips and tricks for playing Farm Heroes Saga on your laptop</h2>
89
- <p>Now that you know how to download and play Farm Heroes Saga on your laptop, you might want to learn some tips and tricks to improve your gameplay and have more fun. Here are some of them:</p>
90
- <h3>Use boosters and power-ups wisely</h3>
91
- <p>Farm Heroes Saga has various boosters and power-ups that can help you complete the levels faster and easier. For example, you can use the shovel to remove any Cropsie from the board, the tractor to clear a row or column of Cropsies, the water bucket to fill up the water droplets, and the egg cracker to crack all the eggs on the board. You can also create power-ups by matching four or more Cropsies of the same kind, such as the +1 bonus, the super Cropsie, and the firecracker. However, these boosters and power-ups are limited and can be replenished by spending gold bars or real money. Therefore, you should use them wisely and only when necessary.</p>
92
- <h3>Collect magic beans to activate the Farm Club</h3>
93
- <p>Farm Heroes Saga has a feature called the Farm Club, which is a collection of farm animals that have special abilities. You can unlock these animals by collecting magic beans, which are earned by completing levels with more stars. The more stars you get, the more magic beans you earn. You can then use these magic beans to activate the Farm Club animals on certain levels and benefit from their skills. For example, you can use the sheep to collect all the hay on the board, the dog to collect all the bones on the board, or the pig to collect all the mud on the board.</p>
94
- <h3>Complete daily quests and events for extra rewards</h3>
95
- <p>Farm Heroes Saga also has daily quests and events that can give you extra rewards, such as lives, boosters, gold bars, or stickers. You can access these quests and events by clicking on the icons on the left side of the screen. You will see different tasks that you need to complete within a certain time limit or number of moves. For example, you might need to collect a specific number of Cropsies, match a certain pattern of Cropsies, or defeat Rancid with a certain score. If you complete these tasks successfully, you will receive your rewards and move on to the next quest or event.</p>
96
- <h3>Join a team or create your own to chat and compete with other players</h3>
97
- <p>Farm Heroes Saga also has a social aspect that allows you to join a team or create your own and chat and compete with other players from around the world. You can access this feature by clicking on the team icon on the bottom right corner of the screen. You will see different teams that you can join or create based on your preferences and goals. You can also see your team members' profiles, scores, and messages. By joining a team, you can send and receive lives and gifts from your teammates, chat with them about tips and strategies, and participate in team tournaments and challenges for more fun and rewards.</p>
98
- <h2>Conclusion</h2>
99
- <p>Farm Heroes Saga is a fun and colorful match-3 puzzle game that will keep you entertained for hours. You can download and play it on your laptop for a better gaming experience. You can also use some tips and tricks to master the game and have more fun. If you are looking for a farmtastic adventure with cute Cropsies and Farm Heroes, Farm Heroes Saga is the game for you.</p>
100
- <h2>FAQs</h2>
101
- <p>Here are some frequently asked questions about Farm Heroes Saga:</p>
102
- <table>
103
- <tr><td><strong>Q: How many levels are there in Farm Heroes Saga?</strong></td><td><strong>A: There are over 3000 levels in Farm Heroes Saga as of June 2023.</strong></td></tr>
104
- <tr><td><strong>Q: How do I get more lives in Farm Heroes Saga?</strong></td><td><strong>A: You can get more lives in Farm Heroes Saga by waiting for them to refill over time, asking your friends or teammates to send them to you, watching ads, completing quests or events, or buying them with gold bars or real money.</strong></td></tr>
105
- <tr><td><strong>Q: How do I get more gold bars in Farm Heroes Saga?</strong></td><td><strong>A: You can get more gold bars in Farm Heroes Saga by completing levels with more stars, reaching certain milestones, participating in tournaments or challenges, watching ads, or buying them with real money.</strong></td></tr>
106
- <tr><td><strong>Q: How do I get more stickers in Farm Heroes Saga?</strong></td><td><strong>A: You can get more stickers in Farm Heroes Saga by completing quests or events that reward them, or buying them with gold bars or real money.</strong></td></tr>
107
- <tr><td><strong>Q: How do I contact the support team of Farm Heroes Saga?</strong></td><td><strong>A: You can contact the support team of Farm Heroes Saga by clicking on the settings icon on the top right corner of the screen, then clicking on the help center icon, then clicking on the contact us button. You can also visit the official website <a href="">here</a> and fill out a form with your issue or feedback.</strong></td></tr>
108
- </table></p> 197e85843d<br />
109
- <br />
110
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Emoji Keyboard How to Customize Your Android Keyboard with Fun and Style.md DELETED
@@ -1,142 +0,0 @@
1
- <br />
2
- <h1>How to Download and Install com.facemoji.lite.xiaomi APK on Your Android Phone</h1>
3
- <p>If you are looking for a fun and customizable keyboard app for your Android phone, you might want to try com.facemoji.lite.xiaomi APK. This is an app that allows you to use thousands of emojis, stickers, GIFs, fonts, and themes on your keyboard. You can also create your own emoji text art and keyboard skins, and share them with your friends. In this article, we will show you how to download and install com.facemoji.lite.xiaomi APK on your Android phone from two different sources: APKCombo and Uptodown.</p>
4
- <h2>What is com.facemoji.lite.xiaomi APK?</h2>
5
- <p>com.facemoji.lite.xiaomi APK is the package name of the app Facemoji Keyboard, which is developed by EKATOX APPS. Facemoji Keyboard is a free, customizable keyboard app that offers a variety of features such as auto paste, comment art, cool fonts, GIFs, and more. It is compatible with all social media platforms and messaging apps.</p>
6
- <h2>com.facemoji.lite.xiaomi apk download</h2><br /><p><b><b>Download Zip</b> &#9999; &#9999; &#9999; <a href="https://jinyurl.com/2uNMFk">https://jinyurl.com/2uNMFk</a></b></p><br /><br />
7
- <h3>Features of Facemoji Keyboard</h3>
8
- <ul>
9
- <li>Easy to send 3600+ Emoji, Emoticons, Free GIF, Symbol, Emoji Stickers</li>
10
- <li>DIY keyboard, emojis, stickers</li>
11
- <li>Text Bomb, Big Font Text Art Generator and Text Art Community</li>
12
- <li>Keyboard Skin Community and Customizable Emoji Text Art</li>
13
- <li>Fast Typing & Smart Input with Emoji Prediction, Trending Gifs, Swipe to type, Autocorrect & Word Prediction</li>
14
- </ul>
15
- <h3>Benefits of using Facemoji Keyboard</h3>
16
- <ul>
17
- <li>Spice up your chats and express yourself better with emojis, stickers, GIFs, and fonts</li>
18
- <li>Show off your creativity and personality by designing your own keyboard skins and emoji text art</li>
19
- <li>Enjoy a smooth and fast typing experience with smart input features</li>
20
- <li>Save storage space with a smaller installation package</li>
21
- <li>Supports more than 100 languages</li>
22
- </ul>
23
- <h2>How to download com.facemoji.lite.xiaomi APK from APKCombo</h2>
24
- <p>APKCombo is a website that offers free download of Android apps in APK format. You can find the latest version of com.facemoji.lite.xiaomi APK on APKCombo by following these steps:</p>
25
- <h3>Steps to download com.facemoji.lite.xiaomi APK from APKCombo</h3>
26
- <ol>
27
- <li>Open your browser and go to [APKCombo](^1^)</li>
28
- <li>Type "com.facemoji.lite.xiaomi" in the search box and hit enter</li>
29
- <li>Select the app named "Emoji Keyboard" by EKATOX APPS</li>
30
- <li>Tap on the green "Download APK" button</li>
31
- <li>Choose the version you want to download and tap on it</li>
32
- <li>Wait for the download to finish</li>
33
- </ol>
34
- <h3>How to install com.facemoji.lite.xiaomi APK on your Android phone</h3>
35
- <p>Before you can install com.facemoji.lite.xiaomi APK on your Android phone, you need to enable the option to install unknown apps from your browser. To do this:</p>
36
- <ol>
37
- <li>Go to your device settings and tap Apps & Notifications (or Apps in older versions)</li>
38
- <li>Tap on the browser you used to download the APK file (e.g. Chrome)</li>
39
- <li>Tap on Advanced and then Install Unknown Apps</li>
40
- <li>Toggle on the Allow from this source option</li>
41
- </ol>
42
- <p>Once you have done that, you can install com.facemoji.lite.xiaomi APK on your Android phone by following these steps:</p>
43
- <ol>
44
- <li>Open your file manager app and locate the downloaded APK file</li>
45
- <li>Tap on the file and then tap on Install</li>
46
- <li>Wait for the installation to complete</li>
47
- <li>Tap on Open to launch the app or Done to exit</li>
48
- </ol>
49
- <h2>How to download com.facemoji.lite.xiaomi APK from Uptodown</h2>
50
- <p>Uptodown is another website that offers free download of Android apps in APK format. You can also find the latest version of com.facemoji.lite.xiaomi APK on Uptodown by following these steps:</p>
51
- <h3>Steps to download com.facemoji.lite.xiaomi APK from Uptodown</h3>
52
- <ol>
53
- <li>Open your browser and go to [Uptodown]</li>
54
- <li>Type "com.facemoji.lite.xiaomi" in the search box and hit enter</li>
55
- <li>Select the app named "Facemoji Keyboard" by EKATOX APPS</li>
56
- <li>Tap on the green "Download" button</li>
57
- <li>Wait for the download to finish</li>
58
- </ol>
59
- <h3>How to install com.facemoji.lite.xiaomi APK on your Android phone</h3>
60
- <p>The installation process for com.facemoji.lite.xiaomi APK from Uptodown is similar to the one from APKCombo. You need to enable the option to install unknown apps from your browser first, and then follow these steps:</p>
61
- <ol>
62
- <li>Open your file manager app and locate the downloaded APK file</li>
63
- <li>Tap on the file and then tap on Install</li>
64
- <li>Wait for the installation to complete</li>
65
- <li>Tap on Open to launch the app or Done to exit</li>
66
- </ol>
67
- <h2>Conclusion</h2>
68
- <p>In this article, we have shown you how to download and install com.facemoji.lite.xiaomi APK on your Android phone from two different sources: APKCombo and Uptodown. Both websites offer free and safe download of Android apps in APK format. You can choose the one that suits you best and enjoy using Facemoji Keyboard on your Android phone. Facemoji Keyboard is a fun and customizable keyboard app that lets you use thousands of emojis, stickers, GIFs, fonts, and themes on your keyboard. You can also create your own emoji text art and keyboard skins, and share them with your friends. Facemoji Keyboard is compatible with all social media platforms and messaging apps, and supports more than 100 languages.</p>
69
- <h2>FAQs</h2>
70
- <ul>
71
- <li><b>What is an APK file?</b></li>
72
- <p>An APK file is an Android Package file that contains all the files and code needed to install and run an Android app. It is similar to an executable file (.exe) for Windows or a DMG file for Mac.</p>
73
- <li><b>Is it safe to download APK files from third-party websites?</b></li>
74
- <p>It depends on the website and the app. Some websites may offer malicious or modified APK files that can harm your device or compromise your privacy. You should always check the reputation and reviews of the website before downloading any APK file. You should also scan the APK file with an antivirus software before installing it.</p>
75
- <p>com.facemoji.lite.xiaomi apk download free<br />
76
- com.facemoji.lite.xiaomi app download for android<br />
77
- com.facemoji.lite.xiaomi latest version apk<br />
78
- com.facemoji.lite.xiaomi keyboard apk download<br />
79
- com.facemoji.lite.xiaomi uptodown<br />
80
- com.facemoji.lite.xiaomi apkcombo<br />
81
- com.facemoji.lite.xiaomi emoji keyboard apk<br />
82
- com.facemoji.lite.xiaomi apk mirror<br />
83
- com.facemoji.lite.xiaomi apk pure<br />
84
- com.facemoji.lite.xiaomi apk mod<br />
85
- com.facemoji.lite.xiaomi apk old version<br />
86
- com.facemoji.lite.xiaomi app review<br />
87
- com.facemoji.lite.xiaomi app features<br />
88
- com.facemoji.lite.xiaomi app update<br />
89
- com.facemoji.lite.xiaomi app install<br />
90
- com.facemoji.lite.xiaomi app store<br />
91
- com.facemoji.lite.xiaomi app permissions<br />
92
- com.facemoji.lite.xiaomi app alternative<br />
93
- com.facemoji.lite.xiaomi app problems<br />
94
- com.facemoji.lite.xiaomi app support<br />
95
- com.facemoji.lite.xiaomi android 4.4+<br />
96
- com.facemoji.lite.xiaomi android 10<br />
97
- com.facemoji.lite.xiaomi android 11<br />
98
- com.facemoji.lite.xiaomi android tv<br />
99
- com.facemoji.lite.xiaomi android emulator<br />
100
- com.facemoji.lite.xiaomi latest version 2.5.3<br />
101
- com.facemoji.lite.xiaomi version 2.5.2<br />
102
- com.facemoji.lite.xiaomi version 2.1.8.7<br />
103
- com.facemoji.lite.xiaomi version history<br />
104
- com.facemoji.lite.xiaomi version comparison<br />
105
- com.facemoji.lite.xiaomi keyboard themes download<br />
106
- com.facemoji.lite.xiaomi keyboard stickers download<br />
107
- com.facemoji.lite.xiaomi keyboard settings<br />
108
- com.facemoji.lite.xiaomi keyboard customization<br />
109
- com.facemoji.lite.xiaomi keyboard tutorial<br />
110
- com.facemoji.lite.xiaomi keyboard tips and tricks<br />
111
- com.facemoji.lite.xiaomi keyboard problems and solutions<br />
112
- com.facemoji.lite.xiaomi keyboard feedback and suggestions<br />
113
- com.facemoji.lite.xiaomi emoji keyboard lite apk download<br />
114
- com.facemoji.lite.xiaomi emoji keyboard lite app download for android<br />
115
- com.facemoji.lite.xiaomi emoji keyboard lite latest version apk <br />
116
- com.facemoji.lite.xiaomi emoji keyboard lite apk mirror <br />
117
- com.facemoji.lite.xiaomi emoji keyboard lite apk pure <br />
118
- com.facemoji.lite.xiaomi emoji keyboard lite apk mod <br />
119
- com.facemoji.lite.xiaomi emoji keyboard lite apk old version <br />
120
- com.facemoji.lite.xiaomi emoji keyboard lite app review <br />
121
- com.facemoji.lite.xiaomi emoji keyboard lite app features <br />
122
- com.facemoji.lite.xiaomi emoji keyboard lite app update <br />
123
- com.facemoji.lite.xiaomi emoji keyboard lite app install</p>
124
- <li><b>Why do I need to enable unknown sources to install APK files?</b></li>
125
- <p>By default, Android devices only allow installation of apps from official sources such as Google Play Store or Samsung Galaxy Store. This is a security measure to prevent unauthorized or harmful apps from being installed on your device. However, if you want to install apps from other sources, such as APK files downloaded from third-party websites, you need to enable unknown sources in your device settings. This will allow you to install apps from any source, but you should be careful and only install trusted apps.</p>
126
- <li><b>How do I uninstall com.facemoji.lite.xiaomi APK from my Android phone?</b></li>
127
- <p>If you want to uninstall com.facemoji.lite.xiaomi APK from your Android phone, you can follow these steps:</p>
128
- <ol>
129
- <li>Go to your device settings and tap Apps & Notifications (or Apps in older versions)</li>
130
- <li>Find and tap on Facemoji Keyboard</li>
131
- <li>Tap on Uninstall and confirm</li>
132
- <li>Wait for the uninstallation to finish</li>
133
- </ol> <li><b>What are the alternatives to com.facemoji.lite.xiaomi APK?</b></li>
134
- <p>If you are looking for other keyboard apps that offer similar features as com.facemoji.lite.xiaomi APK, you can try these alternatives:</p>
135
- <ul>
136
- <li><b>Gboard</b>: This is the official keyboard app from Google, which offers smart typing, voice typing, emoji search, GIFs, stickers, themes, and more. You can also access Google Translate, Google Search, and Google Assistant from your keyboard.</li>
137
- <li><b>Kika Keyboard</b>: This is another popular keyboard app that offers a variety of emojis, stickers, GIFs, fonts, themes, and more. You can also customize your keyboard with your own photos, sounds, and colors.</li>
138
- <li><b>SwiftKey Keyboard</b>: This is a keyboard app that uses artificial intelligence to learn your writing style and provide personalized predictions, corrections, and suggestions. It also supports over 300 languages, emojis, GIFs, stickers, themes, and more.</li>
139
- </ul>
140
- <p>I hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p> 401be4b1e0<br />
141
- <br />
142
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_utils.py DELETED
@@ -1,122 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- import importlib
16
- import os
17
- from dataclasses import dataclass
18
- from typing import Any, Dict, Optional, Union
19
-
20
- import paddle
21
-
22
- from ..utils import BaseOutput
23
-
24
- SCHEDULER_CONFIG_NAME = "scheduler_config.json"
25
-
26
-
27
- @dataclass
28
- class SchedulerOutput(BaseOutput):
29
- """
30
- Base class for the scheduler's step function output.
31
-
32
- Args:
33
- prev_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
34
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
35
- denoising loop.
36
- """
37
-
38
- prev_sample: paddle.Tensor
39
-
40
-
41
- class SchedulerMixin:
42
- """
43
- Mixin containing common functions for the schedulers.
44
-
45
- Class attributes:
46
- - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that
47
- `from_config` can be used from a class different than the one used to save the config (should be overridden
48
- by parent class).
49
- """
50
-
51
- config_name = SCHEDULER_CONFIG_NAME
52
- _compatibles = []
53
- has_compatibles = True
54
-
55
- @classmethod
56
- def from_pretrained(
57
- cls,
58
- pretrained_model_name_or_path: Dict[str, Any] = None,
59
- subfolder: Optional[str] = None,
60
- return_unused_kwargs=False,
61
- **kwargs,
62
- ):
63
- r"""
64
- Instantiate a Scheduler class from a pre-defined JSON configuration file inside a directory or Hub repo.
65
-
66
- Parameters:
67
- pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
68
- Can be either:
69
-
70
- - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an
71
- organization name, like `google/ddpm-celebahq-256`.
72
- - A path to a *directory* containing the schedluer configurations saved using
73
- [`~SchedulerMixin.save_pretrained`], e.g., `./my_model_directory/`.
74
- subfolder (`str`, *optional*):
75
- In case the relevant files are located inside a subfolder of the model repo (either remote in
76
- huggingface.co or downloaded locally), you can specify the folder name here.
77
- return_unused_kwargs (`bool`, *optional*, defaults to `False`):
78
- Whether kwargs that are not consumed by the Python class should be returned or not.
79
- cache_dir (`Union[str, os.PathLike]`, *optional*):
80
- Path to a directory in which a downloaded pretrained model configuration should be cached if the
81
- standard cache should not be used.
82
- output_loading_info(`bool`, *optional*, defaults to `False`):
83
- Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
84
-
85
- """
86
- config, kwargs = cls.load_config(
87
- pretrained_model_name_or_path=pretrained_model_name_or_path,
88
- subfolder=subfolder,
89
- return_unused_kwargs=True,
90
- **kwargs,
91
- )
92
- return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)
93
-
94
- def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
95
- """
96
- Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the
97
- [`~SchedulerMixin.from_pretrained`] class method.
98
-
99
- Args:
100
- save_directory (`str` or `os.PathLike`):
101
- Directory where the configuration JSON file will be saved (will be created if it does not exist).
102
- """
103
- self.save_config(save_directory=save_directory, **kwargs)
104
-
105
- @property
106
- def compatibles(self):
107
- """
108
- Returns all schedulers that are compatible with this scheduler
109
-
110
- Returns:
111
- `List[SchedulerMixin]`: List of compatible schedulers
112
- """
113
- return self._get_compatibles()
114
-
115
- @classmethod
116
- def _get_compatibles(cls):
117
- compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
118
- diffusers_library = importlib.import_module(__name__.split(".")[0])
119
- compatible_classes = [
120
- getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
121
- ]
122
- return compatible_classes
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/infer/modules/vc/pipeline.py DELETED
@@ -1,655 +0,0 @@
1
- import os
2
- import sys
3
- import traceback
4
- import logging
5
-
6
- logger = logging.getLogger(__name__)
7
-
8
- from functools import lru_cache
9
- from time import time as ttime
10
- from torch import Tensor
11
- import faiss
12
- import librosa
13
- import numpy as np
14
- import parselmouth
15
- import pyworld
16
- import torch
17
- import torch.nn.functional as F
18
- import torchcrepe
19
- from scipy import signal
20
- from tqdm import tqdm
21
-
22
- import random
23
- now_dir = os.getcwd()
24
- sys.path.append(now_dir)
25
- import re
26
- from functools import partial
27
- bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
28
-
29
- input_audio_path2wav = {}
30
- from LazyImport import lazyload
31
- torchcrepe = lazyload("torchcrepe") # Fork Feature. Crepe algo for training and preprocess
32
- torch = lazyload("torch")
33
- from infer.lib.rmvpe import RMVPE
34
-
35
- @lru_cache
36
- def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
37
- audio = input_audio_path2wav[input_audio_path]
38
- f0, t = pyworld.harvest(
39
- audio,
40
- fs=fs,
41
- f0_ceil=f0max,
42
- f0_floor=f0min,
43
- frame_period=frame_period,
44
- )
45
- f0 = pyworld.stonemask(audio, f0, t, fs)
46
- return f0
47
-
48
-
49
- def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比
50
- # print(data1.max(),data2.max())
51
- rms1 = librosa.feature.rms(
52
- y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
53
- ) # 每半秒一个点
54
- rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
55
- rms1 = torch.from_numpy(rms1)
56
- rms1 = F.interpolate(
57
- rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
58
- ).squeeze()
59
- rms2 = torch.from_numpy(rms2)
60
- rms2 = F.interpolate(
61
- rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
62
- ).squeeze()
63
- rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
64
- data2 *= (
65
- torch.pow(rms1, torch.tensor(1 - rate))
66
- * torch.pow(rms2, torch.tensor(rate - 1))
67
- ).numpy()
68
- return data2
69
-
70
-
71
- class Pipeline(object):
72
- def __init__(self, tgt_sr, config):
73
- self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
74
- config.x_pad,
75
- config.x_query,
76
- config.x_center,
77
- config.x_max,
78
- config.is_half,
79
- )
80
- self.sr = 16000 # hubert输入采样率
81
- self.window = 160 # 每帧点数
82
- self.t_pad = self.sr * self.x_pad # 每条前后pad时间
83
- self.t_pad_tgt = tgt_sr * self.x_pad
84
- self.t_pad2 = self.t_pad * 2
85
- self.t_query = self.sr * self.x_query # 查询切点前后查询时间
86
- self.t_center = self.sr * self.x_center # 查询切点位置
87
- self.t_max = self.sr * self.x_max # 免查询时长阈值
88
- self.device = config.device
89
- self.model_rmvpe = RMVPE("%s/rmvpe.pt" % os.environ["rmvpe_root"], is_half=self.is_half, device=self.device)
90
- self.f0_method_dict = {
91
- "pm": self.get_pm,
92
- "harvest": self.get_harvest,
93
- "dio": self.get_dio,
94
- "rmvpe": self.get_rmvpe,
95
- "rmvpe+": self.get_pitch_dependant_rmvpe,
96
- "crepe": self.get_f0_official_crepe_computation,
97
- "crepe-tiny": partial(self.get_f0_official_crepe_computation, model='model'),
98
- "mangio-crepe": self.get_f0_crepe_computation,
99
- "mangio-crepe-tiny": partial(self.get_f0_crepe_computation, model='model'),
100
-
101
- }
102
- self.note_dict = [
103
- 65.41, 69.30, 73.42, 77.78, 82.41, 87.31,
104
- 92.50, 98.00, 103.83, 110.00, 116.54, 123.47,
105
- 130.81, 138.59, 146.83, 155.56, 164.81, 174.61,
106
- 185.00, 196.00, 207.65, 220.00, 233.08, 246.94,
107
- 261.63, 277.18, 293.66, 311.13, 329.63, 349.23,
108
- 369.99, 392.00, 415.30, 440.00, 466.16, 493.88,
109
- 523.25, 554.37, 587.33, 622.25, 659.25, 698.46,
110
- 739.99, 783.99, 830.61, 880.00, 932.33, 987.77,
111
- 1046.50, 1108.73, 1174.66, 1244.51, 1318.51, 1396.91,
112
- 1479.98, 1567.98, 1661.22, 1760.00, 1864.66, 1975.53,
113
- 2093.00, 2217.46, 2349.32, 2489.02, 2637.02, 2793.83,
114
- 2959.96, 3135.96, 3322.44, 3520.00, 3729.31, 3951.07
115
- ]
116
-
117
- # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device)
118
- def get_optimal_torch_device(self, index: int = 0) -> torch.device:
119
- if torch.cuda.is_available():
120
- return torch.device(
121
- f"cuda:{index % torch.cuda.device_count()}"
122
- ) # Very fast
123
- elif torch.backends.mps.is_available():
124
- return torch.device("mps")
125
- return torch.device("cpu")
126
-
127
- # Fork Feature: Compute f0 with the crepe method
128
- def get_f0_crepe_computation(
129
- self,
130
- x,
131
- f0_min,
132
- f0_max,
133
- p_len,
134
- *args, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time.
135
- **kwargs, # Either use crepe-tiny "tiny" or crepe "full". Default is full
136
- ):
137
- x = x.astype(
138
- np.float32
139
- ) # fixes the F.conv2D exception. We needed to convert double to float.
140
- x /= np.quantile(np.abs(x), 0.999)
141
- torch_device = self.get_optimal_torch_device()
142
- audio = torch.from_numpy(x).to(torch_device, copy=True)
143
- audio = torch.unsqueeze(audio, dim=0)
144
- if audio.ndim == 2 and audio.shape[0] > 1:
145
- audio = torch.mean(audio, dim=0, keepdim=True).detach()
146
- audio = audio.detach()
147
- hop_length = kwargs.get('crepe_hop_length', 160)
148
- model = kwargs.get('model', 'full')
149
- print("Initiating prediction with a crepe_hop_length of: " + str(hop_length))
150
- pitch: Tensor = torchcrepe.predict(
151
- audio,
152
- self.sr,
153
- hop_length,
154
- f0_min,
155
- f0_max,
156
- model,
157
- batch_size=hop_length * 2,
158
- device=torch_device,
159
- pad=True,
160
- )
161
- p_len = p_len or x.shape[0] // hop_length
162
- # Resize the pitch for final f0
163
- source = np.array(pitch.squeeze(0).cpu().float().numpy())
164
- source[source < 0.001] = np.nan
165
- target = np.interp(
166
- np.arange(0, len(source) * p_len, len(source)) / p_len,
167
- np.arange(0, len(source)),
168
- source,
169
- )
170
- f0 = np.nan_to_num(target)
171
- return f0 # Resized f0
172
-
173
- def get_f0_official_crepe_computation(
174
- self,
175
- x,
176
- f0_min,
177
- f0_max,
178
- *args,
179
- **kwargs
180
- ):
181
- # Pick a batch size that doesn't cause memory errors on your gpu
182
- batch_size = 512
183
- # Compute pitch using first gpu
184
- audio = torch.tensor(np.copy(x))[None].float()
185
- model = kwargs.get('model', 'full')
186
- f0, pd = torchcrepe.predict(
187
- audio,
188
- self.sr,
189
- self.window,
190
- f0_min,
191
- f0_max,
192
- model,
193
- batch_size=batch_size,
194
- device=self.device,
195
- return_periodicity=True,
196
- )
197
- pd = torchcrepe.filter.median(pd, 3)
198
- f0 = torchcrepe.filter.mean(f0, 3)
199
- f0[pd < 0.1] = 0
200
- f0 = f0[0].cpu().numpy()
201
- return f0
202
-
203
- # Fork Feature: Compute pYIN f0 method
204
- def get_f0_pyin_computation(self, x, f0_min, f0_max):
205
- y, sr = librosa.load("saudio/Sidney.wav", self.sr, mono=True)
206
- f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max)
207
- f0 = f0[1:] # Get rid of extra first frame
208
- return f0
209
-
210
- def get_pm(self, x, p_len, *args, **kwargs):
211
- f0 = parselmouth.Sound(x, self.sr).to_pitch_ac(
212
- time_step=160 / 16000,
213
- voicing_threshold=0.6,
214
- pitch_floor=kwargs.get('f0_min'),
215
- pitch_ceiling=kwargs.get('f0_max'),
216
- ).selected_array["frequency"]
217
-
218
- return np.pad(
219
- f0,
220
- [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]],
221
- mode="constant"
222
- )
223
-
224
- def get_harvest(self, x, *args, **kwargs):
225
- f0_spectral = pyworld.harvest(
226
- x.astype(np.double),
227
- fs=self.sr,
228
- f0_ceil=kwargs.get('f0_max'),
229
- f0_floor=kwargs.get('f0_min'),
230
- frame_period=1000 * kwargs.get('hop_length', 160) / self.sr,
231
- )
232
- return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr)
233
-
234
- def get_dio(self, x, *args, **kwargs):
235
- f0_spectral = pyworld.dio(
236
- x.astype(np.double),
237
- fs=self.sr,
238
- f0_ceil=kwargs.get('f0_max'),
239
- f0_floor=kwargs.get('f0_min'),
240
- frame_period=1000 * kwargs.get('hop_length', 160) / self.sr,
241
- )
242
- return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr)
243
-
244
-
245
- def get_rmvpe(self, x, *args, **kwargs):
246
- if not hasattr(self, "model_rmvpe"):
247
- from infer.lib.rmvpe import RMVPE
248
-
249
- logger.info(
250
- "Loading rmvpe model,%s" % "%s/rmvpe.pt" % os.environ["rmvpe_root"]
251
- )
252
- self.model_rmvpe = RMVPE(
253
- "%s/rmvpe.pt" % os.environ["rmvpe_root"],
254
- is_half=self.is_half,
255
- device=self.device,
256
- )
257
- f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
258
-
259
- return f0
260
-
261
-
262
- def get_pitch_dependant_rmvpe(self, x, f0_min=1, f0_max=40000, *args, **kwargs):
263
- return self.model_rmvpe.infer_from_audio_with_pitch(x, thred=0.03, f0_min=f0_min, f0_max=f0_max)
264
-
265
- def autotune_f0(self, f0):
266
- autotuned_f0 = []
267
- for freq in f0:
268
- closest_notes = [x for x in self.note_dict if abs(x - freq) == min(abs(n - freq) for n in self.note_dict)]
269
- autotuned_f0.append(random.choice(closest_notes))
270
- return np.array(autotuned_f0, np.float64)
271
-
272
- # Fork Feature: Acquire median hybrid f0 estimation calculation
273
- def get_f0_hybrid_computation(
274
- self,
275
- methods_str,
276
- input_audio_path,
277
- x,
278
- f0_min,
279
- f0_max,
280
- p_len,
281
- filter_radius,
282
- crepe_hop_length,
283
- time_step
284
- ):
285
- # Get various f0 methods from input to use in the computation stack
286
- params = {'x': x, 'p_len': p_len, 'f0_min': f0_min,
287
- 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius,
288
- 'crepe_hop_length': crepe_hop_length, 'model': "full"
289
- }
290
- methods_str = re.search('hybrid\[(.+)\]', methods_str)
291
- if methods_str: # Ensure a match was found
292
- methods = [method.strip() for method in methods_str.group(1).split('+')]
293
- f0_computation_stack = []
294
-
295
- print(f"Calculating f0 pitch estimations for methods: {str(methods)}")
296
- x = x.astype(np.float32)
297
- x /= np.quantile(np.abs(x), 0.999)
298
- # Get f0 calculations for all methods specified
299
-
300
- for method in methods:
301
- if method not in self.f0_method_dict:
302
- print(f"Method {method} not found.")
303
- continue
304
- f0 = self.f0_method_dict[method](**params)
305
- if method == 'harvest' and filter_radius > 2:
306
- f0 = signal.medfilt(f0, 3)
307
- f0 = f0[1:] # Get rid of first frame.
308
- f0_computation_stack.append(f0)
309
-
310
- for fc in f0_computation_stack:
311
- print(len(fc))
312
-
313
- print(f"Calculating hybrid median f0 from the stack of: {str(methods)}")
314
- f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0)
315
- return f0_median_hybrid
316
-
317
- def get_f0(
318
- self,
319
- input_audio_path,
320
- x,
321
- p_len,
322
- f0_up_key,
323
- f0_method,
324
- filter_radius,
325
- crepe_hop_length,
326
- f0_autotune,
327
- inp_f0=None,
328
- f0_min=50,
329
- f0_max=1100,
330
- ):
331
- global input_audio_path2wav
332
- time_step = self.window / self.sr * 1000
333
- f0_min = 50
334
- f0_max = 1100
335
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
336
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
337
- params = {'x': x, 'p_len': p_len, 'f0_up_key': f0_up_key, 'f0_min': f0_min,
338
- 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius,
339
- 'crepe_hop_length': crepe_hop_length, 'model': "full"
340
- }
341
-
342
- if "hybrid" in f0_method:
343
- # Perform hybrid median pitch estimation
344
- input_audio_path2wav[input_audio_path] = x.astype(np.double)
345
- f0 = self.get_f0_hybrid_computation(
346
- f0_method,+
347
- input_audio_path,
348
- x,
349
- f0_min,
350
- f0_max,
351
- p_len,
352
- filter_radius,
353
- crepe_hop_length,
354
- time_step,
355
- )
356
- else:
357
- f0 = self.f0_method_dict[f0_method](**params)
358
-
359
- if "privateuseone" in str(self.device): # clean ortruntime memory
360
- del self.model_rmvpe.model
361
- del self.model_rmvpe
362
- logger.info("Cleaning ortruntime memory")
363
-
364
- if f0_autotune:
365
- f0 = self.autotune_f0(f0)
366
-
367
- f0 *= pow(2, f0_up_key / 12)
368
- # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
369
- tf0 = self.sr // self.window # 每秒f0点数
370
- if inp_f0 is not None:
371
- delta_t = np.round(
372
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
373
- ).astype("int16")
374
- replace_f0 = np.interp(
375
- list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
376
- )
377
- shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
378
- f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
379
- :shape
380
- ]
381
- # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
382
- f0bak = f0.copy()
383
- f0_mel = 1127 * np.log(1 + f0 / 700)
384
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
385
- f0_mel_max - f0_mel_min
386
- ) + 1
387
- f0_mel[f0_mel <= 1] = 1
388
- f0_mel[f0_mel > 255] = 255
389
- f0_coarse = np.rint(f0_mel).astype(np.int32)
390
- return f0_coarse, f0bak # 1-0
391
-
392
- def vc(
393
- self,
394
- model,
395
- net_g,
396
- sid,
397
- audio0,
398
- pitch,
399
- pitchf,
400
- times,
401
- index,
402
- big_npy,
403
- index_rate,
404
- version,
405
- protect,
406
- ): # ,file_index,file_big_npy
407
- feats = torch.from_numpy(audio0)
408
- if self.is_half:
409
- feats = feats.half()
410
- else:
411
- feats = feats.float()
412
- if feats.dim() == 2: # double channels
413
- feats = feats.mean(-1)
414
- assert feats.dim() == 1, feats.dim()
415
- feats = feats.view(1, -1)
416
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
417
-
418
- inputs = {
419
- "source": feats.to(self.device),
420
- "padding_mask": padding_mask,
421
- "output_layer": 9 if version == "v1" else 12,
422
- }
423
- t0 = ttime()
424
- with torch.no_grad():
425
- logits = model.extract_features(**inputs)
426
- feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
427
- if protect < 0.5 and pitch is not None and pitchf is not None:
428
- feats0 = feats.clone()
429
- if (
430
- not isinstance(index, type(None))
431
- and not isinstance(big_npy, type(None))
432
- and index_rate != 0
433
- ):
434
- npy = feats[0].cpu().numpy()
435
- if self.is_half:
436
- npy = npy.astype("float32")
437
-
438
- # _, I = index.search(npy, 1)
439
- # npy = big_npy[I.squeeze()]
440
-
441
- score, ix = index.search(npy, k=8)
442
- weight = np.square(1 / score)
443
- weight /= weight.sum(axis=1, keepdims=True)
444
- npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
445
-
446
- if self.is_half:
447
- npy = npy.astype("float16")
448
- feats = (
449
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
450
- + (1 - index_rate) * feats
451
- )
452
-
453
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
454
- if protect < 0.5 and pitch is not None and pitchf is not None:
455
- feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
456
- 0, 2, 1
457
- )
458
- t1 = ttime()
459
- p_len = audio0.shape[0] // self.window
460
- if feats.shape[1] < p_len:
461
- p_len = feats.shape[1]
462
- if pitch is not None and pitchf is not None:
463
- pitch = pitch[:, :p_len]
464
- pitchf = pitchf[:, :p_len]
465
-
466
- if protect < 0.5 and pitch is not None and pitchf is not None:
467
- pitchff = pitchf.clone()
468
- pitchff[pitchf > 0] = 1
469
- pitchff[pitchf < 1] = protect
470
- pitchff = pitchff.unsqueeze(-1)
471
- feats = feats * pitchff + feats0 * (1 - pitchff)
472
- feats = feats.to(feats0.dtype)
473
- p_len = torch.tensor([p_len], device=self.device).long()
474
- with torch.no_grad():
475
- hasp = pitch is not None and pitchf is not None
476
- arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)
477
- audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()
478
- del hasp, arg
479
- del feats, p_len, padding_mask
480
- if torch.cuda.is_available():
481
- torch.cuda.empty_cache()
482
- t2 = ttime()
483
- times[0] += t1 - t0
484
- times[2] += t2 - t1
485
- return audio1
486
- def process_t(self, t, s, window, audio_pad, pitch, pitchf, times, index, big_npy, index_rate, version, protect, t_pad_tgt, if_f0, sid, model, net_g):
487
- t = t // window * window
488
- if if_f0 == 1:
489
- return self.vc(
490
- model,
491
- net_g,
492
- sid,
493
- audio_pad[s : t + t_pad_tgt + window],
494
- pitch[:, s // window : (t + t_pad_tgt) // window],
495
- pitchf[:, s // window : (t + t_pad_tgt) // window],
496
- times,
497
- index,
498
- big_npy,
499
- index_rate,
500
- version,
501
- protect,
502
- )[t_pad_tgt : -t_pad_tgt]
503
- else:
504
- return self.vc(
505
- model,
506
- net_g,
507
- sid,
508
- audio_pad[s : t + t_pad_tgt + window],
509
- None,
510
- None,
511
- times,
512
- index,
513
- big_npy,
514
- index_rate,
515
- version,
516
- protect,
517
- )[t_pad_tgt : -t_pad_tgt]
518
-
519
-
520
- def pipeline(
521
- self,
522
- model,
523
- net_g,
524
- sid,
525
- audio,
526
- input_audio_path,
527
- times,
528
- f0_up_key,
529
- f0_method,
530
- file_index,
531
- index_rate,
532
- if_f0,
533
- filter_radius,
534
- tgt_sr,
535
- resample_sr,
536
- rms_mix_rate,
537
- version,
538
- protect,
539
- crepe_hop_length,
540
- f0_autotune,
541
- f0_file=None,
542
- f0_min=50,
543
- f0_max=1100
544
- ):
545
- if (
546
- file_index != ""
547
- # and file_big_npy != ""
548
- # and os.path.exists(file_big_npy) == True
549
- and os.path.exists(file_index)
550
- and index_rate != 0
551
- ):
552
- try:
553
- index = faiss.read_index(file_index)
554
- # big_npy = np.load(file_big_npy)
555
- big_npy = index.reconstruct_n(0, index.ntotal)
556
- except:
557
- traceback.print_exc()
558
- index = big_npy = None
559
- else:
560
- index = big_npy = None
561
- audio = signal.filtfilt(bh, ah, audio)
562
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
563
- opt_ts = []
564
- if audio_pad.shape[0] > self.t_max:
565
- audio_sum = np.zeros_like(audio)
566
- for i in range(self.window):
567
- audio_sum += audio_pad[i : i - self.window]
568
- for t in range(self.t_center, audio.shape[0], self.t_center):
569
- opt_ts.append(
570
- t
571
- - self.t_query
572
- + np.where(
573
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
574
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
575
- )[0][0]
576
- )
577
- s = 0
578
- audio_opt = []
579
- t = None
580
- t1 = ttime()
581
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
582
- p_len = audio_pad.shape[0] // self.window
583
- inp_f0 = None
584
- if hasattr(f0_file, "name"):
585
- try:
586
- with open(f0_file.name, "r") as f:
587
- lines = f.read().strip("\n").split("\n")
588
- inp_f0 = []
589
- for line in lines:
590
- inp_f0.append([float(i) for i in line.split(",")])
591
- inp_f0 = np.array(inp_f0, dtype="float32")
592
- except:
593
- traceback.print_exc()
594
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
595
- pitch, pitchf = None, None
596
- if if_f0:
597
- pitch, pitchf = self.get_f0(
598
- input_audio_path,
599
- audio_pad,
600
- p_len,
601
- f0_up_key,
602
- f0_method,
603
- filter_radius,
604
- crepe_hop_length,
605
- f0_autotune,
606
- inp_f0,
607
- f0_min,
608
- f0_max
609
- )
610
- pitch = pitch[:p_len]
611
- pitchf = pitchf[:p_len]
612
- if self.device == "mps" or "xpu" in self.device:
613
- pitchf = pitchf.astype(np.float32)
614
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
615
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
616
- t2 = ttime()
617
- times[1] += t2 - t1
618
-
619
- with tqdm(total=len(opt_ts), desc="Processing", unit="window") as pbar:
620
- for i, t in enumerate(opt_ts):
621
- t = t // self.window * self.window
622
- start = s
623
- end = t + self.t_pad2 + self.window
624
- audio_slice = audio_pad[start:end]
625
- pitch_slice = pitch[:, start // self.window:end // self.window] if if_f0 else None
626
- pitchf_slice = pitchf[:, start // self.window:end // self.window] if if_f0 else None
627
- audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
628
- s = t
629
- pbar.update(1)
630
- pbar.refresh()
631
-
632
- audio_slice = audio_pad[t:]
633
- pitch_slice = pitch[:, t // self.window:] if if_f0 and t is not None else pitch
634
- pitchf_slice = pitchf[:, t // self.window:] if if_f0 and t is not None else pitchf
635
- audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
636
-
637
- audio_opt = np.concatenate(audio_opt)
638
- if rms_mix_rate != 1:
639
- audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
640
- if tgt_sr != resample_sr >= 16000:
641
- audio_opt = librosa.resample(
642
- audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
643
- )
644
- audio_max = np.abs(audio_opt).max() / 0.99
645
- max_int16 = 32768
646
- if audio_max > 1:
647
- max_int16 /= audio_max
648
- audio_opt = (audio_opt * max_int16).astype(np.int16)
649
- del pitch, pitchf, sid
650
- if torch.cuda.is_available():
651
- torch.cuda.empty_cache()
652
-
653
- print("Returning completed audio...")
654
- print("-------------------")
655
- return audio_opt
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/start_server.sh DELETED
@@ -1,6 +0,0 @@
1
- #!/bin/sh
2
-
3
- # For mlock support
4
- ulimit -l unlimited
5
-
6
- python3 -B main.py
 
 
 
 
 
 
 
spaces/AHzizi/WaifuVoiceGen/attentions.py DELETED
@@ -1,300 +0,0 @@
1
- import math
2
- import torch
3
- from torch import nn
4
- from torch.nn import functional as F
5
-
6
- import commons
7
- from modules import LayerNorm
8
-
9
-
10
- class Encoder(nn.Module):
11
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
12
- super().__init__()
13
- self.hidden_channels = hidden_channels
14
- self.filter_channels = filter_channels
15
- self.n_heads = n_heads
16
- self.n_layers = n_layers
17
- self.kernel_size = kernel_size
18
- self.p_dropout = p_dropout
19
- self.window_size = window_size
20
-
21
- self.drop = nn.Dropout(p_dropout)
22
- self.attn_layers = nn.ModuleList()
23
- self.norm_layers_1 = nn.ModuleList()
24
- self.ffn_layers = nn.ModuleList()
25
- self.norm_layers_2 = nn.ModuleList()
26
- for i in range(self.n_layers):
27
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
28
- self.norm_layers_1.append(LayerNorm(hidden_channels))
29
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
30
- self.norm_layers_2.append(LayerNorm(hidden_channels))
31
-
32
- def forward(self, x, x_mask):
33
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
34
- x = x * x_mask
35
- for i in range(self.n_layers):
36
- y = self.attn_layers[i](x, x, attn_mask)
37
- y = self.drop(y)
38
- x = self.norm_layers_1[i](x + y)
39
-
40
- y = self.ffn_layers[i](x, x_mask)
41
- y = self.drop(y)
42
- x = self.norm_layers_2[i](x + y)
43
- x = x * x_mask
44
- return x
45
-
46
-
47
- class Decoder(nn.Module):
48
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
49
- super().__init__()
50
- self.hidden_channels = hidden_channels
51
- self.filter_channels = filter_channels
52
- self.n_heads = n_heads
53
- self.n_layers = n_layers
54
- self.kernel_size = kernel_size
55
- self.p_dropout = p_dropout
56
- self.proximal_bias = proximal_bias
57
- self.proximal_init = proximal_init
58
-
59
- self.drop = nn.Dropout(p_dropout)
60
- self.self_attn_layers = nn.ModuleList()
61
- self.norm_layers_0 = nn.ModuleList()
62
- self.encdec_attn_layers = nn.ModuleList()
63
- self.norm_layers_1 = nn.ModuleList()
64
- self.ffn_layers = nn.ModuleList()
65
- self.norm_layers_2 = nn.ModuleList()
66
- for i in range(self.n_layers):
67
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
68
- self.norm_layers_0.append(LayerNorm(hidden_channels))
69
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
70
- self.norm_layers_1.append(LayerNorm(hidden_channels))
71
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
72
- self.norm_layers_2.append(LayerNorm(hidden_channels))
73
-
74
- def forward(self, x, x_mask, h, h_mask):
75
- """
76
- x: decoder input
77
- h: encoder output
78
- """
79
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
80
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
81
- x = x * x_mask
82
- for i in range(self.n_layers):
83
- y = self.self_attn_layers[i](x, x, self_attn_mask)
84
- y = self.drop(y)
85
- x = self.norm_layers_0[i](x + y)
86
-
87
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
88
- y = self.drop(y)
89
- x = self.norm_layers_1[i](x + y)
90
-
91
- y = self.ffn_layers[i](x, x_mask)
92
- y = self.drop(y)
93
- x = self.norm_layers_2[i](x + y)
94
- x = x * x_mask
95
- return x
96
-
97
-
98
- class MultiHeadAttention(nn.Module):
99
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
100
- super().__init__()
101
- assert channels % n_heads == 0
102
-
103
- self.channels = channels
104
- self.out_channels = out_channels
105
- self.n_heads = n_heads
106
- self.p_dropout = p_dropout
107
- self.window_size = window_size
108
- self.heads_share = heads_share
109
- self.block_length = block_length
110
- self.proximal_bias = proximal_bias
111
- self.proximal_init = proximal_init
112
- self.attn = None
113
-
114
- self.k_channels = channels // n_heads
115
- self.conv_q = nn.Conv1d(channels, channels, 1)
116
- self.conv_k = nn.Conv1d(channels, channels, 1)
117
- self.conv_v = nn.Conv1d(channels, channels, 1)
118
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
119
- self.drop = nn.Dropout(p_dropout)
120
-
121
- if window_size is not None:
122
- n_heads_rel = 1 if heads_share else n_heads
123
- rel_stddev = self.k_channels**-0.5
124
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
125
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
126
-
127
- nn.init.xavier_uniform_(self.conv_q.weight)
128
- nn.init.xavier_uniform_(self.conv_k.weight)
129
- nn.init.xavier_uniform_(self.conv_v.weight)
130
- if proximal_init:
131
- with torch.no_grad():
132
- self.conv_k.weight.copy_(self.conv_q.weight)
133
- self.conv_k.bias.copy_(self.conv_q.bias)
134
-
135
- def forward(self, x, c, attn_mask=None):
136
- q = self.conv_q(x)
137
- k = self.conv_k(c)
138
- v = self.conv_v(c)
139
-
140
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
141
-
142
- x = self.conv_o(x)
143
- return x
144
-
145
- def attention(self, query, key, value, mask=None):
146
- # reshape [b, d, t] -> [b, n_h, t, d_k]
147
- b, d, t_s, t_t = (*key.size(), query.size(2))
148
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
149
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
150
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
151
-
152
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
153
- if self.window_size is not None:
154
- assert t_s == t_t, "Relative attention is only available for self-attention."
155
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
156
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
157
- scores_local = self._relative_position_to_absolute_position(rel_logits)
158
- scores = scores + scores_local
159
- if self.proximal_bias:
160
- assert t_s == t_t, "Proximal bias is only available for self-attention."
161
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
162
- if mask is not None:
163
- scores = scores.masked_fill(mask == 0, -1e4)
164
- if self.block_length is not None:
165
- assert t_s == t_t, "Local attention is only available for self-attention."
166
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
167
- scores = scores.masked_fill(block_mask == 0, -1e4)
168
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
169
- p_attn = self.drop(p_attn)
170
- output = torch.matmul(p_attn, value)
171
- if self.window_size is not None:
172
- relative_weights = self._absolute_position_to_relative_position(p_attn)
173
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
174
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
175
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
176
- return output, p_attn
177
-
178
- def _matmul_with_relative_values(self, x, y):
179
- """
180
- x: [b, h, l, m]
181
- y: [h or 1, m, d]
182
- ret: [b, h, l, d]
183
- """
184
- ret = torch.matmul(x, y.unsqueeze(0))
185
- return ret
186
-
187
- def _matmul_with_relative_keys(self, x, y):
188
- """
189
- x: [b, h, l, d]
190
- y: [h or 1, m, d]
191
- ret: [b, h, l, m]
192
- """
193
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
194
- return ret
195
-
196
- def _get_relative_embeddings(self, relative_embeddings, length):
197
- max_relative_position = 2 * self.window_size + 1
198
- # Pad first before slice to avoid using cond ops.
199
- pad_length = max(length - (self.window_size + 1), 0)
200
- slice_start_position = max((self.window_size + 1) - length, 0)
201
- slice_end_position = slice_start_position + 2 * length - 1
202
- if pad_length > 0:
203
- padded_relative_embeddings = F.pad(
204
- relative_embeddings,
205
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
206
- else:
207
- padded_relative_embeddings = relative_embeddings
208
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
209
- return used_relative_embeddings
210
-
211
- def _relative_position_to_absolute_position(self, x):
212
- """
213
- x: [b, h, l, 2*l-1]
214
- ret: [b, h, l, l]
215
- """
216
- batch, heads, length, _ = x.size()
217
- # Concat columns of pad to shift from relative to absolute indexing.
218
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
219
-
220
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
221
- x_flat = x.view([batch, heads, length * 2 * length])
222
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
223
-
224
- # Reshape and slice out the padded elements.
225
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
226
- return x_final
227
-
228
- def _absolute_position_to_relative_position(self, x):
229
- """
230
- x: [b, h, l, l]
231
- ret: [b, h, l, 2*l-1]
232
- """
233
- batch, heads, length, _ = x.size()
234
- # padd along column
235
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
236
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
237
- # add 0's in the beginning that will skew the elements after reshape
238
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
239
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
240
- return x_final
241
-
242
- def _attention_bias_proximal(self, length):
243
- """Bias for self-attention to encourage attention to close positions.
244
- Args:
245
- length: an integer scalar.
246
- Returns:
247
- a Tensor with shape [1, 1, length, length]
248
- """
249
- r = torch.arange(length, dtype=torch.float32)
250
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
251
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
252
-
253
-
254
- class FFN(nn.Module):
255
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
256
- super().__init__()
257
- self.in_channels = in_channels
258
- self.out_channels = out_channels
259
- self.filter_channels = filter_channels
260
- self.kernel_size = kernel_size
261
- self.p_dropout = p_dropout
262
- self.activation = activation
263
- self.causal = causal
264
-
265
- if causal:
266
- self.padding = self._causal_padding
267
- else:
268
- self.padding = self._same_padding
269
-
270
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
271
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
272
- self.drop = nn.Dropout(p_dropout)
273
-
274
- def forward(self, x, x_mask):
275
- x = self.conv_1(self.padding(x * x_mask))
276
- if self.activation == "gelu":
277
- x = x * torch.sigmoid(1.702 * x)
278
- else:
279
- x = torch.relu(x)
280
- x = self.drop(x)
281
- x = self.conv_2(self.padding(x * x_mask))
282
- return x * x_mask
283
-
284
- def _causal_padding(self, x):
285
- if self.kernel_size == 1:
286
- return x
287
- pad_l = self.kernel_size - 1
288
- pad_r = 0
289
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
290
- x = F.pad(x, commons.convert_pad_shape(padding))
291
- return x
292
-
293
- def _same_padding(self, x):
294
- if self.kernel_size == 1:
295
- return x
296
- pad_l = (self.kernel_size - 1) // 2
297
- pad_r = self.kernel_size // 2
298
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
299
- x = F.pad(x, commons.convert_pad_shape(padding))
300
- return x
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/layers_new.py DELETED
@@ -1,125 +0,0 @@
1
- import torch
2
- from torch import nn
3
- import torch.nn.functional as F
4
-
5
- from uvr5_pack.lib_v5 import spec_utils
6
-
7
-
8
- class Conv2DBNActiv(nn.Module):
9
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
10
- super(Conv2DBNActiv, self).__init__()
11
- self.conv = nn.Sequential(
12
- nn.Conv2d(
13
- nin,
14
- nout,
15
- kernel_size=ksize,
16
- stride=stride,
17
- padding=pad,
18
- dilation=dilation,
19
- bias=False,
20
- ),
21
- nn.BatchNorm2d(nout),
22
- activ(),
23
- )
24
-
25
- def __call__(self, x):
26
- return self.conv(x)
27
-
28
-
29
- class Encoder(nn.Module):
30
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
31
- super(Encoder, self).__init__()
32
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ)
33
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
34
-
35
- def __call__(self, x):
36
- h = self.conv1(x)
37
- h = self.conv2(h)
38
-
39
- return h
40
-
41
-
42
- class Decoder(nn.Module):
43
- def __init__(
44
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
45
- ):
46
- super(Decoder, self).__init__()
47
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
48
- # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
49
- self.dropout = nn.Dropout2d(0.1) if dropout else None
50
-
51
- def __call__(self, x, skip=None):
52
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
53
-
54
- if skip is not None:
55
- skip = spec_utils.crop_center(skip, x)
56
- x = torch.cat([x, skip], dim=1)
57
-
58
- h = self.conv1(x)
59
- # h = self.conv2(h)
60
-
61
- if self.dropout is not None:
62
- h = self.dropout(h)
63
-
64
- return h
65
-
66
-
67
- class ASPPModule(nn.Module):
68
- def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False):
69
- super(ASPPModule, self).__init__()
70
- self.conv1 = nn.Sequential(
71
- nn.AdaptiveAvgPool2d((1, None)),
72
- Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ),
73
- )
74
- self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ)
75
- self.conv3 = Conv2DBNActiv(
76
- nin, nout, 3, 1, dilations[0], dilations[0], activ=activ
77
- )
78
- self.conv4 = Conv2DBNActiv(
79
- nin, nout, 3, 1, dilations[1], dilations[1], activ=activ
80
- )
81
- self.conv5 = Conv2DBNActiv(
82
- nin, nout, 3, 1, dilations[2], dilations[2], activ=activ
83
- )
84
- self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ)
85
- self.dropout = nn.Dropout2d(0.1) if dropout else None
86
-
87
- def forward(self, x):
88
- _, _, h, w = x.size()
89
- feat1 = F.interpolate(
90
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
91
- )
92
- feat2 = self.conv2(x)
93
- feat3 = self.conv3(x)
94
- feat4 = self.conv4(x)
95
- feat5 = self.conv5(x)
96
- out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
97
- out = self.bottleneck(out)
98
-
99
- if self.dropout is not None:
100
- out = self.dropout(out)
101
-
102
- return out
103
-
104
-
105
- class LSTMModule(nn.Module):
106
- def __init__(self, nin_conv, nin_lstm, nout_lstm):
107
- super(LSTMModule, self).__init__()
108
- self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0)
109
- self.lstm = nn.LSTM(
110
- input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True
111
- )
112
- self.dense = nn.Sequential(
113
- nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU()
114
- )
115
-
116
- def forward(self, x):
117
- N, _, nbins, nframes = x.size()
118
- h = self.conv(x)[:, 0] # N, nbins, nframes
119
- h = h.permute(2, 0, 1) # nframes, N, nbins
120
- h, _ = self.lstm(h)
121
- h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins
122
- h = h.reshape(nframes, N, 1, nbins)
123
- h = h.permute(1, 2, 3, 0)
124
-
125
- return h
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIARTCHAN/openpose_editor/index.html DELETED
The diff for this file is too large to render. See raw diff
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/binarizer_zh.py DELETED
@@ -1,59 +0,0 @@
1
- import os
2
-
3
- os.environ["OMP_NUM_THREADS"] = "1"
4
-
5
- from data_gen.tts.txt_processors.zh_g2pM import ALL_SHENMU
6
- from data_gen.tts.base_binarizer import BaseBinarizer, BinarizationError
7
- from data_gen.tts.data_gen_utils import get_mel2ph
8
- from utils.hparams import set_hparams, hparams
9
- import numpy as np
10
-
11
-
12
- class ZhBinarizer(BaseBinarizer):
13
- @staticmethod
14
- def get_align(tg_fn, ph, mel, phone_encoded, res):
15
- if tg_fn is not None and os.path.exists(tg_fn):
16
- _, dur = get_mel2ph(tg_fn, ph, mel, hparams)
17
- else:
18
- raise BinarizationError(f"Align not found")
19
- ph_list = ph.split(" ")
20
- assert len(dur) == len(ph_list)
21
- mel2ph = []
22
- # 分隔符的时长分配给韵母
23
- dur_cumsum = np.pad(np.cumsum(dur), [1, 0], mode='constant', constant_values=0)
24
- for i in range(len(dur)):
25
- p = ph_list[i]
26
- if p[0] != '<' and not p[0].isalpha():
27
- uv_ = res['f0'][dur_cumsum[i]:dur_cumsum[i + 1]] == 0
28
- j = 0
29
- while j < len(uv_) and not uv_[j]:
30
- j += 1
31
- dur[i - 1] += j
32
- dur[i] -= j
33
- if dur[i] < 100:
34
- dur[i - 1] += dur[i]
35
- dur[i] = 0
36
- # 声母和韵母等长
37
- for i in range(len(dur)):
38
- p = ph_list[i]
39
- if p in ALL_SHENMU:
40
- p_next = ph_list[i + 1]
41
- if not (dur[i] > 0 and p_next[0].isalpha() and p_next not in ALL_SHENMU):
42
- print(f"assert dur[i] > 0 and p_next[0].isalpha() and p_next not in ALL_SHENMU, "
43
- f"dur[i]: {dur[i]}, p: {p}, p_next: {p_next}.")
44
- continue
45
- total = dur[i + 1] + dur[i]
46
- dur[i] = total // 2
47
- dur[i + 1] = total - dur[i]
48
- for i in range(len(dur)):
49
- mel2ph += [i + 1] * dur[i]
50
- mel2ph = np.array(mel2ph)
51
- if mel2ph.max() - 1 >= len(phone_encoded):
52
- raise BinarizationError(f"| Align does not match: {(mel2ph.max() - 1, len(phone_encoded))}")
53
- res['mel2ph'] = mel2ph
54
- res['dur'] = dur
55
-
56
-
57
- if __name__ == "__main__":
58
- set_hparams()
59
- ZhBinarizer().process()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/GenerSpeech/model/mixstyle.py DELETED
@@ -1,63 +0,0 @@
1
- from modules.commons.common_layers import *
2
- import random
3
-
4
-
5
- class MixStyle(nn.Module):
6
- """MixStyle.
7
- Reference:
8
- Zhou et al. Domain Generalization with MixStyle. ICLR 2021.
9
- """
10
-
11
- def __init__(self, p=0.5, alpha=0.1, eps=1e-6, hidden_size=256):
12
- """
13
- Args:
14
- p (float): probability of using MixStyle.
15
- alpha (float): parameter of the Beta distribution.
16
- eps (float): scaling parameter to avoid numerical issues.
17
- mix (str): how to mix.
18
- """
19
- super().__init__()
20
- self.p = p
21
- self.beta = torch.distributions.Beta(alpha, alpha)
22
- self.eps = eps
23
- self.alpha = alpha
24
- self._activated = True
25
- self.hidden_size = hidden_size
26
- self.affine_layer = LinearNorm(
27
- hidden_size,
28
- 2 * hidden_size, # For both b (bias) g (gain)
29
- )
30
-
31
- def __repr__(self):
32
- return f'MixStyle(p={self.p}, alpha={self.alpha}, eps={self.eps})'
33
-
34
- def set_activation_status(self, status=True):
35
- self._activated = status
36
-
37
- def forward(self, x, spk_embed):
38
- if not self.training or not self._activated:
39
- return x
40
-
41
- if random.random() > self.p:
42
- return x
43
-
44
- B = x.size(0)
45
-
46
- mu, sig = torch.mean(x, dim=-1, keepdim=True), torch.std(x, dim=-1, keepdim=True)
47
- x_normed = (x - mu) / (sig + 1e-6) # [B, T, H_m]
48
-
49
- lmda = self.beta.sample((B, 1, 1))
50
- lmda = lmda.to(x.device)
51
-
52
- # Get Bias and Gain
53
- mu1, sig1 = torch.split(self.affine_layer(spk_embed), self.hidden_size, dim=-1) # [B, 1, 2 * H_m] --> 2 * [B, 1, H_m]
54
-
55
- # MixStyle
56
- perm = torch.randperm(B)
57
- mu2, sig2 = mu1[perm], sig1[perm]
58
-
59
- mu_mix = mu1*lmda + mu2 * (1-lmda)
60
- sig_mix = sig1*lmda + sig2 * (1-lmda)
61
-
62
- # Perform Scailing and Shifting
63
- return sig_mix * x_normed + mu_mix # [B, T, H_m]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/diffspeech/shallow_diffusion_tts.py DELETED
@@ -1,279 +0,0 @@
1
- import math
2
- import random
3
- from functools import partial
4
- from inspect import isfunction
5
- import numpy as np
6
- import torch
7
- import torch.nn.functional as F
8
- from torch import nn
9
- from tqdm import tqdm
10
-
11
- from text_to_speech.modules.tts.fs2_orig import FastSpeech2Orig
12
- from text_to_speech.modules.tts.diffspeech.net import DiffNet
13
- from text_to_speech.modules.tts.commons.align_ops import expand_states
14
-
15
-
16
- def exists(x):
17
- return x is not None
18
-
19
-
20
- def default(val, d):
21
- if exists(val):
22
- return val
23
- return d() if isfunction(d) else d
24
-
25
-
26
- # gaussian diffusion trainer class
27
-
28
- def extract(a, t, x_shape):
29
- b, *_ = t.shape
30
- out = a.gather(-1, t)
31
- return out.reshape(b, *((1,) * (len(x_shape) - 1)))
32
-
33
-
34
- def noise_like(shape, device, repeat=False):
35
- repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
36
- noise = lambda: torch.randn(shape, device=device)
37
- return repeat_noise() if repeat else noise()
38
-
39
-
40
- def linear_beta_schedule(timesteps, max_beta=0.01):
41
- """
42
- linear schedule
43
- """
44
- betas = np.linspace(1e-4, max_beta, timesteps)
45
- return betas
46
-
47
-
48
- def cosine_beta_schedule(timesteps, s=0.008):
49
- """
50
- cosine schedule
51
- as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
52
- """
53
- steps = timesteps + 1
54
- x = np.linspace(0, steps, steps)
55
- alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
56
- alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
57
- betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
58
- return np.clip(betas, a_min=0, a_max=0.999)
59
-
60
-
61
- beta_schedule = {
62
- "cosine": cosine_beta_schedule,
63
- "linear": linear_beta_schedule,
64
- }
65
-
66
-
67
- DIFF_DECODERS = {
68
- 'wavenet': lambda hp: DiffNet(hp),
69
- }
70
-
71
-
72
- class AuxModel(FastSpeech2Orig):
73
- def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None,
74
- f0=None, uv=None, energy=None, infer=False, **kwargs):
75
- ret = {}
76
- encoder_out = self.encoder(txt_tokens) # [B, T, C]
77
- src_nonpadding = (txt_tokens > 0).float()[:, :, None]
78
- style_embed = self.forward_style_embed(spk_embed, spk_id)
79
-
80
- # add dur
81
- dur_inp = (encoder_out + style_embed) * src_nonpadding
82
- mel2ph = self.forward_dur(dur_inp, mel2ph, txt_tokens, ret)
83
- tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
84
- decoder_inp = decoder_inp_ = expand_states(encoder_out, mel2ph)
85
-
86
- # add pitch and energy embed
87
- if self.hparams['use_pitch_embed']:
88
- pitch_inp = (decoder_inp_ + style_embed) * tgt_nonpadding
89
- decoder_inp = decoder_inp + self.forward_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out)
90
-
91
- # add pitch and energy embed
92
- if self.hparams['use_energy_embed']:
93
- energy_inp = (decoder_inp_ + style_embed) * tgt_nonpadding
94
- decoder_inp = decoder_inp + self.forward_energy(energy_inp, energy, ret)
95
-
96
- # decoder input
97
- ret['decoder_inp'] = decoder_inp = (decoder_inp + style_embed) * tgt_nonpadding
98
- if self.hparams['dec_inp_add_noise']:
99
- B, T, _ = decoder_inp.shape
100
- z = kwargs.get('adv_z', torch.randn([B, T, self.z_channels])).to(decoder_inp.device)
101
- ret['adv_z'] = z
102
- decoder_inp = torch.cat([decoder_inp, z], -1)
103
- decoder_inp = self.dec_inp_noise_proj(decoder_inp) * tgt_nonpadding
104
- if kwargs['skip_decoder']:
105
- return ret
106
- ret['mel_out'] = self.forward_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs)
107
- return ret
108
-
109
-
110
- class GaussianDiffusion(nn.Module):
111
- def __init__(self, dict_size, hparams, out_dims=None):
112
- super().__init__()
113
- self.hparams = hparams
114
- out_dims = hparams['audio_num_mel_bins']
115
- denoise_fn = DIFF_DECODERS[hparams['diff_decoder_type']](hparams)
116
- timesteps = hparams['timesteps']
117
- K_step = hparams['K_step']
118
- loss_type = hparams['diff_loss_type']
119
- spec_min = hparams['spec_min']
120
- spec_max = hparams['spec_max']
121
-
122
- self.denoise_fn = denoise_fn
123
- self.fs2 = AuxModel(dict_size, hparams)
124
- self.mel_bins = out_dims
125
-
126
- if hparams['schedule_type'] == 'linear':
127
- betas = linear_beta_schedule(timesteps, hparams['max_beta'])
128
- else:
129
- betas = cosine_beta_schedule(timesteps)
130
-
131
- alphas = 1. - betas
132
- alphas_cumprod = np.cumprod(alphas, axis=0)
133
- alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
134
-
135
- timesteps, = betas.shape
136
- self.num_timesteps = int(timesteps)
137
- self.K_step = K_step
138
- self.loss_type = loss_type
139
-
140
- to_torch = partial(torch.tensor, dtype=torch.float32)
141
-
142
- self.register_buffer('betas', to_torch(betas))
143
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
144
- self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
145
-
146
- # calculations for diffusion q(x_t | x_{t-1}) and others
147
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
148
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
149
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
150
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
151
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
152
-
153
- # calculations for posterior q(x_{t-1} | x_t, x_0)
154
- posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
155
- # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
156
- self.register_buffer('posterior_variance', to_torch(posterior_variance))
157
- # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
158
- self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
159
- self.register_buffer('posterior_mean_coef1', to_torch(
160
- betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
161
- self.register_buffer('posterior_mean_coef2', to_torch(
162
- (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
163
-
164
- self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']])
165
- self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']])
166
-
167
- def q_mean_variance(self, x_start, t):
168
- mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
169
- variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
170
- log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
171
- return mean, variance, log_variance
172
-
173
- def predict_start_from_noise(self, x_t, t, noise):
174
- return (
175
- extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
176
- extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
177
- )
178
-
179
- def q_posterior(self, x_start, x_t, t):
180
- posterior_mean = (
181
- extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
182
- extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
183
- )
184
- posterior_variance = extract(self.posterior_variance, t, x_t.shape)
185
- posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
186
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
187
-
188
- def p_mean_variance(self, x, t, cond, clip_denoised: bool):
189
- noise_pred = self.denoise_fn(x, t, cond=cond)
190
- x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)
191
-
192
- if clip_denoised:
193
- x_recon.clamp_(-1., 1.)
194
-
195
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
196
- return model_mean, posterior_variance, posterior_log_variance
197
-
198
- @torch.no_grad()
199
- def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False):
200
- b, *_, device = *x.shape, x.device
201
- model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond, clip_denoised=clip_denoised)
202
- noise = noise_like(x.shape, device, repeat_noise)
203
- # no noise when t == 0
204
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
205
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
206
-
207
- def q_sample(self, x_start, t, noise=None):
208
- noise = default(noise, lambda: torch.randn_like(x_start))
209
- return (
210
- extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
211
- extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
212
- )
213
-
214
- def p_losses(self, x_start, t, cond, noise=None, nonpadding=None):
215
- noise = default(noise, lambda: torch.randn_like(x_start))
216
-
217
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
218
- x_recon = self.denoise_fn(x_noisy, t, cond)
219
-
220
- if self.loss_type == 'l1':
221
- if nonpadding is not None:
222
- loss = ((noise - x_recon).abs() * nonpadding.unsqueeze(1)).mean()
223
- else:
224
- # print('are you sure w/o nonpadding?')
225
- loss = (noise - x_recon).abs().mean()
226
-
227
- elif self.loss_type == 'l2':
228
- loss = F.mse_loss(noise, x_recon)
229
- else:
230
- raise NotImplementedError()
231
-
232
- return loss
233
-
234
- def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None,
235
- ref_mels=None, f0=None, uv=None, energy=None, infer=False, **kwargs):
236
- b, *_, device = *txt_tokens.shape, txt_tokens.device
237
- ret = self.fs2(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id,
238
- f0=f0, uv=uv, energy=energy, infer=infer, skip_decoder=(not infer), **kwargs)
239
- cond = ret['decoder_inp'].transpose(1, 2)
240
-
241
- if not infer:
242
- t = torch.randint(0, self.K_step, (b,), device=device).long()
243
- x = ref_mels
244
- x = self.norm_spec(x)
245
- x = x.transpose(1, 2)[:, None, :, :] # [B, 1, M, T]
246
- ret['diff_loss'] = self.p_losses(x, t, cond)
247
- # nonpadding = (mel2ph != 0).float()
248
- # ret['diff_loss'] = self.p_losses(x, t, cond, nonpadding=nonpadding)
249
- ret['mel_out'] = None
250
- else:
251
- ret['fs2_mel'] = ret['mel_out']
252
- fs2_mels = ret['mel_out']
253
- t = self.K_step
254
- fs2_mels = self.norm_spec(fs2_mels)
255
- fs2_mels = fs2_mels.transpose(1, 2)[:, None, :, :]
256
-
257
- x = self.q_sample(x_start=fs2_mels, t=torch.tensor([t - 1], device=device).long())
258
- if self.hparams.get('gaussian_start') is not None and self.hparams['gaussian_start']:
259
- print('===> gaussian start.')
260
- shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2])
261
- x = torch.randn(shape, device=device)
262
- for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
263
- x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
264
- x = x[:, 0].transpose(1, 2)
265
- ret['mel_out'] = self.denorm_spec(x)
266
-
267
- return ret
268
-
269
- def norm_spec(self, x):
270
- return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
271
-
272
- def denorm_spec(self, x):
273
- return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min
274
-
275
- def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph):
276
- return self.fs2.cwt2f0_norm(cwt_spec, mean, std, mel2ph)
277
-
278
- def out2mel(self, x):
279
- return x
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/fs2_orig.py DELETED
@@ -1,138 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
- from text_to_speech.modules.tts.fs2_orig import FastSpeech2Orig
4
- from tasks.tts.dataset_utils import FastSpeechDataset
5
- from tasks.tts.fs import FastSpeechTask
6
- from text_to_speech.utils.commons.dataset_utils import collate_1d, collate_2d
7
- from text_to_speech.utils.commons.hparams import hparams
8
- from text_to_speech.utils.plot.plot import spec_to_figure
9
- import numpy as np
10
-
11
-
12
- class FastSpeech2OrigDataset(FastSpeechDataset):
13
- def __init__(self, prefix, shuffle=False, items=None, data_dir=None):
14
- super().__init__(prefix, shuffle, items, data_dir)
15
- self.pitch_type = hparams.get('pitch_type')
16
-
17
- def __getitem__(self, index):
18
- sample = super().__getitem__(index)
19
- item = self._get_item(index)
20
- hparams = self.hparams
21
- mel = sample['mel']
22
- T = mel.shape[0]
23
- sample['energy'] = (mel.exp() ** 2).sum(-1).sqrt()
24
- if hparams['use_pitch_embed'] and self.pitch_type == 'cwt':
25
- cwt_spec = torch.Tensor(item['cwt_spec'])[:T]
26
- f0_mean = item.get('f0_mean', item.get('cwt_mean'))
27
- f0_std = item.get('f0_std', item.get('cwt_std'))
28
- sample.update({"cwt_spec": cwt_spec, "f0_mean": f0_mean, "f0_std": f0_std})
29
- return sample
30
-
31
- def collater(self, samples):
32
- if len(samples) == 0:
33
- return {}
34
- batch = super().collater(samples)
35
- if hparams['use_pitch_embed']:
36
- energy = collate_1d([s['energy'] for s in samples], 0.0)
37
- else:
38
- energy = None
39
- batch.update({'energy': energy})
40
- if self.pitch_type == 'cwt':
41
- cwt_spec = collate_2d([s['cwt_spec'] for s in samples])
42
- f0_mean = torch.Tensor([s['f0_mean'] for s in samples])
43
- f0_std = torch.Tensor([s['f0_std'] for s in samples])
44
- batch.update({'cwt_spec': cwt_spec, 'f0_mean': f0_mean, 'f0_std': f0_std})
45
- return batch
46
-
47
-
48
- class FastSpeech2OrigTask(FastSpeechTask):
49
- def __init__(self):
50
- super(FastSpeech2OrigTask, self).__init__()
51
- self.dataset_cls = FastSpeech2OrigDataset
52
-
53
- def build_tts_model(self):
54
- dict_size = len(self.token_encoder)
55
- self.model = FastSpeech2Orig(dict_size, hparams)
56
-
57
- def run_model(self, sample, infer=False, *args, **kwargs):
58
- txt_tokens = sample['txt_tokens'] # [B, T_t]
59
- spk_embed = sample.get('spk_embed')
60
- spk_id = sample.get('spk_ids')
61
- if not infer:
62
- target = sample['mels'] # [B, T_s, 80]
63
- mel2ph = sample['mel2ph'] # [B, T_s]
64
- f0 = sample.get('f0')
65
- uv = sample.get('uv')
66
- energy = sample.get('energy')
67
- output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id,
68
- f0=f0, uv=uv, energy=energy, infer=False)
69
- losses = {}
70
- self.add_mel_loss(output['mel_out'], target, losses)
71
- self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
72
- if hparams['use_pitch_embed']:
73
- self.add_pitch_loss(output, sample, losses)
74
- if hparams['use_energy_embed']:
75
- self.add_energy_loss(output, sample, losses)
76
- return losses, output
77
- else:
78
- mel2ph, uv, f0, energy = None, None, None, None
79
- use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur'])
80
- use_gt_f0 = kwargs.get('infer_use_gt_f0', hparams['use_gt_f0'])
81
- use_gt_energy = kwargs.get('infer_use_gt_energy', hparams['use_gt_energy'])
82
- if use_gt_dur:
83
- mel2ph = sample['mel2ph']
84
- if use_gt_f0:
85
- f0 = sample['f0']
86
- uv = sample['uv']
87
- if use_gt_energy:
88
- energy = sample['energy']
89
- output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id,
90
- f0=f0, uv=uv, energy=energy, infer=True)
91
- return output
92
-
93
- def save_valid_result(self, sample, batch_idx, model_out):
94
- super(FastSpeech2OrigTask, self).save_valid_result(sample, batch_idx, model_out)
95
- self.plot_cwt(batch_idx, model_out['cwt'], sample['cwt_spec'])
96
-
97
- def plot_cwt(self, batch_idx, cwt_out, cwt_gt=None):
98
- if len(cwt_out.shape) == 3:
99
- cwt_out = cwt_out[0]
100
- if isinstance(cwt_out, torch.Tensor):
101
- cwt_out = cwt_out.cpu().numpy()
102
- if cwt_gt is not None:
103
- if len(cwt_gt.shape) == 3:
104
- cwt_gt = cwt_gt[0]
105
- if isinstance(cwt_gt, torch.Tensor):
106
- cwt_gt = cwt_gt.cpu().numpy()
107
- cwt_out = np.concatenate([cwt_out, cwt_gt], -1)
108
- name = f'cwt_val_{batch_idx}'
109
- self.logger.add_figure(name, spec_to_figure(cwt_out), self.global_step)
110
-
111
- def add_pitch_loss(self, output, sample, losses):
112
- if hparams['pitch_type'] == 'cwt':
113
- cwt_spec = sample[f'cwt_spec']
114
- f0_mean = sample['f0_mean']
115
- uv = sample['uv']
116
- mel2ph = sample['mel2ph']
117
- f0_std = sample['f0_std']
118
- cwt_pred = output['cwt'][:, :, :10]
119
- f0_mean_pred = output['f0_mean']
120
- f0_std_pred = output['f0_std']
121
- nonpadding = (mel2ph != 0).float()
122
- losses['C'] = F.l1_loss(cwt_pred, cwt_spec) * hparams['lambda_f0']
123
- if hparams['use_uv']:
124
- assert output['cwt'].shape[-1] == 11
125
- uv_pred = output['cwt'][:, :, -1]
126
- losses['uv'] = (F.binary_cross_entropy_with_logits(uv_pred, uv, reduction='none')
127
- * nonpadding).sum() / nonpadding.sum() * hparams['lambda_uv']
128
- losses['f0_mean'] = F.l1_loss(f0_mean_pred, f0_mean) * hparams['lambda_f0']
129
- losses['f0_std'] = F.l1_loss(f0_std_pred, f0_std) * hparams['lambda_f0']
130
- else:
131
- super(FastSpeech2OrigTask, self).add_pitch_loss(output, sample, losses)
132
-
133
- def add_energy_loss(self, output, sample, losses):
134
- energy_pred, energy = output['energy_pred'], sample['energy']
135
- nonpadding = (energy != 0).float()
136
- loss = (F.mse_loss(energy_pred, energy, reduction='none') * nonpadding).sum() / nonpadding.sum()
137
- loss = loss * hparams['lambda_energy']
138
- losses['e'] = loss
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIWaves/Debate/gradio_base.py DELETED
@@ -1,574 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The AIWaves Inc. team.
3
-
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
-
17
- # Emoji comes from this website:
18
- # https://emojipedia.org/
19
- import subprocess
20
- from gradio_config import GradioConfig as gc
21
- import gradio as gr
22
- from typing import List, Tuple, Any
23
- import time
24
- import socket
25
- import psutil
26
- import os
27
- from abc import abstractmethod
28
- import openai
29
-
30
- def test_apikey_connection(api_key=None, model="gpt-3.5-turbo"):
31
- openai.api_key = api_key if api_key is not None else os.environ["API_KEY"]
32
- if "PROXY" in os.environ:
33
- openai.proxy = os.environ["PROXY"]
34
- messages = [{"role": "user", "content": "what's your name?"}]
35
- try:
36
- response = openai.ChatCompletion.create(
37
- model=model,
38
- messages=messages,
39
- )
40
- return True
41
- except:
42
- return False
43
-
44
- def convert2list4agentname(sop):
45
- """
46
- Extract the agent names of all states
47
- return:
48
- only name: [name1, name2, ...]
49
- agent_name: [name1(role1), name2(role2), ...]
50
- """
51
- only_name = []
52
- agent_name = []
53
- roles_to_names = sop.roles_to_names
54
- for state_name,roles_names in roles_to_names.items():
55
- for role,name in roles_names.items():
56
- agent_name.append(f"{name}({role})")
57
- only_name.append(name)
58
- agent_name = list(set(agent_name))
59
- agent_name.sort()
60
- return agent_name, only_name
61
-
62
- def is_port_in_use(port):
63
- """Check if the port is available"""
64
- for conn in psutil.net_connections():
65
- if conn.laddr.port == port:
66
- return True
67
- return False
68
-
69
- def check_port(port):
70
- """Determine available ports"""
71
- if os.path.isfile("PORT.txt"):
72
- port = int(open("PORT.txt","r",encoding='utf-8').readlines()[0])
73
- else:
74
- for i in range(10):
75
- if is_port_in_use(port+i) == False:
76
- port += i
77
- break
78
- with open("PORT.txt", "w") as f:
79
- f.writelines(str(port))
80
- return port
81
-
82
- # Determine some heads
83
- SPECIAL_SIGN = {
84
- "START": "<START>",
85
- "SPLIT": "<SELFDEFINESEP>",
86
- "END": "<ENDSEP>"
87
- }
88
- HOST = "127.0.0.1"
89
- # The starting port number for the search.
90
- PORT = 15000
91
- PORT = check_port(PORT)
92
-
93
- def print_log(message:str):
94
- print(f"[{time.ctime()}]{message}")
95
-
96
- global_dialog = {
97
- "user": [],
98
- "agent": {},
99
- "system": []
100
- }
101
-
102
- class UIHelper:
103
- """Static Class"""
104
-
105
- @classmethod
106
- def wrap_css(cls, content, name) -> str:
107
- """
108
- Description:
109
- Wrap CSS around each output, and return it in HTML format for rendering with Markdown.
110
- Input:
111
- content: Output content
112
- name: Whose output is it
113
- Output:
114
- HTML
115
- """
116
- assert name in gc.OBJECT_INFO, \
117
- f"The current name `{name}` is not registered with an image. The names of the currently registered agents are in `{gc.OBJECT_INFO.keys()}`. Please use `GradioConfig.add_agent()` from `Gradio_Config/gradio_config.py` to bind the name of the new agent."
118
- output = ""
119
- info = gc.OBJECT_INFO[name]
120
- if info["id"] == "USER":
121
- output = gc.BUBBLE_CSS["USER"].format(
122
- info["bubble_color"], # Background-color
123
- info["text_color"], # Color of the agent's name
124
- name, # Agent name
125
- info["text_color"], # Font color
126
- info["font_size"], # Font size
127
- content, # Content
128
- info["head_url"] # URL of the avatar
129
- )
130
- elif info["id"] == "SYSTEM":
131
- output = gc.BUBBLE_CSS["SYSTEM"].format(
132
- info["bubble_color"], # Background-color
133
- info["font_size"], # Font size
134
- info["text_color"], # Font color
135
- name, # Agent name
136
- content # Content
137
- )
138
- elif info["id"] == "AGENT":
139
- output = gc.BUBBLE_CSS["AGENT"].format(
140
- info["head_url"], # URL of the avatar
141
- info["bubble_color"], # Background-color
142
- info["text_color"], # Font color
143
- name, # Agent name
144
- info["text_color"], # Font color
145
- info["font_size"], # Font size
146
- content, # Content
147
- )
148
- else:
149
- assert False, f"Id `{info['id']}` is invalid. The valid id is in ['SYSTEM', 'AGENT', 'USER']"
150
- return output
151
-
152
- @classmethod
153
- def novel_filter(cls, content, agent_name):
154
-
155
- """比如<CONTENT>...</CONTENT>,就应该输出CONTENT:..."""
156
- IS_RECORDER = agent_name.lower() in ["recorder", "summary"]
157
- if IS_RECORDER:
158
- BOLD_FORMAT = """<div style="color: #000000; display:inline">
159
- <b>{}</b>
160
- </div>
161
- <span style="color: black;">
162
- """
163
- else:
164
- BOLD_FORMAT = "<b>{}</b>"
165
- CENTER_FORMAT = """<div style="background-color: #F0F0F0; text-align: center; padding: 5px; color: #000000">
166
- <b>{}</b>
167
- </div>
168
- """
169
- START_FORMAT = "<{}>"
170
- END_FORMAT = "</{}>"
171
- mapping = {
172
- "TARGET": "🎯 Current Target: ",
173
- "NUMBER": "🍖 Required Number: ",
174
- "THOUGHT": "🤔 Overall Thought: ",
175
- "FIRST NAME": "⚪ First Name: ",
176
- "LAST NAME": "⚪ Last Name: ",
177
- "ROLE": "🤠 Character Properties: ",
178
- "RATIONALES": "🤔 Design Rationale: ",
179
- "BACKGROUND": "🚊 Character Background: ",
180
- "ID": "🔴 ID: ",
181
- "TITLE": "🧩 Chapter Title: ",
182
- "ABSTRACT": "🎬 Abstract: ",
183
- "CHARACTER INVOLVED": "☃️ Character Involved: ",
184
- "ADVICE": "💬 Advice:",
185
- "NAME": "📛 Name: ",
186
- "GENDER": "👩‍👩‍👦‍👦 Gender: ",
187
- "AGE": "⏲️ Age: ",
188
- "WORK": "👨‍🔧 Work: ",
189
- "PERSONALITY": "🧲 Character Personality: ",
190
- "SPEECH STYLE": "🗣️ Speaking Style: ",
191
- "RELATION": "🏠 Relation with Others: ",
192
- "WORD COUNT": "🎰 Word Count: ",
193
- "CHARACTER DESIGN": "📈 Character Design: ",
194
- "CHARACTER REQUIRE": "📈 Character Require: ",
195
- "CHARACTER NAME": "📈 Character Naming Analysis: ",
196
- "CHARACTER NOW": "📈 Character Now: ",
197
- "OUTLINE DESIGN": "📈 Outline Design: ",
198
- "OUTLINE REQUIRE": "📈 Outline Require: ",
199
- "OUTLINE NOW": "📈 Outline Now: ",
200
- "SUB TASK": "🎯 Current Sub Task: ",
201
- "CHARACTER ADVICE": "💬 Character Design Advice: ",
202
- "OUTLINE ADVANTAGE": "📈 Outline Advantage: ",
203
- "OUTLINE DISADVANTAGE": "📈 Outline Disadvantage: ",
204
- "OUTLINE ADVICE": "💬 Outline Advice: ",
205
- "NEXT": "➡️ Next Advice: ",
206
- "TOTAL NUMBER": "🔢 Total Number: "
207
- }
208
- for i in range(1, 10):
209
- mapping[f"CHARACTER {i}"] = f"🦄 Character {i}"
210
- mapping[f"SECTION {i}"] = f"🏷️ Chapter {i}"
211
- for key in mapping:
212
- if key in [f"CHARACTER {i}" for i in range(1, 10)] \
213
- or key in [f"SECTION {i}" for i in range(1, 10)] \
214
- :
215
- content = content.replace(
216
- START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key])
217
- )
218
- elif key in ["TOTAL NUMBER"]:
219
- content = content.replace(
220
- START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key]) + """<span style="color: black;">"""
221
- )
222
- content = content.replace(
223
- END_FORMAT.format(key), "</span>"
224
- )
225
- else:
226
- content = content.replace(
227
- START_FORMAT.format(key), BOLD_FORMAT.format(mapping[key])
228
- )
229
-
230
- content = content.replace(
231
- END_FORMAT.format(key), "</span>" if IS_RECORDER else ""
232
- )
233
- return content
234
-
235
- @classmethod
236
- def singleagent_filter(cls, content, agent_name):
237
- return content
238
-
239
- @classmethod
240
- def debate_filter(cls, content, agent_name):
241
- return content
242
-
243
- @classmethod
244
- def code_filter(cls, content, agent_name):
245
- # return content.replace("```python", "<pre><code>").replace("```","</pre></code>")
246
- return content
247
-
248
- @classmethod
249
- def general_filter(cls, content, agent_name):
250
- return content
251
-
252
- @classmethod
253
- def filter(cls, content: str, agent_name: str, ui_name: str):
254
- """
255
- Description:
256
- Make certain modifications to the output content to enhance its aesthetics when content is showed in gradio.
257
- Input:
258
- content: output content
259
- agent_name: Whose output is it
260
- ui_name: What UI is currently launching
261
- Output:
262
- Modified content
263
- """
264
- mapping = {
265
- "SingleAgentUI": cls.singleagent_filter,
266
- "DebateUI": cls.debate_filter,
267
- "NovelUI": cls.novel_filter,
268
- "CodeUI": cls.code_filter,
269
- "GeneralUI": cls.general_filter
270
- }
271
- if ui_name in mapping:
272
- return mapping[ui_name](content, agent_name)
273
- else:
274
- return content
275
-
276
- class Client:
277
- """
278
- For inter-process communication, this is the client.
279
- `gradio_backend.PY` serves as the backend, while `run_gradio` is the frontend.
280
- Communication between the frontend and backend is accomplished using Sockets.
281
- """
282
- # =======================Radio Const String======================
283
- SINGLE_MODE = "Single Mode"
284
- AUTO_MODE = "Auto Mode"
285
- MODE_LABEL = "Select the execution mode"
286
- MODE_INFO = "Single mode refers to when the current agent output ends, it will stop running until you click to continue. Auto mode refers to when you complete the input, all agents will continue to output until the task ends."
287
- # ===============================================================
288
- mode = AUTO_MODE
289
- FIRST_RUN:bool = True
290
- # if last agent is user, then next agent will be executed automatically rather than click button
291
- LAST_USER:bool = False
292
-
293
- receive_server = None
294
- send_server = None
295
- current_node = None
296
- cache = {}
297
-
298
- def __init__(self, host=HOST, port=PORT, bufsize=1024):
299
- assert Client.mode in [Client.SINGLE_MODE, Client.AUTO_MODE]
300
- self.SIGN = SPECIAL_SIGN
301
- self.bufsize = bufsize
302
- assert bufsize > 0
303
- self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
304
- self.client_socket.connect((host, port))
305
- while True:
306
- data = self.client_socket.recv(self.bufsize).decode('utf-8')
307
- if data == "hi":
308
- self.client_socket.send("hello agent".encode('utf-8'))
309
- time.sleep(1)
310
- elif data == "check":
311
- break
312
- print_log("Client: connecting successfully......")
313
-
314
- def start_server(self):
315
- while True:
316
- message = yield
317
- if message == 'exit':
318
- break
319
- self.send_message(message=message)
320
-
321
- def send_message(self, message):
322
- """Send the message to the server."""
323
- if isinstance(message, list) or isinstance(message, dict):
324
- message = str(message)
325
- assert isinstance(message, str)
326
- message = message + self.SIGN["SPLIT"]
327
- self.client_socket.send(message.encode('utf-8'))
328
-
329
- def receive_message(self, end_identifier: str = None, split_identifier: str = SPECIAL_SIGN["SPLIT"]) -> List:
330
- """Receive messages from the server, and it will block the process. Supports receiving long text."""
331
- remaining = ""
332
- while True:
333
- # receive message
334
- dataset = self.client_socket.recv(self.bufsize)
335
- try:
336
- # If decoding fails, it indicates that the current transmission is a long text.
337
- dataset = dataset.decode('utf-8')
338
- except UnicodeDecodeError:
339
- if not isinstance(remaining, bytes):
340
- remaining = remaining.encode('utf-8')
341
- assert isinstance(dataset, bytes)
342
- remaining += dataset
343
- try:
344
- dataset = remaining.decode('utf-8')
345
- remaining = ""
346
- except UnicodeDecodeError:
347
- continue
348
- assert isinstance(remaining, str)
349
- dataset = remaining + dataset
350
- list_dataset = dataset.split(split_identifier)
351
- if len(list_dataset) == 1:
352
- # If there is only one result from the split, it indicates that the current sequence itself has not yet ended.
353
- remaining = list_dataset[0]
354
- continue
355
- else:
356
- remaining = list_dataset[-1]
357
- # Receive successfully
358
- list_dataset = list_dataset[:-1]
359
- return_value = []
360
- for item in list_dataset:
361
- if end_identifier is not None and item == end_identifier:
362
- break
363
- return_value.append(item)
364
- identifier = yield return_value
365
- if identifier is not None:
366
- end_identifier, split_identifier = identifier
367
-
368
- def listening_for_start_(self):
369
- """
370
- When the server starts, the client is automatically launched.
371
- At this point, process synchronization is required,
372
- such as sending client data to the server for rendering,
373
- then the server sending the modified data back to the client,
374
- and simultaneously sending a startup command.
375
- Once the client receives the data, it will start running.
376
- """
377
- Client.receive_server = self.receive_message()
378
- # Waiting for information from the server.
379
- data: list = next(Client.receive_server)
380
- assert len(data) == 1
381
- data = eval(data[0])
382
- assert isinstance(data, dict)
383
- Client.cache.update(data)
384
- # Waiting for start command from the server.
385
- data:list = Client.receive_server.send(None)
386
- assert len(data) == 1
387
- assert data[0] == "<START>"
388
-
389
- class WebUI:
390
- """
391
- The base class for the frontend, which encapsulates some functions for process information synchronization.
392
- When a new frontend needs to be created, you should inherit from this class,
393
- then implement the `construct_ui()` method and set up event listeners.
394
- Finally, execute `run()` to load it.
395
- """
396
-
397
- def receive_message(
398
- self,
399
- end_identifier:str=None,
400
- split_identifier:str=SPECIAL_SIGN["SPLIT"]
401
- )->List:
402
- """This is the same as in Client class."""
403
- yield "hello"
404
- remaining = ""
405
- while True:
406
- dataset = self.client_socket.recv(self.bufsize)
407
- try:
408
- dataset = dataset.decode('utf-8')
409
- except UnicodeDecodeError:
410
- if not isinstance(remaining, bytes):
411
- remaining = remaining.encode('utf-8')
412
- assert isinstance(dataset, bytes)
413
- remaining += dataset
414
- try:
415
- dataset = remaining.decode('utf-8')
416
- remaining = ""
417
- except UnicodeDecodeError:
418
- continue
419
- assert isinstance(remaining, str)
420
- dataset = remaining + dataset
421
- list_dataset = dataset.split(split_identifier)
422
- if len(list_dataset) == 1:
423
- remaining = list_dataset[0]
424
- continue
425
- else:
426
- remaining = list_dataset[-1]
427
- list_dataset = list_dataset[:-1]
428
- return_value = []
429
- for item in list_dataset:
430
- if end_identifier is not None and item == end_identifier:
431
- break
432
- return_value.append(item)
433
- identifier = yield return_value
434
- if identifier is not None:
435
- end_identifier, split_identifier = identifier
436
-
437
- def send_message(self, message:str):
438
- """Send message to client."""
439
- SEP = self.SIGN["SPLIT"]
440
- self.client_socket.send(
441
- (message+SEP).encode("utf-8")
442
- )
443
-
444
- def _connect(self):
445
- # check
446
- if self.server_socket:
447
- self.server_socket.close()
448
- assert not os.path.isfile("PORT.txt")
449
- self.socket_port = check_port(PORT)
450
- # Step1. initialize
451
- self.server_socket = socket.socket(
452
- socket.AF_INET, socket.SOCK_STREAM
453
- )
454
- # Step2. binding ip and port
455
- self.server_socket.bind((self.socket_host, self.socket_port))
456
- # Step3. run client
457
- self._start_client()
458
-
459
- # Step4. listening for connect
460
- self.server_socket.listen(1)
461
-
462
- # Step5. test connection
463
- client_socket, client_address = self.server_socket.accept()
464
- print_log("server: establishing connection......")
465
- self.client_socket = client_socket
466
- while True:
467
- client_socket.send("hi".encode('utf-8'))
468
- time.sleep(1)
469
- data = client_socket.recv(self.bufsize).decode('utf-8')
470
- if data == "hello agent":
471
- client_socket.send("check".encode('utf-8'))
472
- print_log("server: connect successfully")
473
- break
474
- assert os.path.isfile("PORT.txt")
475
- os.remove("PORT.txt")
476
- if self.receive_server:
477
- del self.receive_server
478
- self.receive_server = self.receive_message()
479
- assert next(self.receive_server) == "hello"
480
-
481
- @abstractmethod
482
- def render_and_register_ui(self):
483
- # You need to implement this function.
484
- # The function's purpose is to bind the name of the agent with an image.
485
- # The name of the agent is stored in `self.cache[]`,
486
- # and the function for binding is in the method `add_agents` of the class `GradioConfig` in `Gradio_Config/gradio_config.py``.
487
- # This function will be executed in `self.first_recieve_from_client()`
488
- pass
489
-
490
- def first_recieve_from_client(self, reset_mode:bool=False):
491
- """
492
- This function is used to receive information from the client and is typically executed during the initialization of the class.
493
- If `reset_mode` is False, it will bind the name of the agent with an image.
494
- """
495
- self.FIRST_RECIEVE_FROM_CLIENT = True
496
- data_list:List = self.receive_server.send(None)
497
- assert len(data_list) == 1
498
- data = eval(data_list[0])
499
- assert isinstance(data, dict)
500
- self.cache.update(data)
501
- if not reset_mode:
502
- self.render_and_register_ui()
503
-
504
- def _second_send(self, message:dict):
505
- # Send the modified message.
506
- # It will be executed in `self.send_start_cmd()` automatically.
507
- self.send_message(str(message))
508
-
509
- def _third_send(self):
510
- # Send start command.
511
- # It will be executed in `self.send_start_cmd()` automatically.
512
- self.send_message(self.SIGN['START'])
513
-
514
- def send_start_cmd(self, message:dict={"hello":"hello"}):
515
- # If you have no message to send, you can ignore the args `message`.
516
- assert self.FIRST_RECIEVE_FROM_CLIENT, "Please make sure you have executed `self.first_recieve_from_client()` manually."
517
- self._second_send(message=message)
518
- time.sleep(1)
519
- self._third_send()
520
- self.FIRST_RECIEVE_FROM_CLIENT = False
521
-
522
- def __init__(
523
- self,
524
- client_cmd: list, # ['python','test.py','--a','b','--c','d']
525
- socket_host: str = HOST,
526
- socket_port: int = PORT,
527
- bufsize: int = 1024,
528
- ui_name: str = ""
529
- ):
530
- self.ui_name = ui_name
531
- self.server_socket = None
532
- self.SIGN = SPECIAL_SIGN
533
- self.socket_host = socket_host
534
- self.socket_port = socket_port
535
- self.bufsize = bufsize
536
- self.client_cmd = client_cmd
537
-
538
- self.receive_server = None
539
- self.cache = {}
540
- assert self.bufsize > 0
541
- self._connect()
542
-
543
- def _start_client(self):
544
- print(f"server: executing `{' '.join(self.client_cmd)}` ...")
545
- self.backend = subprocess.Popen(self.client_cmd)
546
-
547
- def _close_client(self):
548
- print(f"server: killing `{' '.join(self.client_cmd)}` ...")
549
- self.backend.terminate()
550
-
551
- def reset(self):
552
- print("server: restarting ...")
553
- self._close_client()
554
- time.sleep(1)
555
- self._connect()
556
-
557
- def render_bubble(self, rendered_data, agent_response, node_name, render_node_name:bool=True):
558
- # Rendered bubbles (HTML format) are used for gradio output.
559
- output = f"**{node_name}**<br>" if render_node_name else ""
560
- for item in agent_response:
561
- for agent_name in item:
562
- content = item[agent_name].replace("\n", "<br>")
563
- content = UIHelper.filter(content, agent_name, self.ui_name)
564
- output = f"{output}<br>{UIHelper.wrap_css(content, agent_name)}"
565
- rendered_data[-1] = [rendered_data[-1][0], output]
566
- return rendered_data
567
-
568
- def run(self,share: bool = True):
569
- self.demo.queue()
570
- self.demo.launch()
571
-
572
-
573
- if __name__ == '__main__':
574
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIZero2HeroBootcamp/TranscriptAILearnerFromYoutube/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: TranscriptAILearnerFromYoutube
3
- emoji: 📊
4
- colorFrom: gray
5
- colorTo: pink
6
- sdk: streamlit
7
- sdk_version: 1.21.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ASJMO/freegpt/g4f/Provider/Providers/DeepAi.py DELETED
@@ -1,46 +0,0 @@
1
- import os
2
- import json
3
- import random
4
- import hashlib
5
- import requests
6
-
7
- from ...typing import sha256, Dict, get_type_hints
8
-
9
- url = 'https://deepai.org'
10
- model = ['gpt-3.5-turbo']
11
- supports_stream = True
12
- needs_auth = False
13
-
14
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
15
- def md5(text: str) -> str:
16
- return hashlib.md5(text.encode()).hexdigest()[::-1]
17
-
18
-
19
- def get_api_key(user_agent: str) -> str:
20
- part1 = str(random.randint(0, 10**11))
21
- part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
22
-
23
- return f"tryit-{part1}-{part2}"
24
-
25
- user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
26
-
27
- headers = {
28
- "api-key": get_api_key(user_agent),
29
- "user-agent": user_agent
30
- }
31
-
32
- files = {
33
- "chat_style": (None, "chat"),
34
- "chatHistory": (None, json.dumps(messages))
35
- }
36
-
37
- r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)
38
-
39
- for chunk in r.iter_content(chunk_size=None):
40
- r.raise_for_status()
41
- yield chunk.decode()
42
-
43
-
44
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
45
- '(%s)' % ', '.join(
46
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py DELETED
@@ -1,56 +0,0 @@
1
- _base_ = 'yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'
2
-
3
- data_root = './data/cat/'
4
- class_name = ('cat', )
5
- num_classes = len(class_name)
6
- metainfo = dict(classes=class_name, palette=[(20, 220, 60)])
7
-
8
- anchors = [
9
- [(68, 69), (154, 91), (143, 162)], # P3/8
10
- [(242, 160), (189, 287), (391, 207)], # P4/16
11
- [(353, 337), (539, 341), (443, 432)] # P5/32
12
- ]
13
-
14
- max_epochs = 40
15
- train_batch_size_per_gpu = 12
16
- train_num_workers = 4
17
-
18
- load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa
19
-
20
- model = dict(
21
- backbone=dict(frozen_stages=4),
22
- bbox_head=dict(
23
- head_module=dict(num_classes=num_classes),
24
- prior_generator=dict(base_sizes=anchors)))
25
-
26
- train_dataloader = dict(
27
- batch_size=train_batch_size_per_gpu,
28
- num_workers=train_num_workers,
29
- dataset=dict(
30
- data_root=data_root,
31
- metainfo=metainfo,
32
- ann_file='annotations/trainval.json',
33
- data_prefix=dict(img='images/')))
34
-
35
- val_dataloader = dict(
36
- dataset=dict(
37
- metainfo=metainfo,
38
- data_root=data_root,
39
- ann_file='annotations/test.json',
40
- data_prefix=dict(img='images/')))
41
-
42
- test_dataloader = val_dataloader
43
-
44
- _base_.optim_wrapper.optimizer.batch_size_per_gpu = train_batch_size_per_gpu
45
-
46
- val_evaluator = dict(ann_file=data_root + 'annotations/test.json')
47
- test_evaluator = val_evaluator
48
-
49
- default_hooks = dict(
50
- checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'),
51
- # The warmup_mim_iter parameter is critical.
52
- # The default value is 1000 which is not suitable for cat datasets.
53
- param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10),
54
- logger=dict(type='LoggerHook', interval=5))
55
- train_cfg = dict(max_epochs=max_epochs, val_interval=10)
56
- # visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]) # noqa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/work_dirs/yolov6_s_df2_0.4/yolov6_s_fast.py DELETED
@@ -1,510 +0,0 @@
1
- default_scope = 'mmyolo'
2
- default_hooks = dict(
3
- timer=dict(type='IterTimerHook'),
4
- logger=dict(type='LoggerHook', interval=10),
5
- param_scheduler=dict(
6
- type='YOLOv5ParamSchedulerHook',
7
- scheduler_type='cosine',
8
- lr_factor=0.01,
9
- max_epochs=100),
10
- checkpoint=dict(
11
- type='CheckpointHook', interval=2, max_keep_ckpts=5, save_best='auto'),
12
- sampler_seed=dict(type='DistSamplerSeedHook'),
13
- visualization=dict(type='mmdet.DetVisualizationHook'))
14
- env_cfg = dict(
15
- cudnn_benchmark=True,
16
- mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
17
- dist_cfg=dict(backend='nccl'))
18
- vis_backends = [dict(type='LocalVisBackend')]
19
- visualizer = dict(
20
- type='mmdet.DetLocalVisualizer',
21
- vis_backends=[dict(type='LocalVisBackend'),
22
- dict(type='WandbVisBackend')],
23
- name='visualizer')
24
- log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
25
- log_level = 'INFO'
26
- load_from = None
27
- resume = False
28
- file_client_args = dict(backend='disk')
29
- _file_client_args = dict(backend='disk')
30
- tta_model = dict(
31
- type='mmdet.DetTTAModel',
32
- tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.65), max_per_img=300))
33
- img_scales = [(640, 640), (320, 320), (960, 960)]
34
- _multiscale_resize_transforms = [
35
- dict(
36
- type='Compose',
37
- transforms=[
38
- dict(type='YOLOv5KeepRatioResize', scale=(640, 640)),
39
- dict(
40
- type='LetterResize',
41
- scale=(640, 640),
42
- allow_scale_up=False,
43
- pad_val=dict(img=114))
44
- ]),
45
- dict(
46
- type='Compose',
47
- transforms=[
48
- dict(type='YOLOv5KeepRatioResize', scale=(320, 320)),
49
- dict(
50
- type='LetterResize',
51
- scale=(320, 320),
52
- allow_scale_up=False,
53
- pad_val=dict(img=114))
54
- ]),
55
- dict(
56
- type='Compose',
57
- transforms=[
58
- dict(type='YOLOv5KeepRatioResize', scale=(960, 960)),
59
- dict(
60
- type='LetterResize',
61
- scale=(960, 960),
62
- allow_scale_up=False,
63
- pad_val=dict(img=114))
64
- ])
65
- ]
66
- tta_pipeline = [
67
- dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
68
- dict(
69
- type='TestTimeAug',
70
- transforms=[[{
71
- 'type': 'Compose',
72
- 'transforms': [{
73
- 'type': 'YOLOv5KeepRatioResize',
74
- 'scale': (640, 640)
75
- }, {
76
- 'type': 'LetterResize',
77
- 'scale': (640, 640),
78
- 'allow_scale_up': False,
79
- 'pad_val': {
80
- 'img': 114
81
- }
82
- }]
83
- }, {
84
- 'type':
85
- 'Compose',
86
- 'transforms': [{
87
- 'type': 'YOLOv5KeepRatioResize',
88
- 'scale': (320, 320)
89
- }, {
90
- 'type': 'LetterResize',
91
- 'scale': (320, 320),
92
- 'allow_scale_up': False,
93
- 'pad_val': {
94
- 'img': 114
95
- }
96
- }]
97
- }, {
98
- 'type':
99
- 'Compose',
100
- 'transforms': [{
101
- 'type': 'YOLOv5KeepRatioResize',
102
- 'scale': (960, 960)
103
- }, {
104
- 'type': 'LetterResize',
105
- 'scale': (960, 960),
106
- 'allow_scale_up': False,
107
- 'pad_val': {
108
- 'img': 114
109
- }
110
- }]
111
- }],
112
- [{
113
- 'type': 'mmdet.RandomFlip',
114
- 'prob': 1.0
115
- }, {
116
- 'type': 'mmdet.RandomFlip',
117
- 'prob': 0.0
118
- }], [{
119
- 'type': 'mmdet.LoadAnnotations',
120
- 'with_bbox': True
121
- }],
122
- [{
123
- 'type':
124
- 'mmdet.PackDetInputs',
125
- 'meta_keys':
126
- ('img_id', 'img_path', 'ori_shape', 'img_shape',
127
- 'scale_factor', 'pad_param', 'flip', 'flip_direction')
128
- }]])
129
- ]
130
- data_root = './data-df2/'
131
- train_ann_file = 'annotations/instances_train2017.json'
132
- train_data_prefix = 'train2017/'
133
- val_ann_file = 'annotations/instances_val2017.json'
134
- val_data_prefix = 'val2017/'
135
- num_classes = 13
136
- train_batch_size_per_gpu = 32
137
- train_num_workers = 8
138
- persistent_workers = True
139
- base_lr = 0.0025
140
- max_epochs = 100
141
- num_last_epochs = 15
142
- img_scale = (640, 640)
143
- dataset_type = 'YOLOv5CocoDataset'
144
- val_batch_size_per_gpu = 1
145
- val_num_workers = 2
146
- batch_shapes_cfg = dict(
147
- type='BatchShapePolicy',
148
- batch_size=1,
149
- img_size=640,
150
- size_divisor=32,
151
- extra_pad_ratio=0.5)
152
- deepen_factor = 0.33
153
- widen_factor = 0.5
154
- affine_scale = 0.5
155
- lr_factor = 0.01
156
- weight_decay = 0.0005
157
- save_epoch_intervals = 2
158
- max_keep_ckpts = 3
159
- model = dict(
160
- type='YOLODetector',
161
- data_preprocessor=dict(
162
- type='YOLOv5DetDataPreprocessor',
163
- mean=[0.0, 0.0, 0.0],
164
- std=[255.0, 255.0, 255.0],
165
- bgr_to_rgb=True),
166
- backbone=dict(
167
- type='YOLOv6EfficientRep',
168
- deepen_factor=0.33,
169
- widen_factor=0.5,
170
- norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
171
- act_cfg=dict(type='ReLU', inplace=True)),
172
- neck=dict(
173
- type='YOLOv6RepPAFPN',
174
- deepen_factor=0.33,
175
- widen_factor=0.5,
176
- in_channels=[256, 512, 1024],
177
- out_channels=[128, 256, 512],
178
- num_csp_blocks=12,
179
- norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
180
- act_cfg=dict(type='ReLU', inplace=True)),
181
- bbox_head=dict(
182
- type='YOLOv6Head',
183
- head_module=dict(
184
- type='YOLOv6HeadModule',
185
- num_classes=13,
186
- in_channels=[128, 256, 512],
187
- widen_factor=0.5,
188
- norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
189
- act_cfg=dict(type='SiLU', inplace=True),
190
- featmap_strides=[8, 16, 32]),
191
- loss_bbox=dict(
192
- type='IoULoss',
193
- iou_mode='giou',
194
- bbox_format='xyxy',
195
- reduction='mean',
196
- loss_weight=2.5,
197
- return_iou=False)),
198
- train_cfg=dict(
199
- initial_epoch=4,
200
- initial_assigner=dict(
201
- type='BatchATSSAssigner',
202
- num_classes=13,
203
- topk=9,
204
- iou_calculator=dict(type='mmdet.BboxOverlaps2D')),
205
- assigner=dict(
206
- type='BatchTaskAlignedAssigner',
207
- num_classes=13,
208
- topk=13,
209
- alpha=1,
210
- beta=6)),
211
- test_cfg=dict(
212
- multi_label=True,
213
- nms_pre=30000,
214
- score_thr=0.001,
215
- nms=dict(type='nms', iou_threshold=0.65),
216
- max_per_img=300))
217
- pre_transform = [
218
- dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
219
- dict(type='LoadAnnotations', with_bbox=True)
220
- ]
221
- train_pipeline = [
222
- dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
223
- dict(type='LoadAnnotations', with_bbox=True),
224
- dict(
225
- type='Mosaic',
226
- img_scale=(640, 640),
227
- pad_val=114.0,
228
- pre_transform=[
229
- dict(
230
- type='LoadImageFromFile',
231
- file_client_args=dict(backend='disk')),
232
- dict(type='LoadAnnotations', with_bbox=True)
233
- ]),
234
- dict(
235
- type='YOLOv5RandomAffine',
236
- max_rotate_degree=0.0,
237
- max_translate_ratio=0.1,
238
- scaling_ratio_range=(0.5, 1.5),
239
- border=(-320, -320),
240
- border_val=(114, 114, 114),
241
- max_shear_degree=0.0),
242
- dict(type='YOLOv5HSVRandomAug'),
243
- dict(type='mmdet.RandomFlip', prob=0.5),
244
- dict(
245
- type='mmdet.PackDetInputs',
246
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
247
- 'flip_direction'))
248
- ]
249
- train_pipeline_stage2 = [
250
- dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
251
- dict(type='LoadAnnotations', with_bbox=True),
252
- dict(type='YOLOv5KeepRatioResize', scale=(640, 640)),
253
- dict(
254
- type='LetterResize',
255
- scale=(640, 640),
256
- allow_scale_up=True,
257
- pad_val=dict(img=114)),
258
- dict(
259
- type='YOLOv5RandomAffine',
260
- max_rotate_degree=0.0,
261
- max_translate_ratio=0.1,
262
- scaling_ratio_range=(0.5, 1.5),
263
- max_shear_degree=0.0),
264
- dict(type='YOLOv5HSVRandomAug'),
265
- dict(type='mmdet.RandomFlip', prob=0.5),
266
- dict(
267
- type='mmdet.PackDetInputs',
268
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
269
- 'flip_direction'))
270
- ]
271
- train_dataloader = dict(
272
- batch_size=32,
273
- num_workers=8,
274
- collate_fn=dict(type='yolov5_collate'),
275
- persistent_workers=True,
276
- pin_memory=True,
277
- sampler=dict(type='DefaultSampler', shuffle=True),
278
- dataset=dict(
279
- type='RepeatDataset',
280
- times=2,
281
- dataset=dict(
282
- type='YOLOv5CocoDataset',
283
- data_root='./data-df2/',
284
- metainfo=dict(
285
- classes=('short_sleeved_shirt', 'long_sleeved_shirt',
286
- 'short_sleeved_outwear', 'long_sleeved_outwear',
287
- 'vest', 'sling', 'shorts', 'trousers', 'skirt',
288
- 'short_sleeved_dress', 'long_sleeved_dress',
289
- 'vest_dress', 'sling_dress'),
290
- palette=[(255, 0, 0), (255, 128, 0), (255, 255, 0),
291
- (128, 255, 0), (0, 255, 0), (0, 255, 128),
292
- (0, 255, 255), (0, 128, 255), (0, 0, 255),
293
- (127, 0, 255), (255, 0, 255), (255, 0, 127),
294
- (128, 128, 128)]),
295
- ann_file='annotations/trainval.json',
296
- data_prefix=dict(img='smaller-dataset/'),
297
- filter_cfg=dict(filter_empty_gt=False, min_size=32),
298
- pipeline=[
299
- dict(
300
- type='LoadImageFromFile',
301
- file_client_args=dict(backend='disk')),
302
- dict(type='LoadAnnotations', with_bbox=True),
303
- dict(
304
- type='Mosaic',
305
- img_scale=(640, 640),
306
- pad_val=114.0,
307
- pre_transform=[
308
- dict(
309
- type='LoadImageFromFile',
310
- file_client_args=dict(backend='disk')),
311
- dict(type='LoadAnnotations', with_bbox=True)
312
- ]),
313
- dict(
314
- type='YOLOv5RandomAffine',
315
- max_rotate_degree=0.0,
316
- max_translate_ratio=0.1,
317
- scaling_ratio_range=(0.5, 1.5),
318
- border=(-320, -320),
319
- border_val=(114, 114, 114),
320
- max_shear_degree=0.0),
321
- dict(type='YOLOv5HSVRandomAug'),
322
- dict(type='mmdet.RandomFlip', prob=0.5),
323
- dict(
324
- type='mmdet.PackDetInputs',
325
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
326
- 'flip', 'flip_direction'))
327
- ])))
328
- test_pipeline = [
329
- dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
330
- dict(type='YOLOv5KeepRatioResize', scale=(640, 640)),
331
- dict(
332
- type='LetterResize',
333
- scale=(640, 640),
334
- allow_scale_up=False,
335
- pad_val=dict(img=114)),
336
- dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
337
- dict(
338
- type='mmdet.PackDetInputs',
339
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
340
- 'scale_factor', 'pad_param'))
341
- ]
342
- val_dataloader = dict(
343
- batch_size=1,
344
- num_workers=2,
345
- persistent_workers=True,
346
- pin_memory=True,
347
- drop_last=False,
348
- sampler=dict(type='DefaultSampler', shuffle=False),
349
- dataset=dict(
350
- type='YOLOv5CocoDataset',
351
- data_root='./data-df2/',
352
- test_mode=True,
353
- data_prefix=dict(img='smaller-dataset/'),
354
- ann_file='annotations/trainval.json',
355
- pipeline=[
356
- dict(
357
- type='LoadImageFromFile',
358
- file_client_args=dict(backend='disk')),
359
- dict(type='YOLOv5KeepRatioResize', scale=(640, 640)),
360
- dict(
361
- type='LetterResize',
362
- scale=(640, 640),
363
- allow_scale_up=False,
364
- pad_val=dict(img=114)),
365
- dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
366
- dict(
367
- type='mmdet.PackDetInputs',
368
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
369
- 'scale_factor', 'pad_param'))
370
- ],
371
- batch_shapes_cfg=dict(
372
- type='BatchShapePolicy',
373
- batch_size=1,
374
- img_size=640,
375
- size_divisor=32,
376
- extra_pad_ratio=0.5),
377
- metainfo=dict(
378
- classes=('short_sleeved_shirt', 'long_sleeved_shirt',
379
- 'short_sleeved_outwear', 'long_sleeved_outwear', 'vest',
380
- 'sling', 'shorts', 'trousers', 'skirt',
381
- 'short_sleeved_dress', 'long_sleeved_dress', 'vest_dress',
382
- 'sling_dress'),
383
- palette=[(255, 0, 0), (255, 128, 0), (255, 255, 0), (128, 255, 0),
384
- (0, 255, 0), (0, 255, 128), (0, 255, 255), (0, 128, 255),
385
- (0, 0, 255), (127, 0, 255), (255, 0, 255), (255, 0, 127),
386
- (128, 128, 128)])))
387
- test_dataloader = dict(
388
- batch_size=1,
389
- num_workers=2,
390
- persistent_workers=True,
391
- pin_memory=True,
392
- drop_last=False,
393
- sampler=dict(type='DefaultSampler', shuffle=False),
394
- dataset=dict(
395
- type='YOLOv5CocoDataset',
396
- data_root='./data-df2/',
397
- test_mode=True,
398
- data_prefix=dict(img='smaller-dataset/'),
399
- ann_file='annotations/trainval.json',
400
- pipeline=[
401
- dict(
402
- type='LoadImageFromFile',
403
- file_client_args=dict(backend='disk')),
404
- dict(type='YOLOv5KeepRatioResize', scale=(640, 640)),
405
- dict(
406
- type='LetterResize',
407
- scale=(640, 640),
408
- allow_scale_up=False,
409
- pad_val=dict(img=114)),
410
- dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
411
- dict(
412
- type='mmdet.PackDetInputs',
413
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
414
- 'scale_factor', 'pad_param'))
415
- ],
416
- batch_shapes_cfg=dict(
417
- type='BatchShapePolicy',
418
- batch_size=1,
419
- img_size=640,
420
- size_divisor=32,
421
- extra_pad_ratio=0.5),
422
- metainfo=dict(
423
- classes=('short_sleeved_shirt', 'long_sleeved_shirt',
424
- 'short_sleeved_outwear', 'long_sleeved_outwear', 'vest',
425
- 'sling', 'shorts', 'trousers', 'skirt',
426
- 'short_sleeved_dress', 'long_sleeved_dress', 'vest_dress',
427
- 'sling_dress'),
428
- palette=[(255, 0, 0), (255, 128, 0), (255, 255, 0), (128, 255, 0),
429
- (0, 255, 0), (0, 255, 128), (0, 255, 255), (0, 128, 255),
430
- (0, 0, 255), (127, 0, 255), (255, 0, 255), (255, 0, 127),
431
- (128, 128, 128)])))
432
- optim_wrapper = dict(
433
- type='OptimWrapper',
434
- optimizer=dict(
435
- type='SGD',
436
- lr=0.0025,
437
- momentum=0.937,
438
- weight_decay=0.0005,
439
- nesterov=True,
440
- batch_size_per_gpu=32),
441
- constructor='YOLOv5OptimizerConstructor')
442
- custom_hooks = [
443
- dict(
444
- type='EMAHook',
445
- ema_type='ExpMomentumEMA',
446
- momentum=0.0001,
447
- update_buffers=True,
448
- strict_load=False,
449
- priority=49),
450
- dict(
451
- type='mmdet.PipelineSwitchHook',
452
- switch_epoch=-15,
453
- switch_pipeline=[
454
- dict(
455
- type='LoadImageFromFile',
456
- file_client_args=dict(backend='disk')),
457
- dict(type='LoadAnnotations', with_bbox=True),
458
- dict(type='YOLOv5KeepRatioResize', scale=(640, 640)),
459
- dict(
460
- type='LetterResize',
461
- scale=(640, 640),
462
- allow_scale_up=True,
463
- pad_val=dict(img=114)),
464
- dict(
465
- type='YOLOv5RandomAffine',
466
- max_rotate_degree=0.0,
467
- max_translate_ratio=0.1,
468
- scaling_ratio_range=(0.5, 1.5),
469
- max_shear_degree=0.0),
470
- dict(type='YOLOv5HSVRandomAug'),
471
- dict(type='mmdet.RandomFlip', prob=0.5),
472
- dict(
473
- type='mmdet.PackDetInputs',
474
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
475
- 'flip', 'flip_direction'))
476
- ])
477
- ]
478
- val_evaluator = dict(
479
- type='mmdet.CocoMetric',
480
- proposal_nums=(100, 1, 10),
481
- ann_file='./data-df2/annotations/trainval.json',
482
- metric='bbox')
483
- test_evaluator = dict(
484
- type='mmdet.CocoMetric',
485
- proposal_nums=(100, 1, 10),
486
- ann_file='./data-df2/annotations/trainval.json',
487
- metric='bbox')
488
- train_cfg = dict(
489
- type='EpochBasedTrainLoop',
490
- max_epochs=100,
491
- val_interval=2,
492
- dynamic_intervals=[(85, 1)],
493
- val_begin=20)
494
- val_cfg = dict(type='ValLoop')
495
- test_cfg = dict(type='TestLoop')
496
- work_dir = './work_dirs/yolov6_s_df2'
497
- class_name = ('short_sleeved_shirt', 'long_sleeved_shirt',
498
- 'short_sleeved_outwear', 'long_sleeved_outwear', 'vest', 'sling',
499
- 'shorts', 'trousers', 'skirt', 'short_sleeved_dress',
500
- 'long_sleeved_dress', 'vest_dress', 'sling_dress')
501
- metainfo = dict(
502
- classes=('short_sleeved_shirt', 'long_sleeved_shirt',
503
- 'short_sleeved_outwear', 'long_sleeved_outwear', 'vest', 'sling',
504
- 'shorts', 'trousers', 'skirt', 'short_sleeved_dress',
505
- 'long_sleeved_dress', 'vest_dress', 'sling_dress'),
506
- palette=[(255, 0, 0), (255, 128, 0), (255, 255, 0), (128, 255, 0),
507
- (0, 255, 0), (0, 255, 128), (0, 255, 255), (0, 128, 255),
508
- (0, 0, 255), (127, 0, 255), (255, 0, 255), (255, 0, 127),
509
- (128, 128, 128)])
510
- launcher = 'pytorch'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AashishKumar/Restaurant_voice_chatbot/app.py DELETED
@@ -1,199 +0,0 @@
1
- import openai
2
- import speech_recognition as sr
3
- import gradio as gr
4
-
5
- restaurants = {
6
- "Joe's Pizza": {
7
- "Margherita Pizza": 14.99,
8
- "Pepperoni Pizza": 16.99,
9
- "Vegetable Pizza": 15.99,
10
- "Caesar Salad": 8.99,
11
- "Garlic Knots": 5.99,
12
- "Diet Coke": 2.99,
13
- "Chocolate Chip Cookie": 2.99
14
- },
15
- "Bamboo House": {
16
- "Whopper Meal": 7.99,
17
- "Chicken Fries": 3.99,
18
- "Impossible Whopper": 5.99,
19
- "Chicken Sandwich": 4.99,
20
- "Onion Rings": 2.99,
21
- "Fountain Drink": 1.99,
22
- "Apple Pie": 1.49
23
- },
24
- "Taco Bell": {
25
- "Crunchwrap Supreme": 4.99,
26
- "Beef Quesarito": 3.99,
27
- "Nachos Supreme": 2.99,
28
- "Cheesy Gordita Crunch": 4.99,
29
- "Soft Taco": 1.29,
30
- "Baja Blast Freeze": 2.49,
31
- "Cinnamon Twists": 1.99
32
- },
33
- "Curry Kingdom": {
34
- "Big Mac-Meal": 6.49,
35
- "10-Piece Chicken-McNuggets": 4.29,
36
- "Filet Fish": 3.79,
37
- "Cheessy Quarte-Pounder": 5.19,
38
- "French Fries": 1.89,
39
- "Soft Drink": 1.00,
40
- "Apple Pie": 0.99
41
- },
42
- "Chipotle House": {
43
- "Burrito Bowl": 7.99,
44
- "Steak Quesadilla": 4.99,
45
- "Crispy Tacos": 3.99,
46
- "Barbacoa Salad": 8.99,
47
- "Chips Guac": 3.99,
48
- "Soft Drinks": 2.29,
49
- "Chocolate Brownie": 2.25
50
- }
51
- }
52
-
53
-
54
- # ChatGPT API setup
55
- openai.api_key = "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"
56
-
57
- def recognize_speech(audio):
58
- audio_file = open(audio,"rb")
59
- transcript=openai.Audio.transcribe("whisper-1",audio_file)
60
- print(transcript)
61
-
62
- return transcript["text"]
63
-
64
- def chatbot(command):
65
- response = openai.ChatCompletion.create(
66
- model="gpt-3.5-turbo",
67
- messages=[
68
- {"role": "system", "content": "You are a Restaurants chatbot Who takes order,shows menus and answers users food related querry's presiously"},
69
- {"role": "user", "content": command},
70
- ]
71
- )
72
- result = ''
73
- for choice in response.choices:
74
- result += choice.message.content
75
- return result
76
-
77
- def nlp(txt_summ):
78
- completion = openai.Completion.create(
79
- model="text-davinci-003",
80
- prompt= txt_summ ,
81
- temperature=0.7,
82
- max_tokens=64,
83
- top_p=1.0,
84
- frequency_penalty=0.0,
85
- presence_penalty=0.0
86
- )
87
- response = completion.choices[0].text
88
-
89
- return response
90
- # Get menu for a specific restaurant
91
- def get_menu_items(restaurant):
92
- return restaurants[restaurant]
93
-
94
- def identify_food_command(text):
95
- keywords = ['order', 'menu', 'food']
96
- for keyword in keywords:
97
- if keyword in text:
98
- return keyword
99
- return None
100
-
101
- # Main function to handle user inputs and chatbot responses
102
- def main(audio):
103
- while True:
104
- try:
105
- print("What can I help you with?")
106
- command = recognize_speech(audio)
107
- print(f"You said: {command}")
108
- txt_command ="extract the food related command keyword from the sentence :\n\n"+command
109
- food_command = (" ".join(nlp(txt_command).strip().split()[-1:])).lower()
110
- print(food_command)
111
-
112
- restaurant_name = ''
113
- txt_extract = "extract the restaurants name from the sentence :\n\n"+command
114
- restaurant_name = " ".join(((nlp(txt_extract)).strip()).title().split()[-2:])
115
- found_rest = False
116
- if(restaurant_name in restaurants.keys()):
117
- found_rest = True
118
-
119
- item_name = ''
120
- txt_extract = "extract the food name from the given sentence :\n\n"+command
121
- item_name = " ".join(((nlp(txt_extract)).strip()).title().strip(".,;:").split()[-2:])
122
- found_item = False
123
- for restaurant, rest_info in restaurants.items():
124
- if item_name in rest_info:
125
- found_item = True
126
-
127
-
128
- print(found_item , found_rest)
129
- if food_command in ['order', 'eat' , 'want' , 'serve' , 'prepare' ]:
130
- if not found_rest and found_item:
131
- temp_val = {}
132
- for restaurant, rest_info in restaurants.items():
133
- if item_name in rest_info:
134
- temp_val[restaurant] = rest_info[item_name]
135
- if temp_val:
136
- min_price = min(temp_val.values())
137
- res = [key for key in temp_val if temp_val[key] == min_price]
138
- response = f"You have ordered {item_name} from {res[0]} with price of {min_price}"
139
-
140
-
141
- elif found_rest and found_item:
142
- response = f"\nYou ordered {item_name} from {restaurant_name}\nGreat! Thank you for ordering."
143
-
144
- elif found_rest and not found_item :
145
- menu_items = get_menu_items(restaurant_name)
146
- response = f"Sure, here's the menu for {restaurant_name}: {menu_items} What would you like to order?"
147
- max_tries = 3
148
- for i in range(max_tries):
149
- order_audio = input()
150
- item = order_audio
151
- if item == "nothing":
152
- response = "Okay! You don't want anything"
153
- break
154
- elif item in restaurants[restaurant_name].keys():
155
- response = f"\nYou ordered {item} from {restaurant_name.title()}\nGreat! Thank you for ordering."
156
- break
157
- else:
158
- if i == max_tries - 1:
159
- response = "Sorry, you didn't provide any valid input. Goodbye!"
160
- break
161
- else:
162
- response = "Sorry, that item is not available at this restaurant. Please try again."
163
- else :
164
- resp = "Respond properly and Try to make the Customer buy some food and for the valid response"
165
- response = chatbot(resp)
166
-
167
-
168
- elif food_command in ['menu' , 'menus' , 'catalogue' , 'items' , 'something']:
169
- if found_rest:
170
- menu_items = get_menu_items(restaurant_name)
171
- response = f"Here's the menu for {restaurant_name}: {menu_items}"
172
- else:
173
- response = chatbot(command)
174
- elif identify_food_command(command) == 'food':
175
- response=chatbot("Respond a person properly who has come to your restaurant asking food")
176
- else:
177
- response=chatbot("Response, as if you cannot understand and make the person salivate so that he buys a food . Also Give proper reply for the output\n"+command)
178
-
179
- return response
180
-
181
- except sr.UnknownValueError:
182
- print("Sorry, I did not understand what you said.")
183
- except sr.RequestError:
184
- print("Sorry, I am unable to process your request.")
185
- except Exception as e:
186
- print("An error occurred:", e)
187
-
188
-
189
-
190
- interface = gr.Interface(
191
- main,
192
- inputs=gr.Audio(source="microphone", type="filepath", label="Input Audio"),
193
- outputs= gr.Textbox(label="Foodie Chatbot's response"),
194
- title="Foodie Chatbot",
195
- description="Talk to the Foodie Chatbot and get restaurant recommendations and menus!",
196
- )
197
- if __name__ == "__main__":
198
- interface.launch(inline=False)
199
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/__init__.py DELETED
File without changes
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthbuttons/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import FixWidthButtons from './FixWidthButtons.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('fixWidthButtons', function (config) {
6
- var gameObject = new FixWidthButtons(this.scene, config);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.FixWidthButtons', FixWidthButtons);
12
-
13
- export default FixWidthButtons;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AkitoP/umamusume_bert_vits2/server.py DELETED
@@ -1,170 +0,0 @@
1
- from flask import Flask, request, Response
2
- from io import BytesIO
3
- import torch
4
- from av import open as avopen
5
-
6
- import commons
7
- import utils
8
- from models import SynthesizerTrn
9
- from text.symbols import symbols
10
- from text import cleaned_text_to_sequence, get_bert
11
- from text.cleaner import clean_text
12
- from scipy.io import wavfile
13
-
14
- # Flask Init
15
- app = Flask(__name__)
16
- app.config["JSON_AS_ASCII"] = False
17
-
18
-
19
- def get_text(text, language_str, hps):
20
- norm_text, phone, tone, word2ph = clean_text(text, language_str)
21
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
22
-
23
- if hps.data.add_blank:
24
- phone = commons.intersperse(phone, 0)
25
- tone = commons.intersperse(tone, 0)
26
- language = commons.intersperse(language, 0)
27
- for i in range(len(word2ph)):
28
- word2ph[i] = word2ph[i] * 2
29
- word2ph[0] += 1
30
- bert = get_bert(norm_text, word2ph, language_str)
31
- del word2ph
32
- assert bert.shape[-1] == len(phone), phone
33
-
34
- if language_str == "ZH":
35
- bert = bert
36
- ja_bert = torch.zeros(768, len(phone))
37
- elif language_str == "JA":
38
- ja_bert = bert
39
- bert = torch.zeros(1024, len(phone))
40
- else:
41
- bert = torch.zeros(1024, len(phone))
42
- ja_bert = torch.zeros(768, len(phone))
43
- assert bert.shape[-1] == len(
44
- phone
45
- ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"
46
- phone = torch.LongTensor(phone)
47
- tone = torch.LongTensor(tone)
48
- language = torch.LongTensor(language)
49
- return bert, ja_bert, phone, tone, language
50
-
51
-
52
- def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language):
53
- bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps)
54
- with torch.no_grad():
55
- x_tst = phones.to(dev).unsqueeze(0)
56
- tones = tones.to(dev).unsqueeze(0)
57
- lang_ids = lang_ids.to(dev).unsqueeze(0)
58
- bert = bert.to(dev).unsqueeze(0)
59
- ja_bert = ja_bert.to(device).unsqueeze(0)
60
- x_tst_lengths = torch.LongTensor([phones.size(0)]).to(dev)
61
- speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(dev)
62
- audio = (
63
- net_g.infer(
64
- x_tst,
65
- x_tst_lengths,
66
- speakers,
67
- tones,
68
- lang_ids,
69
- bert,
70
- ja_bert,
71
- sdp_ratio=sdp_ratio,
72
- noise_scale=noise_scale,
73
- noise_scale_w=noise_scale_w,
74
- length_scale=length_scale,
75
- )[0][0, 0]
76
- .data.cpu()
77
- .float()
78
- .numpy()
79
- )
80
- return audio
81
-
82
-
83
- def replace_punctuation(text, i=2):
84
- punctuation = ",。?!"
85
- for char in punctuation:
86
- text = text.replace(char, char * i)
87
- return text
88
-
89
-
90
- def wav2(i, o, format):
91
- inp = avopen(i, "rb")
92
- out = avopen(o, "wb", format=format)
93
- if format == "ogg":
94
- format = "libvorbis"
95
-
96
- ostream = out.add_stream(format)
97
-
98
- for frame in inp.decode(audio=0):
99
- for p in ostream.encode(frame):
100
- out.mux(p)
101
-
102
- for p in ostream.encode(None):
103
- out.mux(p)
104
-
105
- out.close()
106
- inp.close()
107
-
108
-
109
- # Load Generator
110
- hps = utils.get_hparams_from_file("./configs/config.json")
111
-
112
- dev = "cuda"
113
- net_g = SynthesizerTrn(
114
- len(symbols),
115
- hps.data.filter_length // 2 + 1,
116
- hps.train.segment_size // hps.data.hop_length,
117
- n_speakers=hps.data.n_speakers,
118
- **hps.model,
119
- ).to(dev)
120
- _ = net_g.eval()
121
-
122
- _ = utils.load_checkpoint("logs/G_649000.pth", net_g, None, skip_optimizer=True)
123
-
124
-
125
- @app.route("/")
126
- def main():
127
- try:
128
- speaker = request.args.get("speaker")
129
- text = request.args.get("text").replace("/n", "")
130
- sdp_ratio = float(request.args.get("sdp_ratio", 0.2))
131
- noise = float(request.args.get("noise", 0.5))
132
- noisew = float(request.args.get("noisew", 0.6))
133
- length = float(request.args.get("length", 1.2))
134
- language = request.args.get("language")
135
- if length >= 2:
136
- return "Too big length"
137
- if len(text) >= 250:
138
- return "Too long text"
139
- fmt = request.args.get("format", "wav")
140
- if None in (speaker, text):
141
- return "Missing Parameter"
142
- if fmt not in ("mp3", "wav", "ogg"):
143
- return "Invalid Format"
144
- if language not in ("JA", "ZH"):
145
- return "Invalid language"
146
- except:
147
- return "Invalid Parameter"
148
-
149
- with torch.no_grad():
150
- audio = infer(
151
- text,
152
- sdp_ratio=sdp_ratio,
153
- noise_scale=noise,
154
- noise_scale_w=noisew,
155
- length_scale=length,
156
- sid=speaker,
157
- language=language,
158
- )
159
-
160
- with BytesIO() as wav:
161
- wavfile.write(wav, hps.data.sampling_rate, audio)
162
- torch.cuda.empty_cache()
163
- if fmt == "wav":
164
- return Response(wav.getvalue(), mimetype="audio/wav")
165
- wav.seek(0, 0)
166
- with BytesIO() as ofp:
167
- wav2(wav, ofp, fmt)
168
- return Response(
169
- ofp.getvalue(), mimetype="audio/mpeg" if fmt == "mp3" else "audio/ogg"
170
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alpaca233/SadTalker/src/audio2pose_models/networks.py DELETED
@@ -1,140 +0,0 @@
1
- import torch.nn as nn
2
- import torch
3
-
4
-
5
- class ResidualConv(nn.Module):
6
- def __init__(self, input_dim, output_dim, stride, padding):
7
- super(ResidualConv, self).__init__()
8
-
9
- self.conv_block = nn.Sequential(
10
- nn.BatchNorm2d(input_dim),
11
- nn.ReLU(),
12
- nn.Conv2d(
13
- input_dim, output_dim, kernel_size=3, stride=stride, padding=padding
14
- ),
15
- nn.BatchNorm2d(output_dim),
16
- nn.ReLU(),
17
- nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=1),
18
- )
19
- self.conv_skip = nn.Sequential(
20
- nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=stride, padding=1),
21
- nn.BatchNorm2d(output_dim),
22
- )
23
-
24
- def forward(self, x):
25
-
26
- return self.conv_block(x) + self.conv_skip(x)
27
-
28
-
29
- class Upsample(nn.Module):
30
- def __init__(self, input_dim, output_dim, kernel, stride):
31
- super(Upsample, self).__init__()
32
-
33
- self.upsample = nn.ConvTranspose2d(
34
- input_dim, output_dim, kernel_size=kernel, stride=stride
35
- )
36
-
37
- def forward(self, x):
38
- return self.upsample(x)
39
-
40
-
41
- class Squeeze_Excite_Block(nn.Module):
42
- def __init__(self, channel, reduction=16):
43
- super(Squeeze_Excite_Block, self).__init__()
44
- self.avg_pool = nn.AdaptiveAvgPool2d(1)
45
- self.fc = nn.Sequential(
46
- nn.Linear(channel, channel // reduction, bias=False),
47
- nn.ReLU(inplace=True),
48
- nn.Linear(channel // reduction, channel, bias=False),
49
- nn.Sigmoid(),
50
- )
51
-
52
- def forward(self, x):
53
- b, c, _, _ = x.size()
54
- y = self.avg_pool(x).view(b, c)
55
- y = self.fc(y).view(b, c, 1, 1)
56
- return x * y.expand_as(x)
57
-
58
-
59
- class ASPP(nn.Module):
60
- def __init__(self, in_dims, out_dims, rate=[6, 12, 18]):
61
- super(ASPP, self).__init__()
62
-
63
- self.aspp_block1 = nn.Sequential(
64
- nn.Conv2d(
65
- in_dims, out_dims, 3, stride=1, padding=rate[0], dilation=rate[0]
66
- ),
67
- nn.ReLU(inplace=True),
68
- nn.BatchNorm2d(out_dims),
69
- )
70
- self.aspp_block2 = nn.Sequential(
71
- nn.Conv2d(
72
- in_dims, out_dims, 3, stride=1, padding=rate[1], dilation=rate[1]
73
- ),
74
- nn.ReLU(inplace=True),
75
- nn.BatchNorm2d(out_dims),
76
- )
77
- self.aspp_block3 = nn.Sequential(
78
- nn.Conv2d(
79
- in_dims, out_dims, 3, stride=1, padding=rate[2], dilation=rate[2]
80
- ),
81
- nn.ReLU(inplace=True),
82
- nn.BatchNorm2d(out_dims),
83
- )
84
-
85
- self.output = nn.Conv2d(len(rate) * out_dims, out_dims, 1)
86
- self._init_weights()
87
-
88
- def forward(self, x):
89
- x1 = self.aspp_block1(x)
90
- x2 = self.aspp_block2(x)
91
- x3 = self.aspp_block3(x)
92
- out = torch.cat([x1, x2, x3], dim=1)
93
- return self.output(out)
94
-
95
- def _init_weights(self):
96
- for m in self.modules():
97
- if isinstance(m, nn.Conv2d):
98
- nn.init.kaiming_normal_(m.weight)
99
- elif isinstance(m, nn.BatchNorm2d):
100
- m.weight.data.fill_(1)
101
- m.bias.data.zero_()
102
-
103
-
104
- class Upsample_(nn.Module):
105
- def __init__(self, scale=2):
106
- super(Upsample_, self).__init__()
107
-
108
- self.upsample = nn.Upsample(mode="bilinear", scale_factor=scale)
109
-
110
- def forward(self, x):
111
- return self.upsample(x)
112
-
113
-
114
- class AttentionBlock(nn.Module):
115
- def __init__(self, input_encoder, input_decoder, output_dim):
116
- super(AttentionBlock, self).__init__()
117
-
118
- self.conv_encoder = nn.Sequential(
119
- nn.BatchNorm2d(input_encoder),
120
- nn.ReLU(),
121
- nn.Conv2d(input_encoder, output_dim, 3, padding=1),
122
- nn.MaxPool2d(2, 2),
123
- )
124
-
125
- self.conv_decoder = nn.Sequential(
126
- nn.BatchNorm2d(input_decoder),
127
- nn.ReLU(),
128
- nn.Conv2d(input_decoder, output_dim, 3, padding=1),
129
- )
130
-
131
- self.conv_attn = nn.Sequential(
132
- nn.BatchNorm2d(output_dim),
133
- nn.ReLU(),
134
- nn.Conv2d(output_dim, 1, 1),
135
- )
136
-
137
- def forward(self, x1, x2):
138
- out = self.conv_encoder(x1) + self.conv_decoder(x2)
139
- out = self.conv_attn(out)
140
- return out * x2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/visualizer_drag.py DELETED
@@ -1,429 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- import click
10
- import os
11
-
12
- import multiprocessing
13
- import numpy as np
14
- import torch
15
- import imgui
16
- import dnnlib
17
- from gui_utils import imgui_window
18
- from gui_utils import imgui_utils
19
- from gui_utils import gl_utils
20
- from gui_utils import text_utils
21
- from viz import renderer
22
- from viz import pickle_widget
23
- from viz import latent_widget
24
- from viz import drag_widget
25
- from viz import capture_widget
26
-
27
- # ----------------------------------------------------------------------------
28
-
29
-
30
- class Visualizer(imgui_window.ImguiWindow):
31
- def __init__(self, capture_dir=None):
32
- super().__init__(title='DragGAN', window_width=3840, window_height=2160)
33
-
34
- # Internals.
35
- self._last_error_print = None
36
- self._async_renderer = AsyncRenderer()
37
- self._defer_rendering = 0
38
- self._tex_img = None
39
- self._tex_obj = None
40
- self._mask_obj = None
41
- self._image_area = None
42
- self._status = dnnlib.EasyDict()
43
-
44
- # Widget interface.
45
- self.args = dnnlib.EasyDict()
46
- self.result = dnnlib.EasyDict()
47
- self.pane_w = 0
48
- self.label_w = 0
49
- self.button_w = 0
50
- self.image_w = 0
51
- self.image_h = 0
52
-
53
- # Widgets.
54
- self.pickle_widget = pickle_widget.PickleWidget(self)
55
- self.latent_widget = latent_widget.LatentWidget(self)
56
- self.drag_widget = drag_widget.DragWidget(self)
57
- self.capture_widget = capture_widget.CaptureWidget(self)
58
-
59
- if capture_dir is not None:
60
- self.capture_widget.path = capture_dir
61
-
62
- # Initialize window.
63
- self.set_position(0, 0)
64
- self._adjust_font_size()
65
- self.skip_frame() # Layout may change after first frame.
66
-
67
- def close(self):
68
- super().close()
69
- if self._async_renderer is not None:
70
- self._async_renderer.close()
71
- self._async_renderer = None
72
-
73
- def add_recent_pickle(self, pkl, ignore_errors=False):
74
- self.pickle_widget.add_recent(pkl, ignore_errors=ignore_errors)
75
-
76
- def load_pickle(self, pkl, ignore_errors=False):
77
- self.pickle_widget.load(pkl, ignore_errors=ignore_errors)
78
-
79
- def print_error(self, error):
80
- error = str(error)
81
- if error != self._last_error_print:
82
- print('\n' + error + '\n')
83
- self._last_error_print = error
84
-
85
- def defer_rendering(self, num_frames=1):
86
- self._defer_rendering = max(self._defer_rendering, num_frames)
87
-
88
- def clear_result(self):
89
- self._async_renderer.clear_result()
90
-
91
- def set_async(self, is_async):
92
- if is_async != self._async_renderer.is_async:
93
- self._async_renderer.set_async(is_async)
94
- self.clear_result()
95
- if 'image' in self.result:
96
- self.result.message = 'Switching rendering process...'
97
- self.defer_rendering()
98
-
99
- def _adjust_font_size(self):
100
- old = self.font_size
101
- self.set_font_size(
102
- min(self.content_width / 120, self.content_height / 60))
103
- if self.font_size != old:
104
- self.skip_frame() # Layout changed.
105
-
106
- def check_update_mask(self, **args):
107
- update_mask = False
108
- if 'pkl' in self._status:
109
- if self._status.pkl != args['pkl']:
110
- update_mask = True
111
- self._status.pkl = args['pkl']
112
- if 'w0_seed' in self._status:
113
- if self._status.w0_seed != args['w0_seed']:
114
- update_mask = True
115
- self._status.w0_seed = args['w0_seed']
116
- return update_mask
117
-
118
- def capture_image_frame(self):
119
- self.capture_next_frame()
120
- captured_frame = self.pop_captured_frame()
121
- captured_image = None
122
- if captured_frame is not None:
123
- x1, y1, w, h = self._image_area
124
- captured_image = captured_frame[y1:y1+h, x1:x1+w, :]
125
- return captured_image
126
-
127
- def get_drag_info(self):
128
- seed = self.latent_widget.seed
129
- points = self.drag_widget.points
130
- targets = self.drag_widget.targets
131
- mask = self.drag_widget.mask
132
- w = self._async_renderer._renderer_obj.w
133
- return seed, points, targets, mask, w
134
-
135
- def draw_frame(self):
136
- self.begin_frame()
137
- self.args = dnnlib.EasyDict()
138
- self.pane_w = self.font_size * 18
139
- self.button_w = self.font_size * 5
140
- self.label_w = round(self.font_size * 4.5)
141
-
142
- # Detect mouse dragging in the result area.
143
- if self._image_area is not None:
144
- if not hasattr(self.drag_widget, 'width'):
145
- self.drag_widget.init_mask(self.image_w, self.image_h)
146
- clicked, down, img_x, img_y = imgui_utils.click_hidden_window(
147
- '##image_area', self._image_area[0], self._image_area[1], self._image_area[2], self._image_area[3], self.image_w, self.image_h)
148
- self.drag_widget.action(clicked, down, img_x, img_y)
149
-
150
- # Begin control pane.
151
- imgui.set_next_window_position(0, 0)
152
- imgui.set_next_window_size(self.pane_w, self.content_height)
153
- imgui.begin('##control_pane', closable=False, flags=(
154
- imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE))
155
-
156
- # Widgets.
157
- expanded, _visible = imgui_utils.collapsing_header(
158
- 'Network & latent', default=True)
159
- self.pickle_widget(expanded)
160
- self.latent_widget(expanded)
161
- expanded, _visible = imgui_utils.collapsing_header(
162
- 'Drag', default=True)
163
- self.drag_widget(expanded)
164
- expanded, _visible = imgui_utils.collapsing_header(
165
- 'Capture', default=True)
166
- self.capture_widget(expanded)
167
-
168
- # Render.
169
- if self.is_skipping_frames():
170
- pass
171
- elif self._defer_rendering > 0:
172
- self._defer_rendering -= 1
173
- elif self.args.pkl is not None:
174
- self._async_renderer.set_args(**self.args)
175
- result = self._async_renderer.get_result()
176
- if result is not None:
177
- self.result = result
178
- if 'stop' in self.result and self.result.stop:
179
- self.drag_widget.stop_drag()
180
- if 'points' in self.result:
181
- self.drag_widget.set_points(self.result.points)
182
- if 'init_net' in self.result:
183
- if self.result.init_net:
184
- self.drag_widget.reset_point()
185
-
186
- if self.check_update_mask(**self.args):
187
- h, w, _ = self.result.image.shape
188
- self.drag_widget.init_mask(w, h)
189
-
190
- # Display.
191
- max_w = self.content_width - self.pane_w
192
- max_h = self.content_height
193
- pos = np.array([self.pane_w + max_w / 2, max_h / 2])
194
- if 'image' in self.result:
195
- if self._tex_img is not self.result.image:
196
- self._tex_img = self.result.image
197
- if self._tex_obj is None or not self._tex_obj.is_compatible(image=self._tex_img):
198
- self._tex_obj = gl_utils.Texture(
199
- image=self._tex_img, bilinear=False, mipmap=False)
200
- else:
201
- self._tex_obj.update(self._tex_img)
202
- self.image_h, self.image_w = self._tex_obj.height, self._tex_obj.width
203
- zoom = min(max_w / self._tex_obj.width,
204
- max_h / self._tex_obj.height)
205
- zoom = np.floor(zoom) if zoom >= 1 else zoom
206
- self._tex_obj.draw(pos=pos, zoom=zoom, align=0.5, rint=True)
207
- if self.drag_widget.show_mask and hasattr(self.drag_widget, 'mask'):
208
- mask = ((1-self.drag_widget.mask.unsqueeze(-1))
209
- * 255).to(torch.uint8)
210
- if self._mask_obj is None or not self._mask_obj.is_compatible(image=self._tex_img):
211
- self._mask_obj = gl_utils.Texture(
212
- image=mask, bilinear=False, mipmap=False)
213
- else:
214
- self._mask_obj.update(mask)
215
- self._mask_obj.draw(pos=pos, zoom=zoom,
216
- align=0.5, rint=True, alpha=0.15)
217
-
218
- if self.drag_widget.mode in ['flexible', 'fixed']:
219
- posx, posy = imgui.get_mouse_pos()
220
- if posx >= self.pane_w:
221
- pos_c = np.array([posx, posy])
222
- gl_utils.draw_circle(
223
- center=pos_c, radius=self.drag_widget.r_mask * zoom, alpha=0.5)
224
-
225
- rescale = self._tex_obj.width / 512 * zoom
226
-
227
- for point in self.drag_widget.targets:
228
- pos_x = self.pane_w + max_w / 2 + \
229
- (point[1] - self.image_w//2) * zoom
230
- pos_y = max_h / 2 + (point[0] - self.image_h//2) * zoom
231
- gl_utils.draw_circle(center=np.array([pos_x, pos_y]), color=[
232
- 0, 0, 1], radius=9 * rescale)
233
-
234
- for point in self.drag_widget.points:
235
- pos_x = self.pane_w + max_w / 2 + \
236
- (point[1] - self.image_w//2) * zoom
237
- pos_y = max_h / 2 + (point[0] - self.image_h//2) * zoom
238
- gl_utils.draw_circle(center=np.array([pos_x, pos_y]), color=[
239
- 1, 0, 0], radius=9 * rescale)
240
-
241
- for point, target in zip(self.drag_widget.points, self.drag_widget.targets):
242
- t_x = self.pane_w + max_w / 2 + \
243
- (target[1] - self.image_w//2) * zoom
244
- t_y = max_h / 2 + (target[0] - self.image_h//2) * zoom
245
-
246
- p_x = self.pane_w + max_w / 2 + \
247
- (point[1] - self.image_w//2) * zoom
248
- p_y = max_h / 2 + (point[0] - self.image_h//2) * zoom
249
-
250
- gl_utils.draw_arrow(p_x, p_y, t_x, t_y,
251
- l=8 * rescale, width=3 * rescale)
252
-
253
- imshow_w = int(self._tex_obj.width * zoom)
254
- imshow_h = int(self._tex_obj.height * zoom)
255
- self._image_area = [int(self.pane_w + max_w / 2 - imshow_w / 2),
256
- int(max_h / 2 - imshow_h / 2), imshow_w, imshow_h]
257
- if 'error' in self.result:
258
- self.print_error(self.result.error)
259
- if 'message' not in self.result:
260
- self.result.message = str(self.result.error)
261
- if 'message' in self.result:
262
- tex = text_utils.get_texture(
263
- self.result.message, size=self.font_size, max_width=max_w, max_height=max_h, outline=2)
264
- tex.draw(pos=pos, align=0.5, rint=True, color=1)
265
-
266
- # End frame.
267
- self._adjust_font_size()
268
- imgui.end()
269
- self.end_frame()
270
-
271
- # ----------------------------------------------------------------------------
272
-
273
-
274
- class AsyncRenderer:
275
- def __init__(self):
276
- self._closed = False
277
- self._is_async = False
278
- self._cur_args = None
279
- self._cur_result = None
280
- self._cur_stamp = 0
281
- self._renderer_obj = None
282
- self._args_queue = None
283
- self._result_queue = None
284
- self._process = None
285
-
286
- def close(self):
287
- self._closed = True
288
- self._renderer_obj = None
289
- if self._process is not None:
290
- self._process.terminate()
291
- self._process = None
292
- self._args_queue = None
293
- self._result_queue = None
294
-
295
- @property
296
- def is_async(self):
297
- return self._is_async
298
-
299
- def set_async(self, is_async):
300
- self._is_async = is_async
301
-
302
- def set_args(self, **args):
303
- assert not self._closed
304
- args2 = args.copy()
305
- args_mask = args2.pop('mask')
306
- if self._cur_args:
307
- _cur_args = self._cur_args.copy()
308
- cur_args_mask = _cur_args.pop('mask')
309
- else:
310
- _cur_args = self._cur_args
311
- # if args != self._cur_args:
312
- if args2 != _cur_args:
313
- if self._is_async:
314
- self._set_args_async(**args)
315
- else:
316
- self._set_args_sync(**args)
317
- self._cur_args = args
318
-
319
- def _set_args_async(self, **args):
320
- if self._process is None:
321
- self._args_queue = multiprocessing.Queue()
322
- self._result_queue = multiprocessing.Queue()
323
- try:
324
- multiprocessing.set_start_method('spawn')
325
- except RuntimeError:
326
- pass
327
- self._process = multiprocessing.Process(target=self._process_fn, args=(
328
- self._args_queue, self._result_queue), daemon=True)
329
- self._process.start()
330
- self._args_queue.put([args, self._cur_stamp])
331
-
332
- def _set_args_sync(self, **args):
333
- if self._renderer_obj is None:
334
- self._renderer_obj = renderer.Renderer()
335
- self._cur_result = self._renderer_obj.render(**args)
336
-
337
- def get_result(self):
338
- assert not self._closed
339
- if self._result_queue is not None:
340
- while self._result_queue.qsize() > 0:
341
- result, stamp = self._result_queue.get()
342
- if stamp == self._cur_stamp:
343
- self._cur_result = result
344
- return self._cur_result
345
-
346
- def clear_result(self):
347
- assert not self._closed
348
- self._cur_args = None
349
- self._cur_result = None
350
- self._cur_stamp += 1
351
-
352
- @staticmethod
353
- def _process_fn(args_queue, result_queue):
354
- renderer_obj = renderer.Renderer()
355
- cur_args = None
356
- cur_stamp = None
357
- while True:
358
- args, stamp = args_queue.get()
359
- while args_queue.qsize() > 0:
360
- args, stamp = args_queue.get()
361
- if args != cur_args or stamp != cur_stamp:
362
- result = renderer_obj.render(**args)
363
- if 'error' in result:
364
- result.error = renderer.CapturedException(result.error)
365
- result_queue.put([result, stamp])
366
- cur_args = args
367
- cur_stamp = stamp
368
-
369
- # ----------------------------------------------------------------------------
370
-
371
-
372
- @click.command()
373
- @click.argument('pkls', metavar='PATH', nargs=-1)
374
- @click.option('--capture-dir', help='Where to save screenshot captures', metavar='PATH', default=None)
375
- @click.option('--browse-dir', help='Specify model path for the \'Browse...\' button', metavar='PATH')
376
- def main(
377
- pkls,
378
- capture_dir,
379
- browse_dir
380
- ):
381
- """Interactive model visualizer.
382
-
383
- Optional PATH argument can be used specify which .pkl file to load.
384
- """
385
- viz = Visualizer(capture_dir=capture_dir)
386
-
387
- if browse_dir is not None:
388
- viz.pickle_widget.search_dirs = [browse_dir]
389
-
390
- # List pickles.
391
- if len(pkls) > 0:
392
- for pkl in pkls:
393
- viz.add_recent_pickle(pkl)
394
- viz.load_pickle(pkls[0])
395
- else:
396
- pretrained = [
397
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqcat-512x512.pkl',
398
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqdog-512x512.pkl',
399
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqv2-512x512.pkl',
400
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqwild-512x512.pkl',
401
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-brecahad-512x512.pkl',
402
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-celebahq-256x256.pkl',
403
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-cifar10-32x32.pkl',
404
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-1024x1024.pkl',
405
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-256x256.pkl',
406
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-512x512.pkl',
407
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhqu-1024x1024.pkl',
408
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhqu-256x256.pkl',
409
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-lsundog-256x256.pkl',
410
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-metfaces-1024x1024.pkl',
411
- 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-metfacesu-1024x1024.pkl'
412
- ]
413
-
414
- # Populate recent pickles list with pretrained model URLs.
415
- for url in pretrained:
416
- viz.add_recent_pickle(url)
417
-
418
- # Run.
419
- while not viz.should_close():
420
- viz.draw_frame()
421
- viz.close()
422
-
423
- # ----------------------------------------------------------------------------
424
-
425
-
426
- if __name__ == "__main__":
427
- main()
428
-
429
- # ----------------------------------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/point_rend/README.md DELETED
@@ -1,23 +0,0 @@
1
- # PointRend
2
-
3
- ## Introduction
4
-
5
- [ALGORITHM]
6
-
7
- ```latex
8
- @InProceedings{kirillov2019pointrend,
9
- title={{PointRend}: Image Segmentation as Rendering},
10
- author={Alexander Kirillov and Yuxin Wu and Kaiming He and Ross Girshick},
11
- journal={ArXiv:1912.08193},
12
- year={2019}
13
- }
14
- ```
15
-
16
- ## Results and models
17
-
18
- | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
19
- | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: |
20
- | R-50-FPN | caffe | 1x | 4.6 | | 38.4 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco_20200612_161407.log.json) |
21
- | R-50-FPN | caffe | 3x | 4.6 | | 41.0 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco_20200614_002632.log.json) |
22
-
23
- Note: All models are trained with multi-scale, the input image shorter side is randomly scaled to one of (640, 672, 704, 736, 768, 800).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/rfp.py DELETED
@@ -1,128 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- from mmcv.cnn import constant_init, kaiming_init, xavier_init
5
-
6
- from ..builder import NECKS, build_backbone
7
- from .fpn import FPN
8
-
9
-
10
- class ASPP(nn.Module):
11
- """ASPP (Atrous Spatial Pyramid Pooling)
12
-
13
- This is an implementation of the ASPP module used in DetectoRS
14
- (https://arxiv.org/pdf/2006.02334.pdf)
15
-
16
- Args:
17
- in_channels (int): Number of input channels.
18
- out_channels (int): Number of channels produced by this module
19
- dilations (tuple[int]): Dilations of the four branches.
20
- Default: (1, 3, 6, 1)
21
- """
22
-
23
- def __init__(self, in_channels, out_channels, dilations=(1, 3, 6, 1)):
24
- super().__init__()
25
- assert dilations[-1] == 1
26
- self.aspp = nn.ModuleList()
27
- for dilation in dilations:
28
- kernel_size = 3 if dilation > 1 else 1
29
- padding = dilation if dilation > 1 else 0
30
- conv = nn.Conv2d(
31
- in_channels,
32
- out_channels,
33
- kernel_size=kernel_size,
34
- stride=1,
35
- dilation=dilation,
36
- padding=padding,
37
- bias=True)
38
- self.aspp.append(conv)
39
- self.gap = nn.AdaptiveAvgPool2d(1)
40
- self.init_weights()
41
-
42
- def init_weights(self):
43
- for m in self.modules():
44
- if isinstance(m, nn.Conv2d):
45
- kaiming_init(m)
46
-
47
- def forward(self, x):
48
- avg_x = self.gap(x)
49
- out = []
50
- for aspp_idx in range(len(self.aspp)):
51
- inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x
52
- out.append(F.relu_(self.aspp[aspp_idx](inp)))
53
- out[-1] = out[-1].expand_as(out[-2])
54
- out = torch.cat(out, dim=1)
55
- return out
56
-
57
-
58
- @NECKS.register_module()
59
- class RFP(FPN):
60
- """RFP (Recursive Feature Pyramid)
61
-
62
- This is an implementation of RFP in `DetectoRS
63
- <https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the
64
- input of RFP should be multi level features along with origin input image
65
- of backbone.
66
-
67
- Args:
68
- rfp_steps (int): Number of unrolled steps of RFP.
69
- rfp_backbone (dict): Configuration of the backbone for RFP.
70
- aspp_out_channels (int): Number of output channels of ASPP module.
71
- aspp_dilations (tuple[int]): Dilation rates of four branches.
72
- Default: (1, 3, 6, 1)
73
- """
74
-
75
- def __init__(self,
76
- rfp_steps,
77
- rfp_backbone,
78
- aspp_out_channels,
79
- aspp_dilations=(1, 3, 6, 1),
80
- **kwargs):
81
- super().__init__(**kwargs)
82
- self.rfp_steps = rfp_steps
83
- self.rfp_modules = nn.ModuleList()
84
- for rfp_idx in range(1, rfp_steps):
85
- rfp_module = build_backbone(rfp_backbone)
86
- self.rfp_modules.append(rfp_module)
87
- self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,
88
- aspp_dilations)
89
- self.rfp_weight = nn.Conv2d(
90
- self.out_channels,
91
- 1,
92
- kernel_size=1,
93
- stride=1,
94
- padding=0,
95
- bias=True)
96
-
97
- def init_weights(self):
98
- # Avoid using super().init_weights(), which may alter the default
99
- # initialization of the modules in self.rfp_modules that have missing
100
- # keys in the pretrained checkpoint.
101
- for convs in [self.lateral_convs, self.fpn_convs]:
102
- for m in convs.modules():
103
- if isinstance(m, nn.Conv2d):
104
- xavier_init(m, distribution='uniform')
105
- for rfp_idx in range(self.rfp_steps - 1):
106
- self.rfp_modules[rfp_idx].init_weights(
107
- self.rfp_modules[rfp_idx].pretrained)
108
- constant_init(self.rfp_weight, 0)
109
-
110
- def forward(self, inputs):
111
- inputs = list(inputs)
112
- assert len(inputs) == len(self.in_channels) + 1 # +1 for input image
113
- img = inputs.pop(0)
114
- # FPN forward
115
- x = super().forward(tuple(inputs))
116
- for rfp_idx in range(self.rfp_steps - 1):
117
- rfp_feats = [x[0]] + list(
118
- self.rfp_aspp(x[i]) for i in range(1, len(x)))
119
- x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
120
- # FPN forward
121
- x_idx = super().forward(x_idx)
122
- x_new = []
123
- for ft_idx in range(len(x_idx)):
124
- add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
125
- x_new.append(add_weight * x_idx[ft_idx] +
126
- (1 - add_weight) * x[ft_idx])
127
- x = x_new
128
- return x
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/plugin.py DELETED
@@ -1,88 +0,0 @@
1
- import inspect
2
- import platform
3
-
4
- from .registry import PLUGIN_LAYERS
5
-
6
- if platform.system() == 'Windows':
7
- import regex as re
8
- else:
9
- import re
10
-
11
-
12
- def infer_abbr(class_type):
13
- """Infer abbreviation from the class name.
14
-
15
- This method will infer the abbreviation to map class types to
16
- abbreviations.
17
-
18
- Rule 1: If the class has the property "abbr", return the property.
19
- Rule 2: Otherwise, the abbreviation falls back to snake case of class
20
- name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``.
21
-
22
- Args:
23
- class_type (type): The norm layer type.
24
-
25
- Returns:
26
- str: The inferred abbreviation.
27
- """
28
-
29
- def camel2snack(word):
30
- """Convert camel case word into snack case.
31
-
32
- Modified from `inflection lib
33
- <https://inflection.readthedocs.io/en/latest/#inflection.underscore>`_.
34
-
35
- Example::
36
-
37
- >>> camel2snack("FancyBlock")
38
- 'fancy_block'
39
- """
40
-
41
- word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
42
- word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word)
43
- word = word.replace('-', '_')
44
- return word.lower()
45
-
46
- if not inspect.isclass(class_type):
47
- raise TypeError(
48
- f'class_type must be a type, but got {type(class_type)}')
49
- if hasattr(class_type, '_abbr_'):
50
- return class_type._abbr_
51
- else:
52
- return camel2snack(class_type.__name__)
53
-
54
-
55
- def build_plugin_layer(cfg, postfix='', **kwargs):
56
- """Build plugin layer.
57
-
58
- Args:
59
- cfg (None or dict): cfg should contain:
60
- type (str): identify plugin layer type.
61
- layer args: args needed to instantiate a plugin layer.
62
- postfix (int, str): appended into norm abbreviation to
63
- create named layer. Default: ''.
64
-
65
- Returns:
66
- tuple[str, nn.Module]:
67
- name (str): abbreviation + postfix
68
- layer (nn.Module): created plugin layer
69
- """
70
- if not isinstance(cfg, dict):
71
- raise TypeError('cfg must be a dict')
72
- if 'type' not in cfg:
73
- raise KeyError('the cfg dict must contain the key "type"')
74
- cfg_ = cfg.copy()
75
-
76
- layer_type = cfg_.pop('type')
77
- if layer_type not in PLUGIN_LAYERS:
78
- raise KeyError(f'Unrecognized plugin type {layer_type}')
79
-
80
- plugin_layer = PLUGIN_LAYERS.get(layer_type)
81
- abbr = infer_abbr(plugin_layer)
82
-
83
- assert isinstance(postfix, (int, str))
84
- name = abbr + str(postfix)
85
-
86
- layer = plugin_layer(**kwargs, **cfg_)
87
-
88
- return name, layer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Arjav/TOS-Summarization/app.py DELETED
@@ -1,38 +0,0 @@
1
- import gradio as gr
2
- import torch
3
- from transformers import PegasusTokenizer, PegasusForConditionalGeneration
4
-
5
-
6
- def summarize(Terms):
7
- tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-billsum')
8
- model = PegasusForConditionalGeneration.from_pretrained(
9
- "arjav/TOS-Pegasus")
10
- input_tokenized = tokenizer.encode(
11
- Terms, return_tensors='pt', max_length=1024, truncation=True)
12
- summary_ids = model.generate(input_tokenized,
13
- num_beams=9,
14
- no_repeat_ngram_size=3,
15
- length_penalty=2.0,
16
- min_length= 150,
17
- max_length= 200,
18
- early_stopping=True)
19
- summary = [tokenizer.decode(g, skip_special_tokens=True,
20
- clean_up_tokenization_spaces=False) for g in summary_ids][0]
21
-
22
- return summary
23
-
24
-
25
- description = "Enter a Terms of Service document to summarize"
26
- title = "Terms of Service Summarization"
27
- interface = gr.Interface(fn=summarize,
28
- inputs=gr.Textbox(
29
- label="Terms of Service", lines=2, placeholder="Enter Terms of Service"),
30
- outputs=gr.Textbox(label="Summary"),
31
- description=description,
32
- title=title,
33
- examples=[['account termination policy youtube will terminate a user s access to the service if under appropriate circumstances the user is determined to be a repeat infringer. youtube reserves the right to decide whether content violates these terms of service for reasons other than copyright infringement such as but not limited to pornography obscenity or excessive length. youtube may at any time without prior notice and in its sole discretion remove such content and or terminate a user s account for submitting such material in violation of these terms of service.']],
34
- allow_flagging='never'
35
- )
36
-
37
-
38
- interface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp DELETED
@@ -1,75 +0,0 @@
1
- // Copyright (c) Facebook, Inc. and its affiliates.
2
- #include "../box_iou_rotated/box_iou_rotated_utils.h"
3
- #include "nms_rotated.h"
4
-
5
- namespace detectron2 {
6
-
7
- template <typename scalar_t>
8
- at::Tensor nms_rotated_cpu_kernel(
9
- const at::Tensor& dets,
10
- const at::Tensor& scores,
11
- const double iou_threshold) {
12
- // nms_rotated_cpu_kernel is modified from torchvision's nms_cpu_kernel,
13
- // however, the code in this function is much shorter because
14
- // we delegate the IoU computation for rotated boxes to
15
- // the single_box_iou_rotated function in box_iou_rotated_utils.h
16
- AT_ASSERTM(dets.device().is_cpu(), "dets must be a CPU tensor");
17
- AT_ASSERTM(scores.device().is_cpu(), "scores must be a CPU tensor");
18
- AT_ASSERTM(
19
- dets.scalar_type() == scores.scalar_type(),
20
- "dets should have the same type as scores");
21
-
22
- if (dets.numel() == 0) {
23
- return at::empty({0}, dets.options().dtype(at::kLong));
24
- }
25
-
26
- auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
27
-
28
- auto ndets = dets.size(0);
29
- at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte));
30
- at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong));
31
-
32
- auto suppressed = suppressed_t.data_ptr<uint8_t>();
33
- auto keep = keep_t.data_ptr<int64_t>();
34
- auto order = order_t.data_ptr<int64_t>();
35
-
36
- int64_t num_to_keep = 0;
37
-
38
- for (int64_t _i = 0; _i < ndets; _i++) {
39
- auto i = order[_i];
40
- if (suppressed[i] == 1) {
41
- continue;
42
- }
43
-
44
- keep[num_to_keep++] = i;
45
-
46
- for (int64_t _j = _i + 1; _j < ndets; _j++) {
47
- auto j = order[_j];
48
- if (suppressed[j] == 1) {
49
- continue;
50
- }
51
-
52
- auto ovr = single_box_iou_rotated<scalar_t>(
53
- dets[i].data_ptr<scalar_t>(), dets[j].data_ptr<scalar_t>());
54
- if (ovr >= iou_threshold) {
55
- suppressed[j] = 1;
56
- }
57
- }
58
- }
59
- return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep);
60
- }
61
-
62
- at::Tensor nms_rotated_cpu(
63
- // input must be contiguous
64
- const at::Tensor& dets,
65
- const at::Tensor& scores,
66
- const double iou_threshold) {
67
- auto result = at::empty({0}, dets.options());
68
-
69
- AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms_rotated", [&] {
70
- result = nms_rotated_cpu_kernel<scalar_t>(dets, scores, iou_threshold);
71
- });
72
- return result;
73
- }
74
-
75
- } // namespace detectron2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Bart92/RVC_HF/demucs/repitch.py DELETED
@@ -1,96 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import io
8
- import random
9
- import subprocess as sp
10
- import tempfile
11
-
12
- import numpy as np
13
- import torch
14
- from scipy.io import wavfile
15
-
16
-
17
- def i16_pcm(wav):
18
- if wav.dtype == np.int16:
19
- return wav
20
- return (wav * 2**15).clamp_(-2**15, 2**15 - 1).short()
21
-
22
-
23
- def f32_pcm(wav):
24
- if wav.dtype == np.float:
25
- return wav
26
- return wav.float() / 2**15
27
-
28
-
29
- class RepitchedWrapper:
30
- """
31
- Wrap a dataset to apply online change of pitch / tempo.
32
- """
33
- def __init__(self, dataset, proba=0.2, max_pitch=2, max_tempo=12, tempo_std=5, vocals=[3]):
34
- self.dataset = dataset
35
- self.proba = proba
36
- self.max_pitch = max_pitch
37
- self.max_tempo = max_tempo
38
- self.tempo_std = tempo_std
39
- self.vocals = vocals
40
-
41
- def __len__(self):
42
- return len(self.dataset)
43
-
44
- def __getitem__(self, index):
45
- streams = self.dataset[index]
46
- in_length = streams.shape[-1]
47
- out_length = int((1 - 0.01 * self.max_tempo) * in_length)
48
-
49
- if random.random() < self.proba:
50
- delta_pitch = random.randint(-self.max_pitch, self.max_pitch)
51
- delta_tempo = random.gauss(0, self.tempo_std)
52
- delta_tempo = min(max(-self.max_tempo, delta_tempo), self.max_tempo)
53
- outs = []
54
- for idx, stream in enumerate(streams):
55
- stream = repitch(
56
- stream,
57
- delta_pitch,
58
- delta_tempo,
59
- voice=idx in self.vocals)
60
- outs.append(stream[:, :out_length])
61
- streams = torch.stack(outs)
62
- else:
63
- streams = streams[..., :out_length]
64
- return streams
65
-
66
-
67
- def repitch(wav, pitch, tempo, voice=False, quick=False, samplerate=44100):
68
- """
69
- tempo is a relative delta in percentage, so tempo=10 means tempo at 110%!
70
- pitch is in semi tones.
71
- Requires `soundstretch` to be installed, see
72
- https://www.surina.net/soundtouch/soundstretch.html
73
- """
74
- outfile = tempfile.NamedTemporaryFile(suffix=".wav")
75
- in_ = io.BytesIO()
76
- wavfile.write(in_, samplerate, i16_pcm(wav).t().numpy())
77
- command = [
78
- "soundstretch",
79
- "stdin",
80
- outfile.name,
81
- f"-pitch={pitch}",
82
- f"-tempo={tempo:.6f}",
83
- ]
84
- if quick:
85
- command += ["-quick"]
86
- if voice:
87
- command += ["-speech"]
88
- try:
89
- sp.run(command, capture_output=True, input=in_.getvalue(), check=True)
90
- except sp.CalledProcessError as error:
91
- raise RuntimeError(f"Could not change bpm because {error.stderr.decode('utf-8')}")
92
- sr, wav = wavfile.read(outfile.name)
93
- wav = wav.copy()
94
- wav = f32_pcm(torch.from_numpy(wav).t())
95
- assert sr == samplerate
96
- return wav
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Foto De Instagram.md DELETED
@@ -1,45 +0,0 @@
1
-
2
- <h1>Animales de fiesta 3 Descargar película en hindi: Una guía para amantes de la comedia</h1>
3
- <p>Si usted está buscando una película divertida y entretenida para ver con sus amigos o familiares, es posible que desee ver Party Animals 3. Esta es una película de comedia que le hará reír en voz alta con sus escenas y personajes hilarantes. ¿Pero cómo puedes ver esta película en hindi? En este artículo, le diremos todo lo que necesita saber sobre la descarga de películas de Party Animals 3 en hindi, incluyendo de qué se trata la película, por qué debería verla y cómo descargarla legal y seguramente en línea. </p>
4
- <h2>¿Qué es Animales de Fiesta 3?</h2>
5
- <p>Party Animals 3 es una película de comedia que fue lanzada en 2009. Es la tercera entrega de la franquicia Party Animals, que comenzó con Party Animals en 2002 y Party Animals 2 en 2006. La película sigue las aventuras de Van Wilder, un estudiante universitario famoso por organizar fiestas salvajes y divertirse. En esta película, Van Wilder va a una universidad en Alemania, donde conoce a un grupo de inadaptados que necesitan su ayuda para salvar su dormitorio de ser cerrado por el decano. En el camino, también se enamora de una hermosa chica llamada Eva.</p>
6
- <h2>descargar foto de instagram</h2><br /><p><b><b>Download Zip</b> ->>> <a href="https://bltlly.com/2v6JHE">https://bltlly.com/2v6JHE</a></b></p><br /><br />
7
- <p>La película está protagonizada por Jonathan Bennett como Van Wilder, Kristin Cavallari como Eva, Jerry Shea como Yu Dum Fok, Steve Talley como Dick Arnold, Nic Nac como Farley, Kurt Fuller como Dean Reardon, Nick Zano como Lance Kringle, Brett Rice como Coach Snoop, Linden Ashby como Profesor Betcher, y Meredith Giangrande como Kaitlin Hayes.</p>
8
- <h2>¿Por qué deberías ver Party Animals 3?</h2>
9
- <p>Hay muchas razones por las que deberías ver Party Animals 3 si eres un fan de las películas de comedia. Estas son algunas de ellas:</p>
10
- <ul>
11
-
12
- <li>Las críticas y valoraciones positivas de críticos y audiencias. Party Animals 3 recibió críticas y valoraciones positivas de críticos y audiencias que vieron la película. La película tiene una calificación de 5.1 sobre 10 en IMDb, que es más alta que las calificaciones de las dos películas anteriores en la franquicia. La película también tiene una calificación de 4 sobre 5 en Amazon Prime Video, donde muchos espectadores elogiaron la película por ser divertida, entretenida y agradable. </li>
13
- <li>La disponibilidad de la película en idioma hindi. Si estás buscando una película de comedia que puedas ver en hindi, Party Animals 3 es una buena opción para ti. La película está disponible en idioma hindi en varias plataformas y sitios web que ofrecen servicios de transmisión o descarga en línea. Puedes ver la película en hindi con subtítulos o doblaje, dependiendo de tu preferencia. </li>
14
- </ul>
15
- <h2>Cómo descargar Party Animals 3 en hindi? </h2>
16
- <p>Si quieres descargar Party Animals 3 en hindi, debes tener cuidado con las fuentes que usas. Hay muchas formas ilegales e inseguras de descargar películas en línea, que pueden <p>exponerlo a estafas, malware, virus y problemas legales. Por lo tanto, siempre debe utilizar formas legales y seguras para transmitir o descargar películas en línea. Estas son algunas de las mejores plataformas y sitios web para ver Party Animals 3 en hindi:</p>
17
- <ul>
18
- <li>Amazon Prime Video. Esta es una de las plataformas más populares y confiables para ver películas y programas en línea. Puede transmitir o descargar Party Animals 3 en hindi en Amazon Prime Video con una tarifa de suscripción de $ 12.99 por mes o $ 119 por año. También puede obtener una prueba gratuita de 30 días si es un usuario nuevo. Amazon Prime Video ofrece vídeo y audio de alta calidad, así como subtítulos y opciones de doblaje. </li>
19
-
20
- <li>Moviesflix. Este es un sitio web que ofrece transmisión y descarga gratuita de películas y programas en varios idiomas, incluido el hindi. Puedes ver Party Animals 3 en hindi en Moviesflix sin registro ni suscripción. Sin embargo, debe tener en cuenta que Moviesflix no es un sitio web legal o seguro, y puede contener anuncios, ventanas emergentes, redirecciones y malware. Por lo tanto, debe usar una VPN y un software antivirus al acceder a Moviesflix.</li>
21
- </ul>
22
- <p>Estos son algunos de los consejos y trucos para evitar estafas y malware al descargar Party Animals 3 en hindi:</p>
23
- <ul>
24
- <li>Siempre use una VPN y un software antivirus cuando acceda a sitios web o plataformas no confiables. </li>
25
- <li>Compruebe siempre las opiniones y valoraciones de los sitios web o plataformas antes de usarlos. </li>
26
- <li>Siempre evite hacer clic en enlaces sospechosos, anuncios, ventanas emergentes o redirecciones que puedan aparecer en los sitios web o plataformas. </li>
27
- <li>Siempre descargue la película de las fuentes oficiales o verificadas, y evite usar torrent o redes peer-to-peer. </li>
28
- <li>Siempre escanea el archivo descargado en busca de virus o malware antes de abrirlo. </li>
29
- </ul>
30
- <h2>Conclusión</h2>
31
- <p>Party Animals 3 es una película de comedia que te hará reír con sus hilarantes escenas y personajes. Es una película que se puede ver en idioma hindi en varias plataformas y sitios web en línea. Sin embargo, siempre debe usar formas legales y seguras para transmitir o descargar la película, y evitar estafas y malware que puedan dañar su dispositivo o datos. Esperamos que este artículo te haya ayudado a aprender más sobre la descarga de películas de Party Animals 3 en hindi, y que disfrutes viendo la película con tus amigos o familiares. </p>
32
- <h2>Preguntas frecuentes</h2>
33
- <h3>Q: ¿Cuándo fue liberado Party Animals 3? </h3>
34
- <p>A: Party Animals 3 fue lanzado el 27 de marzo de 2009 en los Estados Unidos.</p>
35
- <h3>P: ¿Quién dirigió Party Animals 3?</h3>
36
- <p>A: Party Animals 3 fue dirigida por Mort Nathan, quien también dirigió la primera película de la franquicia. </p>
37
- <p></p>
38
-
39
- <p>A: Party Animals 3 es un spin-off de la película original de Party Animals, protagonizada por Ryan Reynolds como Van Wilder. Party Animals 3 cuenta con un actor diferente como Van Wilder, que se supone que es su primo. </p>
40
- <h3>P: ¿Cuánto tiempo es Party Animals 3?</h3>
41
- <p>A: Party Animals 3 tiene un tiempo de ejecución de 97 minutos. </p>
42
- <h3>Q: ¿Cuál es la calificación de Party Animals 3?</h3>
43
- <p>A: Party Animals 3 está clasificado como R por su contenido crudo y sexual, desnudez, lenguaje y algún material de drogas. </p> 64aa2da5cf<br />
44
- <br />
45
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/jisfreq.py DELETED
@@ -1,325 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # The Original Code is Mozilla Communicator client code.
3
- #
4
- # The Initial Developer of the Original Code is
5
- # Netscape Communications Corporation.
6
- # Portions created by the Initial Developer are Copyright (C) 1998
7
- # the Initial Developer. All Rights Reserved.
8
- #
9
- # Contributor(s):
10
- # Mark Pilgrim - port to Python
11
- #
12
- # This library is free software; you can redistribute it and/or
13
- # modify it under the terms of the GNU Lesser General Public
14
- # License as published by the Free Software Foundation; either
15
- # version 2.1 of the License, or (at your option) any later version.
16
- #
17
- # This library is distributed in the hope that it will be useful,
18
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
19
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20
- # Lesser General Public License for more details.
21
- #
22
- # You should have received a copy of the GNU Lesser General Public
23
- # License along with this library; if not, write to the Free Software
24
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25
- # 02110-1301 USA
26
- ######################### END LICENSE BLOCK #########################
27
-
28
- # Sampling from about 20M text materials include literature and computer technology
29
- #
30
- # Japanese frequency table, applied to both S-JIS and EUC-JP
31
- # They are sorted in order.
32
-
33
- # 128 --> 0.77094
34
- # 256 --> 0.85710
35
- # 512 --> 0.92635
36
- # 1024 --> 0.97130
37
- # 2048 --> 0.99431
38
- #
39
- # Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
40
- # Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191
41
- #
42
- # Typical Distribution Ratio, 25% of IDR
43
-
44
- JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
45
-
46
- # Char to FreqOrder table ,
47
- JIS_TABLE_SIZE = 4368
48
-
49
- # fmt: off
50
- JIS_CHAR_TO_FREQ_ORDER = (
51
- 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
52
- 3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
53
- 1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
54
- 2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
55
- 2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
56
- 5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
57
- 1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
58
- 5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
59
- 5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
60
- 5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
61
- 5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
62
- 5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
63
- 5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
64
- 1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
65
- 1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
66
- 1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
67
- 2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
68
- 3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
69
- 3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
70
- 4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
71
- 12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
72
- 1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
73
- 109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
74
- 5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
75
- 271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
76
- 32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
77
- 43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
78
- 280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
79
- 54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
80
- 5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
81
- 5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
82
- 5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
83
- 4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
84
- 5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
85
- 5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
86
- 5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
87
- 5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
88
- 5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
89
- 5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
90
- 5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
91
- 5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
92
- 5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
93
- 3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
94
- 5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
95
- 5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
96
- 5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
97
- 5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
98
- 5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
99
- 5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
100
- 5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
101
- 5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
102
- 5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
103
- 5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
104
- 5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
105
- 5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
106
- 5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
107
- 5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
108
- 5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
109
- 5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
110
- 5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
111
- 5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
112
- 5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
113
- 5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
114
- 5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
115
- 5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
116
- 5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
117
- 5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
118
- 5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
119
- 5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
120
- 5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
121
- 5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
122
- 5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
123
- 5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
124
- 5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
125
- 5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
126
- 5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
127
- 5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
128
- 5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
129
- 5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
130
- 5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
131
- 5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
132
- 6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
133
- 6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
134
- 6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
135
- 6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
136
- 6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
137
- 6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
138
- 6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
139
- 6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
140
- 4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
141
- 854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
142
- 665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
143
- 1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
144
- 1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
145
- 896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
146
- 3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
147
- 3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
148
- 804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
149
- 3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
150
- 3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
151
- 586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
152
- 2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
153
- 277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
154
- 3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
155
- 1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
156
- 380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
157
- 1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
158
- 850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
159
- 2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
160
- 2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
161
- 2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
162
- 2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
163
- 1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
164
- 1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
165
- 1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
166
- 1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
167
- 2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
168
- 1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
169
- 2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
170
- 1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
171
- 1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
172
- 1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
173
- 1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
174
- 1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
175
- 1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
176
- 606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
177
- 684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
178
- 1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
179
- 2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
180
- 2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
181
- 2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
182
- 3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
183
- 3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
184
- 884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
185
- 3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
186
- 1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
187
- 861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
188
- 2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
189
- 1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
190
- 576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
191
- 3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
192
- 4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
193
- 2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
194
- 1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
195
- 2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
196
- 1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
197
- 385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
198
- 178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
199
- 1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
200
- 2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
201
- 2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
202
- 2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
203
- 3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
204
- 1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
205
- 2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
206
- 359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
207
- 837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
208
- 855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
209
- 1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
210
- 2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
211
- 633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
212
- 1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
213
- 1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
214
- 353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
215
- 1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
216
- 1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
217
- 1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
218
- 764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
219
- 2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
220
- 278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
221
- 2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
222
- 3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
223
- 2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
224
- 1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
225
- 6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
226
- 1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
227
- 2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
228
- 1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
229
- 470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
230
- 72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
231
- 3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
232
- 3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
233
- 1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
234
- 1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
235
- 1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
236
- 1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
237
- 123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
238
- 913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
239
- 2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
240
- 900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
241
- 3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
242
- 2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
243
- 423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
244
- 1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
245
- 2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
246
- 220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
247
- 1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
248
- 745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
249
- 4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
250
- 2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
251
- 1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
252
- 666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
253
- 1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
254
- 2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
255
- 376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
256
- 6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
257
- 1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
258
- 1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
259
- 2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
260
- 3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
261
- 914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
262
- 3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
263
- 1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
264
- 674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
265
- 1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
266
- 199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
267
- 3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
268
- 370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
269
- 2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
270
- 414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
271
- 4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
272
- 2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
273
- 1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
274
- 1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
275
- 1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
276
- 166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
277
- 1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
278
- 3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
279
- 1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
280
- 3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
281
- 264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
282
- 543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
283
- 983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
284
- 2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
285
- 1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
286
- 867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
287
- 1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
288
- 894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
289
- 1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
290
- 530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
291
- 839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
292
- 480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
293
- 1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
294
- 1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
295
- 2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
296
- 4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
297
- 227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
298
- 1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
299
- 328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
300
- 1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
301
- 3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
302
- 1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
303
- 2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
304
- 2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
305
- 1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
306
- 1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
307
- 2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
308
- 455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
309
- 2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
310
- 1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
311
- 1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
312
- 1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
313
- 1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
314
- 3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
315
- 2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
316
- 2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
317
- 575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
318
- 3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
319
- 3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
320
- 1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
321
- 2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
322
- 1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
323
- 2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
324
- )
325
- # fmt: on
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/sbcharsetprober.py DELETED
@@ -1,162 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # The Original Code is Mozilla Universal charset detector code.
3
- #
4
- # The Initial Developer of the Original Code is
5
- # Netscape Communications Corporation.
6
- # Portions created by the Initial Developer are Copyright (C) 2001
7
- # the Initial Developer. All Rights Reserved.
8
- #
9
- # Contributor(s):
10
- # Mark Pilgrim - port to Python
11
- # Shy Shalom - original C code
12
- #
13
- # This library is free software; you can redistribute it and/or
14
- # modify it under the terms of the GNU Lesser General Public
15
- # License as published by the Free Software Foundation; either
16
- # version 2.1 of the License, or (at your option) any later version.
17
- #
18
- # This library is distributed in the hope that it will be useful,
19
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
20
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21
- # Lesser General Public License for more details.
22
- #
23
- # You should have received a copy of the GNU Lesser General Public
24
- # License along with this library; if not, write to the Free Software
25
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
26
- # 02110-1301 USA
27
- ######################### END LICENSE BLOCK #########################
28
-
29
- from typing import Dict, List, NamedTuple, Optional, Union
30
-
31
- from .charsetprober import CharSetProber
32
- from .enums import CharacterCategory, ProbingState, SequenceLikelihood
33
-
34
-
35
- class SingleByteCharSetModel(NamedTuple):
36
- charset_name: str
37
- language: str
38
- char_to_order_map: Dict[int, int]
39
- language_model: Dict[int, Dict[int, int]]
40
- typical_positive_ratio: float
41
- keep_ascii_letters: bool
42
- alphabet: str
43
-
44
-
45
- class SingleByteCharSetProber(CharSetProber):
46
- SAMPLE_SIZE = 64
47
- SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2
48
- POSITIVE_SHORTCUT_THRESHOLD = 0.95
49
- NEGATIVE_SHORTCUT_THRESHOLD = 0.05
50
-
51
- def __init__(
52
- self,
53
- model: SingleByteCharSetModel,
54
- is_reversed: bool = False,
55
- name_prober: Optional[CharSetProber] = None,
56
- ) -> None:
57
- super().__init__()
58
- self._model = model
59
- # TRUE if we need to reverse every pair in the model lookup
60
- self._reversed = is_reversed
61
- # Optional auxiliary prober for name decision
62
- self._name_prober = name_prober
63
- self._last_order = 255
64
- self._seq_counters: List[int] = []
65
- self._total_seqs = 0
66
- self._total_char = 0
67
- self._control_char = 0
68
- self._freq_char = 0
69
- self.reset()
70
-
71
- def reset(self) -> None:
72
- super().reset()
73
- # char order of last character
74
- self._last_order = 255
75
- self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
76
- self._total_seqs = 0
77
- self._total_char = 0
78
- self._control_char = 0
79
- # characters that fall in our sampling range
80
- self._freq_char = 0
81
-
82
- @property
83
- def charset_name(self) -> Optional[str]:
84
- if self._name_prober:
85
- return self._name_prober.charset_name
86
- return self._model.charset_name
87
-
88
- @property
89
- def language(self) -> Optional[str]:
90
- if self._name_prober:
91
- return self._name_prober.language
92
- return self._model.language
93
-
94
- def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
95
- # TODO: Make filter_international_words keep things in self.alphabet
96
- if not self._model.keep_ascii_letters:
97
- byte_str = self.filter_international_words(byte_str)
98
- else:
99
- byte_str = self.remove_xml_tags(byte_str)
100
- if not byte_str:
101
- return self.state
102
- char_to_order_map = self._model.char_to_order_map
103
- language_model = self._model.language_model
104
- for char in byte_str:
105
- order = char_to_order_map.get(char, CharacterCategory.UNDEFINED)
106
- # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
107
- # CharacterCategory.SYMBOL is actually 253, so we use CONTROL
108
- # to make it closer to the original intent. The only difference
109
- # is whether or not we count digits and control characters for
110
- # _total_char purposes.
111
- if order < CharacterCategory.CONTROL:
112
- self._total_char += 1
113
- if order < self.SAMPLE_SIZE:
114
- self._freq_char += 1
115
- if self._last_order < self.SAMPLE_SIZE:
116
- self._total_seqs += 1
117
- if not self._reversed:
118
- lm_cat = language_model[self._last_order][order]
119
- else:
120
- lm_cat = language_model[order][self._last_order]
121
- self._seq_counters[lm_cat] += 1
122
- self._last_order = order
123
-
124
- charset_name = self._model.charset_name
125
- if self.state == ProbingState.DETECTING:
126
- if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
127
- confidence = self.get_confidence()
128
- if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
129
- self.logger.debug(
130
- "%s confidence = %s, we have a winner", charset_name, confidence
131
- )
132
- self._state = ProbingState.FOUND_IT
133
- elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
134
- self.logger.debug(
135
- "%s confidence = %s, below negative shortcut threshold %s",
136
- charset_name,
137
- confidence,
138
- self.NEGATIVE_SHORTCUT_THRESHOLD,
139
- )
140
- self._state = ProbingState.NOT_ME
141
-
142
- return self.state
143
-
144
- def get_confidence(self) -> float:
145
- r = 0.01
146
- if self._total_seqs > 0:
147
- r = (
148
- (
149
- self._seq_counters[SequenceLikelihood.POSITIVE]
150
- + 0.25 * self._seq_counters[SequenceLikelihood.LIKELY]
151
- )
152
- / self._total_seqs
153
- / self._model.typical_positive_ratio
154
- )
155
- # The more control characters (proportionnaly to the size
156
- # of the text), the less confident we become in the current
157
- # charset.
158
- r = r * (self._total_char - self._control_char) / self._total_char
159
- r = r * self._freq_char / self._total_char
160
- if r >= 1.0:
161
- r = 0.99
162
- return r
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Boadiwaa/Recipes/openai/api_resources/model.py DELETED
@@ -1,6 +0,0 @@
1
- from openai.api_resources.abstract import DeletableAPIResource, ListableAPIResource
2
-
3
-
4
- class Model(ListableAPIResource, DeletableAPIResource):
5
- engine_required = False
6
- OBJECT_NAME = "models"
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/box_regression.py DELETED
@@ -1,221 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import math
3
- from typing import Tuple
4
- import torch
5
-
6
- # Value for clamping large dw and dh predictions. The heuristic is that we clamp
7
- # such that dw and dh are no larger than what would transform a 16px box into a
8
- # 1000px box (based on a small anchor, 16px, and a typical image size, 1000px).
9
- _DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16)
10
-
11
-
12
- __all__ = ["Box2BoxTransform", "Box2BoxTransformRotated"]
13
-
14
-
15
- @torch.jit.script
16
- class Box2BoxTransform(object):
17
- """
18
- The box-to-box transform defined in R-CNN. The transformation is parameterized
19
- by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height
20
- by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height).
21
- """
22
-
23
- def __init__(
24
- self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP
25
- ):
26
- """
27
- Args:
28
- weights (4-element tuple): Scaling factors that are applied to the
29
- (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set
30
- such that the deltas have unit variance; now they are treated as
31
- hyperparameters of the system.
32
- scale_clamp (float): When predicting deltas, the predicted box scaling
33
- factors (dw and dh) are clamped such that they are <= scale_clamp.
34
- """
35
- self.weights = weights
36
- self.scale_clamp = scale_clamp
37
-
38
- def get_deltas(self, src_boxes, target_boxes):
39
- """
40
- Get box regression transformation deltas (dx, dy, dw, dh) that can be used
41
- to transform the `src_boxes` into the `target_boxes`. That is, the relation
42
- ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
43
- any delta is too large and is clamped).
44
-
45
- Args:
46
- src_boxes (Tensor): source boxes, e.g., object proposals
47
- target_boxes (Tensor): target of the transformation, e.g., ground-truth
48
- boxes.
49
- """
50
- assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
51
- assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
52
-
53
- src_widths = src_boxes[:, 2] - src_boxes[:, 0]
54
- src_heights = src_boxes[:, 3] - src_boxes[:, 1]
55
- src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
56
- src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
57
-
58
- target_widths = target_boxes[:, 2] - target_boxes[:, 0]
59
- target_heights = target_boxes[:, 3] - target_boxes[:, 1]
60
- target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths
61
- target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights
62
-
63
- wx, wy, ww, wh = self.weights
64
- dx = wx * (target_ctr_x - src_ctr_x) / src_widths
65
- dy = wy * (target_ctr_y - src_ctr_y) / src_heights
66
- dw = ww * torch.log(target_widths / src_widths)
67
- dh = wh * torch.log(target_heights / src_heights)
68
-
69
- deltas = torch.stack((dx, dy, dw, dh), dim=1)
70
- assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!"
71
- return deltas
72
-
73
- def apply_deltas(self, deltas, boxes):
74
- """
75
- Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.
76
-
77
- Args:
78
- deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
79
- deltas[i] represents k potentially different class-specific
80
- box transformations for the single box boxes[i].
81
- boxes (Tensor): boxes to transform, of shape (N, 4)
82
- """
83
- boxes = boxes.to(deltas.dtype)
84
-
85
- widths = boxes[:, 2] - boxes[:, 0]
86
- heights = boxes[:, 3] - boxes[:, 1]
87
- ctr_x = boxes[:, 0] + 0.5 * widths
88
- ctr_y = boxes[:, 1] + 0.5 * heights
89
-
90
- wx, wy, ww, wh = self.weights
91
- dx = deltas[:, 0::4] / wx
92
- dy = deltas[:, 1::4] / wy
93
- dw = deltas[:, 2::4] / ww
94
- dh = deltas[:, 3::4] / wh
95
-
96
- # Prevent sending too large values into torch.exp()
97
- dw = torch.clamp(dw, max=self.scale_clamp)
98
- dh = torch.clamp(dh, max=self.scale_clamp)
99
-
100
- pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
101
- pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
102
- pred_w = torch.exp(dw) * widths[:, None]
103
- pred_h = torch.exp(dh) * heights[:, None]
104
-
105
- pred_boxes = torch.zeros_like(deltas)
106
- pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1
107
- pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1
108
- pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2
109
- pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2
110
- return pred_boxes
111
-
112
-
113
- @torch.jit.script
114
- class Box2BoxTransformRotated(object):
115
- """
116
- The box-to-box transform defined in Rotated R-CNN. The transformation is parameterized
117
- by 5 deltas: (dx, dy, dw, dh, da). The transformation scales the box's width and height
118
- by exp(dw), exp(dh), shifts a box's center by the offset (dx * width, dy * height),
119
- and rotate a box's angle by da (radians).
120
- Note: angles of deltas are in radians while angles of boxes are in degrees.
121
- """
122
-
123
- def __init__(
124
- self,
125
- weights: Tuple[float, float, float, float, float],
126
- scale_clamp: float = _DEFAULT_SCALE_CLAMP,
127
- ):
128
- """
129
- Args:
130
- weights (5-element tuple): Scaling factors that are applied to the
131
- (dx, dy, dw, dh, da) deltas. These are treated as
132
- hyperparameters of the system.
133
- scale_clamp (float): When predicting deltas, the predicted box scaling
134
- factors (dw and dh) are clamped such that they are <= scale_clamp.
135
- """
136
- self.weights = weights
137
- self.scale_clamp = scale_clamp
138
-
139
- def get_deltas(self, src_boxes, target_boxes):
140
- """
141
- Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used
142
- to transform the `src_boxes` into the `target_boxes`. That is, the relation
143
- ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
144
- any delta is too large and is clamped).
145
-
146
- Args:
147
- src_boxes (Tensor): Nx5 source boxes, e.g., object proposals
148
- target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth
149
- boxes.
150
- """
151
- assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
152
- assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
153
-
154
- src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1)
155
-
156
- target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind(
157
- target_boxes, dim=1
158
- )
159
-
160
- wx, wy, ww, wh, wa = self.weights
161
- dx = wx * (target_ctr_x - src_ctr_x) / src_widths
162
- dy = wy * (target_ctr_y - src_ctr_y) / src_heights
163
- dw = ww * torch.log(target_widths / src_widths)
164
- dh = wh * torch.log(target_heights / src_heights)
165
- # Angles of deltas are in radians while angles of boxes are in degrees.
166
- # the conversion to radians serve as a way to normalize the values
167
- da = target_angles - src_angles
168
- da = (da + 180.0) % 360.0 - 180.0 # make it in [-180, 180)
169
- da *= wa * math.pi / 180.0
170
-
171
- deltas = torch.stack((dx, dy, dw, dh, da), dim=1)
172
- assert (
173
- (src_widths > 0).all().item()
174
- ), "Input boxes to Box2BoxTransformRotated are not valid!"
175
- return deltas
176
-
177
- def apply_deltas(self, deltas, boxes):
178
- """
179
- Apply transformation `deltas` (dx, dy, dw, dh, da) to `boxes`.
180
-
181
- Args:
182
- deltas (Tensor): transformation deltas of shape (N, 5).
183
- deltas[i] represents box transformation for the single box boxes[i].
184
- boxes (Tensor): boxes to transform, of shape (N, 5)
185
- """
186
- assert deltas.shape[1] == 5 and boxes.shape[1] == 5
187
-
188
- boxes = boxes.to(deltas.dtype)
189
-
190
- ctr_x = boxes[:, 0]
191
- ctr_y = boxes[:, 1]
192
- widths = boxes[:, 2]
193
- heights = boxes[:, 3]
194
- angles = boxes[:, 4]
195
-
196
- wx, wy, ww, wh, wa = self.weights
197
-
198
- dx = deltas[:, 0] / wx
199
- dy = deltas[:, 1] / wy
200
- dw = deltas[:, 2] / ww
201
- dh = deltas[:, 3] / wh
202
- da = deltas[:, 4] / wa
203
-
204
- # Prevent sending too large values into torch.exp()
205
- dw = torch.clamp(dw, max=self.scale_clamp)
206
- dh = torch.clamp(dh, max=self.scale_clamp)
207
-
208
- pred_boxes = torch.zeros_like(deltas)
209
- pred_boxes[:, 0] = dx * widths + ctr_x # x_ctr
210
- pred_boxes[:, 1] = dy * heights + ctr_y # y_ctr
211
- pred_boxes[:, 2] = torch.exp(dw) * widths # width
212
- pred_boxes[:, 3] = torch.exp(dh) * heights # height
213
-
214
- # Following original RRPN implementation,
215
- # angles of deltas are in radians while angles of boxes are in degrees.
216
- pred_angle = da * 180.0 / math.pi + angles
217
- pred_angle = (pred_angle + 180.0) % 360.0 - 180.0 # make it in [-180, 180)
218
-
219
- pred_boxes[:, 4] = pred_angle
220
-
221
- return pred_boxes
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/pybind11/tests/test_methods_and_attributes.py DELETED
@@ -1,440 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- import pytest
3
-
4
- import env # noqa: F401
5
-
6
- from pybind11_tests import methods_and_attributes as m
7
- from pybind11_tests import ConstructorStats
8
-
9
-
10
- def test_methods_and_attributes():
11
- instance1 = m.ExampleMandA()
12
- instance2 = m.ExampleMandA(32)
13
-
14
- instance1.add1(instance2)
15
- instance1.add2(instance2)
16
- instance1.add3(instance2)
17
- instance1.add4(instance2)
18
- instance1.add5(instance2)
19
- instance1.add6(32)
20
- instance1.add7(32)
21
- instance1.add8(32)
22
- instance1.add9(32)
23
- instance1.add10(32)
24
-
25
- assert str(instance1) == "ExampleMandA[value=320]"
26
- assert str(instance2) == "ExampleMandA[value=32]"
27
- assert str(instance1.self1()) == "ExampleMandA[value=320]"
28
- assert str(instance1.self2()) == "ExampleMandA[value=320]"
29
- assert str(instance1.self3()) == "ExampleMandA[value=320]"
30
- assert str(instance1.self4()) == "ExampleMandA[value=320]"
31
- assert str(instance1.self5()) == "ExampleMandA[value=320]"
32
-
33
- assert instance1.internal1() == 320
34
- assert instance1.internal2() == 320
35
- assert instance1.internal3() == 320
36
- assert instance1.internal4() == 320
37
- assert instance1.internal5() == 320
38
-
39
- assert instance1.overloaded() == "()"
40
- assert instance1.overloaded(0) == "(int)"
41
- assert instance1.overloaded(1, 1.0) == "(int, float)"
42
- assert instance1.overloaded(2.0, 2) == "(float, int)"
43
- assert instance1.overloaded(3, 3) == "(int, int)"
44
- assert instance1.overloaded(4., 4.) == "(float, float)"
45
- assert instance1.overloaded_const(-3) == "(int) const"
46
- assert instance1.overloaded_const(5, 5.0) == "(int, float) const"
47
- assert instance1.overloaded_const(6.0, 6) == "(float, int) const"
48
- assert instance1.overloaded_const(7, 7) == "(int, int) const"
49
- assert instance1.overloaded_const(8., 8.) == "(float, float) const"
50
- assert instance1.overloaded_float(1, 1) == "(float, float)"
51
- assert instance1.overloaded_float(1, 1.) == "(float, float)"
52
- assert instance1.overloaded_float(1., 1) == "(float, float)"
53
- assert instance1.overloaded_float(1., 1.) == "(float, float)"
54
-
55
- assert instance1.value == 320
56
- instance1.value = 100
57
- assert str(instance1) == "ExampleMandA[value=100]"
58
-
59
- cstats = ConstructorStats.get(m.ExampleMandA)
60
- assert cstats.alive() == 2
61
- del instance1, instance2
62
- assert cstats.alive() == 0
63
- assert cstats.values() == ["32"]
64
- assert cstats.default_constructions == 1
65
- assert cstats.copy_constructions == 2
66
- assert cstats.move_constructions >= 2
67
- assert cstats.copy_assignments == 0
68
- assert cstats.move_assignments == 0
69
-
70
-
71
- def test_copy_method():
72
- """Issue #443: calling copied methods fails in Python 3"""
73
-
74
- m.ExampleMandA.add2c = m.ExampleMandA.add2
75
- m.ExampleMandA.add2d = m.ExampleMandA.add2b
76
- a = m.ExampleMandA(123)
77
- assert a.value == 123
78
- a.add2(m.ExampleMandA(-100))
79
- assert a.value == 23
80
- a.add2b(m.ExampleMandA(20))
81
- assert a.value == 43
82
- a.add2c(m.ExampleMandA(6))
83
- assert a.value == 49
84
- a.add2d(m.ExampleMandA(-7))
85
- assert a.value == 42
86
-
87
-
88
- def test_properties():
89
- instance = m.TestProperties()
90
-
91
- assert instance.def_readonly == 1
92
- with pytest.raises(AttributeError):
93
- instance.def_readonly = 2
94
-
95
- instance.def_readwrite = 2
96
- assert instance.def_readwrite == 2
97
-
98
- assert instance.def_property_readonly == 2
99
- with pytest.raises(AttributeError):
100
- instance.def_property_readonly = 3
101
-
102
- instance.def_property = 3
103
- assert instance.def_property == 3
104
-
105
- with pytest.raises(AttributeError) as excinfo:
106
- dummy = instance.def_property_writeonly # noqa: F841 unused var
107
- assert "unreadable attribute" in str(excinfo.value)
108
-
109
- instance.def_property_writeonly = 4
110
- assert instance.def_property_readonly == 4
111
-
112
- with pytest.raises(AttributeError) as excinfo:
113
- dummy = instance.def_property_impossible # noqa: F841 unused var
114
- assert "unreadable attribute" in str(excinfo.value)
115
-
116
- with pytest.raises(AttributeError) as excinfo:
117
- instance.def_property_impossible = 5
118
- assert "can't set attribute" in str(excinfo.value)
119
-
120
-
121
- def test_static_properties():
122
- assert m.TestProperties.def_readonly_static == 1
123
- with pytest.raises(AttributeError) as excinfo:
124
- m.TestProperties.def_readonly_static = 2
125
- assert "can't set attribute" in str(excinfo.value)
126
-
127
- m.TestProperties.def_readwrite_static = 2
128
- assert m.TestProperties.def_readwrite_static == 2
129
-
130
- with pytest.raises(AttributeError) as excinfo:
131
- dummy = m.TestProperties.def_writeonly_static # noqa: F841 unused var
132
- assert "unreadable attribute" in str(excinfo.value)
133
-
134
- m.TestProperties.def_writeonly_static = 3
135
- assert m.TestProperties.def_readonly_static == 3
136
-
137
- assert m.TestProperties.def_property_readonly_static == 3
138
- with pytest.raises(AttributeError) as excinfo:
139
- m.TestProperties.def_property_readonly_static = 99
140
- assert "can't set attribute" in str(excinfo.value)
141
-
142
- m.TestProperties.def_property_static = 4
143
- assert m.TestProperties.def_property_static == 4
144
-
145
- with pytest.raises(AttributeError) as excinfo:
146
- dummy = m.TestProperties.def_property_writeonly_static
147
- assert "unreadable attribute" in str(excinfo.value)
148
-
149
- m.TestProperties.def_property_writeonly_static = 5
150
- assert m.TestProperties.def_property_static == 5
151
-
152
- # Static property read and write via instance
153
- instance = m.TestProperties()
154
-
155
- m.TestProperties.def_readwrite_static = 0
156
- assert m.TestProperties.def_readwrite_static == 0
157
- assert instance.def_readwrite_static == 0
158
-
159
- instance.def_readwrite_static = 2
160
- assert m.TestProperties.def_readwrite_static == 2
161
- assert instance.def_readwrite_static == 2
162
-
163
- with pytest.raises(AttributeError) as excinfo:
164
- dummy = instance.def_property_writeonly_static # noqa: F841 unused var
165
- assert "unreadable attribute" in str(excinfo.value)
166
-
167
- instance.def_property_writeonly_static = 4
168
- assert instance.def_property_static == 4
169
-
170
- # It should be possible to override properties in derived classes
171
- assert m.TestPropertiesOverride().def_readonly == 99
172
- assert m.TestPropertiesOverride.def_readonly_static == 99
173
-
174
-
175
- def test_static_cls():
176
- """Static property getter and setters expect the type object as the their only argument"""
177
-
178
- instance = m.TestProperties()
179
- assert m.TestProperties.static_cls is m.TestProperties
180
- assert instance.static_cls is m.TestProperties
181
-
182
- def check_self(self):
183
- assert self is m.TestProperties
184
-
185
- m.TestProperties.static_cls = check_self
186
- instance.static_cls = check_self
187
-
188
-
189
- def test_metaclass_override():
190
- """Overriding pybind11's default metaclass changes the behavior of `static_property`"""
191
-
192
- assert type(m.ExampleMandA).__name__ == "pybind11_type"
193
- assert type(m.MetaclassOverride).__name__ == "type"
194
-
195
- assert m.MetaclassOverride.readonly == 1
196
- assert type(m.MetaclassOverride.__dict__["readonly"]).__name__ == "pybind11_static_property"
197
-
198
- # Regular `type` replaces the property instead of calling `__set__()`
199
- m.MetaclassOverride.readonly = 2
200
- assert m.MetaclassOverride.readonly == 2
201
- assert isinstance(m.MetaclassOverride.__dict__["readonly"], int)
202
-
203
-
204
- def test_no_mixed_overloads():
205
- from pybind11_tests import debug_enabled
206
-
207
- with pytest.raises(RuntimeError) as excinfo:
208
- m.ExampleMandA.add_mixed_overloads1()
209
- assert (str(excinfo.value) ==
210
- "overloading a method with both static and instance methods is not supported; " +
211
- ("compile in debug mode for more details" if not debug_enabled else
212
- "error while attempting to bind static method ExampleMandA.overload_mixed1"
213
- "(arg0: float) -> str")
214
- )
215
-
216
- with pytest.raises(RuntimeError) as excinfo:
217
- m.ExampleMandA.add_mixed_overloads2()
218
- assert (str(excinfo.value) ==
219
- "overloading a method with both static and instance methods is not supported; " +
220
- ("compile in debug mode for more details" if not debug_enabled else
221
- "error while attempting to bind instance method ExampleMandA.overload_mixed2"
222
- "(self: pybind11_tests.methods_and_attributes.ExampleMandA, arg0: int, arg1: int)"
223
- " -> str")
224
- )
225
-
226
-
227
- @pytest.mark.parametrize("access", ["ro", "rw", "static_ro", "static_rw"])
228
- def test_property_return_value_policies(access):
229
- if not access.startswith("static"):
230
- obj = m.TestPropRVP()
231
- else:
232
- obj = m.TestPropRVP
233
-
234
- ref = getattr(obj, access + "_ref")
235
- assert ref.value == 1
236
- ref.value = 2
237
- assert getattr(obj, access + "_ref").value == 2
238
- ref.value = 1 # restore original value for static properties
239
-
240
- copy = getattr(obj, access + "_copy")
241
- assert copy.value == 1
242
- copy.value = 2
243
- assert getattr(obj, access + "_copy").value == 1
244
-
245
- copy = getattr(obj, access + "_func")
246
- assert copy.value == 1
247
- copy.value = 2
248
- assert getattr(obj, access + "_func").value == 1
249
-
250
-
251
- def test_property_rvalue_policy():
252
- """When returning an rvalue, the return value policy is automatically changed from
253
- `reference(_internal)` to `move`. The following would not work otherwise."""
254
-
255
- instance = m.TestPropRVP()
256
- o = instance.rvalue
257
- assert o.value == 1
258
-
259
- os = m.TestPropRVP.static_rvalue
260
- assert os.value == 1
261
-
262
-
263
- # https://foss.heptapod.net/pypy/pypy/-/issues/2447
264
- @pytest.mark.xfail("env.PYPY")
265
- def test_dynamic_attributes():
266
- instance = m.DynamicClass()
267
- assert not hasattr(instance, "foo")
268
- assert "foo" not in dir(instance)
269
-
270
- # Dynamically add attribute
271
- instance.foo = 42
272
- assert hasattr(instance, "foo")
273
- assert instance.foo == 42
274
- assert "foo" in dir(instance)
275
-
276
- # __dict__ should be accessible and replaceable
277
- assert "foo" in instance.__dict__
278
- instance.__dict__ = {"bar": True}
279
- assert not hasattr(instance, "foo")
280
- assert hasattr(instance, "bar")
281
-
282
- with pytest.raises(TypeError) as excinfo:
283
- instance.__dict__ = []
284
- assert str(excinfo.value) == "__dict__ must be set to a dictionary, not a 'list'"
285
-
286
- cstats = ConstructorStats.get(m.DynamicClass)
287
- assert cstats.alive() == 1
288
- del instance
289
- assert cstats.alive() == 0
290
-
291
- # Derived classes should work as well
292
- class PythonDerivedDynamicClass(m.DynamicClass):
293
- pass
294
-
295
- for cls in m.CppDerivedDynamicClass, PythonDerivedDynamicClass:
296
- derived = cls()
297
- derived.foobar = 100
298
- assert derived.foobar == 100
299
-
300
- assert cstats.alive() == 1
301
- del derived
302
- assert cstats.alive() == 0
303
-
304
-
305
- # https://foss.heptapod.net/pypy/pypy/-/issues/2447
306
- @pytest.mark.xfail("env.PYPY")
307
- def test_cyclic_gc():
308
- # One object references itself
309
- instance = m.DynamicClass()
310
- instance.circular_reference = instance
311
-
312
- cstats = ConstructorStats.get(m.DynamicClass)
313
- assert cstats.alive() == 1
314
- del instance
315
- assert cstats.alive() == 0
316
-
317
- # Two object reference each other
318
- i1 = m.DynamicClass()
319
- i2 = m.DynamicClass()
320
- i1.cycle = i2
321
- i2.cycle = i1
322
-
323
- assert cstats.alive() == 2
324
- del i1, i2
325
- assert cstats.alive() == 0
326
-
327
-
328
- def test_bad_arg_default(msg):
329
- from pybind11_tests import debug_enabled
330
-
331
- with pytest.raises(RuntimeError) as excinfo:
332
- m.bad_arg_def_named()
333
- assert msg(excinfo.value) == (
334
- "arg(): could not convert default argument 'a: UnregisteredType' in function "
335
- "'should_fail' into a Python object (type not registered yet?)"
336
- if debug_enabled else
337
- "arg(): could not convert default argument into a Python object (type not registered "
338
- "yet?). Compile in debug mode for more information."
339
- )
340
-
341
- with pytest.raises(RuntimeError) as excinfo:
342
- m.bad_arg_def_unnamed()
343
- assert msg(excinfo.value) == (
344
- "arg(): could not convert default argument 'UnregisteredType' in function "
345
- "'should_fail' into a Python object (type not registered yet?)"
346
- if debug_enabled else
347
- "arg(): could not convert default argument into a Python object (type not registered "
348
- "yet?). Compile in debug mode for more information."
349
- )
350
-
351
-
352
- def test_accepts_none(msg):
353
- a = m.NoneTester()
354
- assert m.no_none1(a) == 42
355
- assert m.no_none2(a) == 42
356
- assert m.no_none3(a) == 42
357
- assert m.no_none4(a) == 42
358
- assert m.no_none5(a) == 42
359
- assert m.ok_none1(a) == 42
360
- assert m.ok_none2(a) == 42
361
- assert m.ok_none3(a) == 42
362
- assert m.ok_none4(a) == 42
363
- assert m.ok_none5(a) == 42
364
-
365
- with pytest.raises(TypeError) as excinfo:
366
- m.no_none1(None)
367
- assert "incompatible function arguments" in str(excinfo.value)
368
- with pytest.raises(TypeError) as excinfo:
369
- m.no_none2(None)
370
- assert "incompatible function arguments" in str(excinfo.value)
371
- with pytest.raises(TypeError) as excinfo:
372
- m.no_none3(None)
373
- assert "incompatible function arguments" in str(excinfo.value)
374
- with pytest.raises(TypeError) as excinfo:
375
- m.no_none4(None)
376
- assert "incompatible function arguments" in str(excinfo.value)
377
- with pytest.raises(TypeError) as excinfo:
378
- m.no_none5(None)
379
- assert "incompatible function arguments" in str(excinfo.value)
380
-
381
- # The first one still raises because you can't pass None as a lvalue reference arg:
382
- with pytest.raises(TypeError) as excinfo:
383
- assert m.ok_none1(None) == -1
384
- assert msg(excinfo.value) == """
385
- ok_none1(): incompatible function arguments. The following argument types are supported:
386
- 1. (arg0: m.methods_and_attributes.NoneTester) -> int
387
-
388
- Invoked with: None
389
- """
390
-
391
- # The rest take the argument as pointer or holder, and accept None:
392
- assert m.ok_none2(None) == -1
393
- assert m.ok_none3(None) == -1
394
- assert m.ok_none4(None) == -1
395
- assert m.ok_none5(None) == -1
396
-
397
-
398
- def test_str_issue(msg):
399
- """#283: __str__ called on uninitialized instance when constructor arguments invalid"""
400
-
401
- assert str(m.StrIssue(3)) == "StrIssue[3]"
402
-
403
- with pytest.raises(TypeError) as excinfo:
404
- str(m.StrIssue("no", "such", "constructor"))
405
- assert msg(excinfo.value) == """
406
- __init__(): incompatible constructor arguments. The following argument types are supported:
407
- 1. m.methods_and_attributes.StrIssue(arg0: int)
408
- 2. m.methods_and_attributes.StrIssue()
409
-
410
- Invoked with: 'no', 'such', 'constructor'
411
- """
412
-
413
-
414
- def test_unregistered_base_implementations():
415
- a = m.RegisteredDerived()
416
- a.do_nothing()
417
- assert a.rw_value == 42
418
- assert a.ro_value == 1.25
419
- a.rw_value += 5
420
- assert a.sum() == 48.25
421
- a.increase_value()
422
- assert a.rw_value == 48
423
- assert a.ro_value == 1.5
424
- assert a.sum() == 49.5
425
- assert a.rw_value_prop == 48
426
- a.rw_value_prop += 1
427
- assert a.rw_value_prop == 49
428
- a.increase_value()
429
- assert a.ro_value_prop == 1.75
430
-
431
-
432
- def test_ref_qualified():
433
- """Tests that explicit lvalue ref-qualified methods can be called just like their
434
- non ref-qualified counterparts."""
435
-
436
- r = m.RefQualified()
437
- assert r.value == 0
438
- r.refQualified(17)
439
- assert r.value == 17
440
- assert r.constRefQualified(23) == 40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/mmdet/models/__init__.py DELETED
@@ -1,16 +0,0 @@
1
# Public model-registry API for mmdet.models: registries, builder helpers, and
# the concrete implementations registered via the wildcard imports below.
from .backbones import *  # noqa: F401,F403
from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
                      ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
                      build_detector, build_head, build_loss, build_neck,
                      build_roi_extractor, build_shared_head)
from .dense_heads import *  # noqa: F401,F403
from .detectors import *  # noqa: F401,F403
from .losses import *  # noqa: F401,F403
from .necks import *  # noqa: F401,F403
from .roi_heads import *  # noqa: F401,F403

# Only the registries and builder functions are exported by name; the starred
# imports above exist to register components with the registries on import.
__all__ = [
    'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
    'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
    'build_shared_head', 'build_head', 'build_loss', 'build_detector'
]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Catmeow/Text_Generation_Fine_Tune/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Text Generation Fine Tune
3
- emoji: 💻
4
- colorFrom: red
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.7
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/meme-api/meme_generator/memes/murmur/__init__.py DELETED
@@ -1,34 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from pil_utils import BuildImage
5
-
6
- from meme_generator import add_meme
7
- from meme_generator.exception import TextOverLength
8
-
9
- img_dir = Path(__file__).parent / "images"
10
-
11
-
12
def murmur(images, texts: List[str], args):
    """Render the "murmur" meme: draw the single input text onto the template.

    `images` and `args` are part of the meme-generator callback signature but
    are unused by this text-only meme.
    """
    text = texts[0]
    frame = BuildImage.open(img_dir / "0.jpg")
    try:
        frame.draw_text(
            (10, 255, 430, 300),  # text bounding box (left, top, right, bottom)
            text,
            max_fontsize=40,
            min_fontsize=15,
        )
    except ValueError:
        # draw_text raises ValueError when the text cannot fit even at the
        # minimum font size; surface that as the domain-specific error.
        raise TextOverLength(text)
    return frame.save_jpg()
25
-
26
-
27
# Register the meme: exactly one text argument, triggered by the keyword "低语".
add_meme(
    "murmur",
    murmur,
    min_texts=1,
    max_texts=1,
    default_texts=["你的假期余额不足"],
    keywords=["低语"],
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cvandi/remake/realesrgan/train.py DELETED
@@ -1,11 +0,0 @@
1
# flake8: noqa
"""Training entry point for Real-ESRGAN.

Importing the archs/data/models subpackages registers their classes with
basicsr's registries before the generic training pipeline starts, which is
why the imports below look unused.
"""
import os.path as osp
from basicsr.train import train_pipeline

import realesrgan.archs
import realesrgan.data
import realesrgan.models

if __name__ == '__main__':
    # basicsr expects the project root, i.e. two levels above this file.
    root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
    train_pipeline(root_path)
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cvandi/remake/realesrgan/weights/README.md DELETED
@@ -1,3 +0,0 @@
1
- # Weights
2
-
3
- Put the downloaded weights to this folder.
 
 
 
 
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/cc_sbu_dataset.py DELETED
@@ -1,49 +0,0 @@
1
- import os
2
- from PIL import Image
3
- import webdataset as wds
4
- from video_llama.datasets.datasets.base_dataset import BaseDataset
5
- from video_llama.datasets.datasets.caption_datasets import CaptionDataset
6
-
7
-
8
class CCSBUDataset(BaseDataset):
    """Webdataset-backed CC/SBU image-caption dataset streamed from tar shards."""

    def __init__(self, vis_processor, text_processor, location):
        # vis_processor: callable applied to each decoded PIL image.
        # text_processor: callable applied to each raw caption string.
        # location: shard URL/pattern handed to wds.ResampledShards.
        super().__init__(vis_processor=vis_processor, text_processor=text_processor)

        # Streaming pipeline: resample shards indefinitely, shuffle with a
        # 1000-sample buffer, decode to PIL RGB, pair each image with its JSON
        # metadata, then map into the model input dict. Failures at any stage
        # are logged and skipped (warn_and_continue).
        self.inner_dataset = wds.DataPipeline(
            wds.ResampledShards(location),
            wds.tarfile_to_samples(handler=wds.warn_and_continue),
            wds.shuffle(1000, handler=wds.warn_and_continue),
            wds.decode("pilrgb", handler=wds.warn_and_continue),
            wds.to_tuple("jpg", "json", handler=wds.warn_and_continue),
            wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),
            wds.map(self.to_dict, handler=wds.warn_and_continue),
        )

    def to_dict(self, sample):
        # sample is (processed_image, metadata_json) produced by to_tuple above.
        return {
            "image": sample[0],
            "text_input": self.text_processor(sample[1]["caption"]),
            "type":'image',
        }
28
-
29
-
30
class CCSBUAlignDataset(CaptionDataset):
    """Aligned CC/SBU caption dataset loaded from local image files."""

    def __getitem__(self, index):
        """Return one (image, caption) pair keyed by annotation index."""

        # TODO this assumes image input, not general enough
        ann = self.annotation[index]

        # Images are stored as "<image_id>.jpg" under vis_root.
        img_file = '{}.jpg'.format(ann["image_id"])
        image_path = os.path.join(self.vis_root, img_file)
        image = Image.open(image_path).convert("RGB")

        image = self.vis_processor(image)
        # NOTE(review): the caption is returned unprocessed here (no
        # text_processor call) — confirm whether that is intentional.
        caption = ann["caption"]

        return {
            "image": image,
            "text_input": caption,
            "image_id": self.img_ids[ann["image_id"]],
            "type":'image',
        }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_g_a_s_p.py DELETED
@@ -1,55 +0,0 @@
1
- from fontTools.misc.textTools import safeEval
2
- from . import DefaultTable
3
- import struct
4
-
5
-
6
- GASP_SYMMETRIC_GRIDFIT = 0x0004
7
- GASP_SYMMETRIC_SMOOTHING = 0x0008
8
- GASP_DOGRAY = 0x0002
9
- GASP_GRIDFIT = 0x0001
10
-
11
-
12
class table__g_a_s_p(DefaultTable.DefaultTable):
    """The 'gasp' (grid-fitting and scan-conversion procedure) table."""

    def decompile(self, data, ttFont):
        # Header: uint16 version followed by uint16 number of ranges.
        self.version, numRanges = struct.unpack(">HH", data[:4])
        assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version
        data = data[4:]
        # gaspRange maps rangeMaxPPEM -> rangeGaspBehavior flag bits.
        self.gaspRange = {}
        for i in range(numRanges):
            rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4])
            self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior)
            data = data[4:]
        assert not data, "too much data"

    def compile(self, ttFont):
        version = 0  # ignore self.version
        numRanges = len(self.gaspRange)
        data = b""
        # Ranges must be serialized in ascending PPEM order.
        items = sorted(self.gaspRange.items())
        for rangeMaxPPEM, rangeGaspBehavior in items:
            data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior)
            # Any behavior bit beyond GRIDFIT/DOGRAY (the symmetric smoothing /
            # gridfit flags) requires format version 1.
            if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY):
                version = 1
        data = struct.pack(">HH", version, numRanges) + data
        return data

    def toXML(self, writer, ttFont):
        # Emit one <gaspRange> element per range, in ascending PPEM order.
        items = sorted(self.gaspRange.items())
        for rangeMaxPPEM, rangeGaspBehavior in items:
            writer.simpletag(
                "gaspRange",
                [
                    ("rangeMaxPPEM", rangeMaxPPEM),
                    ("rangeGaspBehavior", rangeGaspBehavior),
                ],
            )
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Ignore any element other than <gaspRange>.
        if name != "gaspRange":
            return
        if not hasattr(self, "gaspRange"):
            self.gaspRange = {}
        self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(
            attrs["rangeGaspBehavior"]
        )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Login-9c3cc0eb.css DELETED
@@ -1 +0,0 @@
1
/* Svelte-scoped styles for the Gradio login screen: centered column layout,
   heading/auth text colors, and credential-error emphasis, all driven by
   theme CSS variables. */
.wrap.svelte-1ogxbi0{display:flex;flex-direction:column;justify-content:center;align-items:center;margin-top:var(--size-3);background:var(--background-fill-primary);width:var(--size-full)}h2.svelte-1ogxbi0{margin-bottom:var(--size-3);color:var(--body-text-color);font-weight:var(--section-header-text-weight);font-size:var(--text-xl)}.auth.svelte-1ogxbi0{margin-top:var(--size-1);margin-bottom:var(--size-1);color:var(--body-text-color)}.creds.svelte-1ogxbi0{margin-top:var(--size-4);margin-bottom:var(--size-4);color:var(--error-text-color);font-weight:var(--weight-semibold)}
 
 
spaces/Denliner/wd-v1-4-tags/app.py DELETED
@@ -1,289 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import argparse
4
- import functools
5
- import html
6
- import os
7
-
8
- import gradio as gr
9
- import huggingface_hub
10
- import numpy as np
11
- import onnxruntime as rt
12
- import pandas as pd
13
- import piexif
14
- import piexif.helper
15
-
16
- import PIL.Image
17
-
18
- from Utils import dbimutils
19
-
20
TITLE = "WaifuDiffusion v1.4 Tags"
DESCRIPTION = """
This is an edited version of SmilingWolf's wd-1.4 taggs, which I have modified so that you don't have to remove the commas when you label an image for a booru website

https://huggingface.co/spaces/SmilingWolf/wd-v1-4-tags

Demo for:
- [SmilingWolf/wd-v1-4-moat-tagger-v2](https://huggingface.co/SmilingWolf/wd-v1-4-moat-tagger-v2)
- [SmilingWolf/wd-v1-4-swinv2-tagger-v2](https://huggingface.co/SmilingWolf/wd-v1-4-convnext-tagger-v2)
- [SmilingWolf/wd-v1-4-convnext-tagger-v2](https://huggingface.co/SmilingWolf/wd-v1-4-convnext-tagger-v2)
- [SmilingWolf/wd-v1-4-vit-tagger-v2](https://huggingface.co/SmilingWolf/wd-v1-4-vit-tagger-v2)
- [SmilingWolf/wd-v1-4-convnextv2-tagger-v2](https://huggingface.co/SmilingWolf/wd-v1-4-convnextv2-tagger-v2)
Includes "ready to copy" prompt and a prompt analyzer.

Modified from [NoCrypt/DeepDanbooru_string](https://huggingface.co/spaces/NoCrypt/DeepDanbooru_string)
Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru)

PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)

Example image by [ほし☆☆☆](https://www.pixiv.net/en/users/43565085)
"""

# Hub token used to download the model repos; raises KeyError at import if unset.
HF_TOKEN = os.environ["HF_TOKEN"]
# ONNX tagger repositories selectable in the UI.
MOAT_MODEL_REPO = "SmilingWolf/wd-v1-4-moat-tagger-v2"
SWIN_MODEL_REPO = "SmilingWolf/wd-v1-4-swinv2-tagger-v2"
CONV_MODEL_REPO = "SmilingWolf/wd-v1-4-convnext-tagger-v2"
CONV2_MODEL_REPO = "SmilingWolf/wd-v1-4-convnextv2-tagger-v2"
VIT_MODEL_REPO = "SmilingWolf/wd-v1-4-vit-tagger-v2"
# Every repo ships the same file names for weights and tag metadata.
MODEL_FILENAME = "model.onnx"
LABEL_FILENAME = "selected_tags.csv"
50
-
51
-
52
def parse_args() -> argparse.Namespace:
    """Parse the command-line options controlling thresholds and sharing."""
    parser = argparse.ArgumentParser()
    # The three score options share the same type, so register them in a loop.
    for flag, default in (
        ("--score-slider-step", 0.05),
        ("--score-general-threshold", 0.35),
        ("--score-character-threshold", 0.85),
    ):
        parser.add_argument(flag, type=float, default=default)
    parser.add_argument("--share", action="store_true")
    return parser.parse_args()
59
-
60
-
61
def load_model(model_repo: str, model_filename: str) -> rt.InferenceSession:
    """Download an ONNX tagger from the Hugging Face Hub and open a session.

    Args:
        model_repo: Hub repo id, e.g. "SmilingWolf/wd-v1-4-moat-tagger-v2".
        model_filename: file name inside the repo (always "model.onnx" here).
    """
    path = huggingface_hub.hf_hub_download(
        model_repo, model_filename, use_auth_token=HF_TOKEN
    )
    model = rt.InferenceSession(path)
    return model
67
-
68
-
69
# Maps the UI model names to their Hugging Face repositories.
_MODEL_REPOS = {
    "MOAT": MOAT_MODEL_REPO,
    "SwinV2": SWIN_MODEL_REPO,
    "ConvNext": CONV_MODEL_REPO,
    "ViT": VIT_MODEL_REPO,
    "ConvNextV2": CONV2_MODEL_REPO,
}


def change_model(model_name):
    """Load (or reload) the tagger named `model_name` and cache it.

    Args:
        model_name: one of the keys of `_MODEL_REPOS`.

    Returns:
        The freshly loaded onnxruntime InferenceSession, also stored in
        `loaded_models[model_name]`.

    Raises:
        ValueError: if `model_name` is not a supported model. (The original
            if/elif chain left `model` unbound and crashed with
            UnboundLocalError instead.)
    """
    global loaded_models
    try:
        repo = _MODEL_REPOS[model_name]
    except KeyError:
        raise ValueError(f"unknown model name: {model_name!r}") from None
    loaded_models[model_name] = load_model(repo, MODEL_FILENAME)
    return loaded_models[model_name]
84
-
85
-
86
def load_labels() -> tuple[list[str], list[np.int64], list[np.int64], list[np.int64]]:
    """Download the tag CSV and split it into rating/general/character indexes.

    The original annotation (`-> list[str]`) was wrong: this returns a
    4-tuple (tag_names, rating_indexes, general_indexes, character_indexes),
    where the index lists point into tag_names by CSV category
    (9 = rating, 0 = general, 4 = character).
    """
    path = huggingface_hub.hf_hub_download(
        MOAT_MODEL_REPO, LABEL_FILENAME, use_auth_token=HF_TOKEN
    )
    df = pd.read_csv(path)

    tag_names = df["name"].tolist()
    rating_indexes = list(np.where(df["category"] == 9)[0])
    general_indexes = list(np.where(df["category"] == 0)[0])
    character_indexes = list(np.where(df["category"] == 4)[0])
    return tag_names, rating_indexes, general_indexes, character_indexes
97
-
98
-
99
def plaintext_to_html(text):
    """HTML-escape `text` and join its lines with <br> inside a <p> element."""
    escaped_lines = (html.escape(line) for line in text.split("\n"))
    return "<p>" + "<br>\n".join(escaped_lines) + "</p>"
104
-
105
-
106
def predict(
    image: PIL.Image.Image,
    model_name: str,
    general_threshold: float,
    character_threshold: float,
    tag_names: list[str],
    rating_indexes: list[np.int64],
    general_indexes: list[np.int64],
    character_indexes: list[np.int64],
):
    """Tag `image` with the selected model and extract embedded PNG metadata.

    Returns a 7-tuple: (prompt-escaped tag string, raw comma-separated string,
    space-joined booru string, rating scores, character tags above threshold,
    general tags above threshold, HTML dump of the image's metadata).
    """
    global loaded_models

    # Keep the untouched original around for metadata extraction below.
    rawimage = image

    # Lazily (re)load the requested model on first use.
    model = loaded_models[model_name]
    if model is None:
        model = change_model(model_name)

    # The model input is NHWC; height == width for these taggers.
    _, height, width, _ = model.get_inputs()[0].shape

    # Alpha to white
    image = image.convert("RGBA")
    new_image = PIL.Image.new("RGBA", image.size, "WHITE")
    new_image.paste(image, mask=image)
    image = new_image.convert("RGB")
    image = np.asarray(image)

    # PIL RGB to OpenCV BGR
    image = image[:, :, ::-1]

    # Pad to square, resize to the model's input size, add the batch axis.
    image = dbimutils.make_square(image, height)
    image = dbimutils.smart_resize(image, height)
    image = image.astype(np.float32)
    image = np.expand_dims(image, 0)

    input_name = model.get_inputs()[0].name
    label_name = model.get_outputs()[0].name
    probs = model.run([label_name], {input_name: image})[0]

    labels = list(zip(tag_names, probs[0].astype(float)))

    # First 4 labels are actually ratings: pick one with argmax
    ratings_names = [labels[i] for i in rating_indexes]
    rating = dict(ratings_names)

    # Then we have general tags: pick any where prediction confidence > threshold
    general_names = [labels[i] for i in general_indexes]
    general_res = [x for x in general_names if x[1] > general_threshold]
    general_res = dict(general_res)

    # Everything else is characters: pick any where prediction confidence > threshold
    character_names = [labels[i] for i in character_indexes]
    character_res = [x for x in character_names if x[1] > character_threshold]
    character_res = dict(character_res)

    # Sort general tags by confidence, then build the three output strings:
    # a = parenthesis-escaped prompt, c = raw comma list, d = booru style.
    b = dict(sorted(general_res.items(), key=lambda item: item[1], reverse=True))
    a = (
        ", ".join(list(b.keys()))
        .replace("_", " ")
        .replace("(", "\(")
        .replace(")", "\)")
    )
    c = ", ".join(list(b.keys()))
    d = " ".join(list(b.keys()))
    items = rawimage.info
    geninfo = ""

    # Recover generation parameters possibly stored in the EXIF UserComment.
    if "exif" in rawimage.info:
        exif = piexif.load(rawimage.info["exif"])
        exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b"")
        try:
            exif_comment = piexif.helper.UserComment.load(exif_comment)
        except ValueError:
            # Not a standard-encoded comment; fall back to a lossy decode.
            exif_comment = exif_comment.decode("utf8", errors="ignore")

        items["exif comment"] = exif_comment
        geninfo = exif_comment

    # Drop noisy structural metadata before rendering the info HTML.
    for field in [
        "jfif",
        "jfif_version",
        "jfif_unit",
        "jfif_density",
        "dpi",
        "exif",
        "loop",
        "background",
        "timestamp",
        "duration",
    ]:
        items.pop(field, None)

    # PNG-style "parameters" metadata takes precedence over the EXIF comment.
    geninfo = items.get("parameters", geninfo)

    info = f"""
    <p><h4>PNG Info</h4></p>
    """
    for key, text in items.items():
        info += (
            f"""
    <div>
    <p><b>{plaintext_to_html(str(key))}</b></p>
    <p>{plaintext_to_html(str(text))}</p>
    </div>
    """.strip()
            + "\n"
        )

    # NOTE(review): this branch is unreachable — `info` is initialized with a
    # non-empty header above and only ever grows.
    if len(info) == 0:
        message = "Nothing found in the image."
        info = f"<div><p>{message}<p></div>"

    return (a, c,d, rating, character_res, general_res, info)
219
-
220
-
221
def main():
    """Build the Gradio tagging UI and launch it."""
    global loaded_models
    # Models are loaded lazily by predict(); only MOAT is warmed up at startup.
    loaded_models = {
        "MOAT": None,
        "SwinV2": None,
        "ConvNext": None,
        "ConvNextV2": None,
        "ViT": None,
    }

    args = parse_args()

    change_model("MOAT")

    tag_names, rating_indexes, general_indexes, character_indexes = load_labels()

    # Bind the label metadata once so the Gradio callback only receives UI inputs.
    func = functools.partial(
        predict,
        tag_names=tag_names,
        rating_indexes=rating_indexes,
        general_indexes=general_indexes,
        character_indexes=character_indexes,
    )

    gr.Interface(
        fn=func,
        inputs=[
            gr.Image(type="pil", label="Input"),
            gr.Radio(
                ["MOAT", "SwinV2", "ConvNext", "ConvNextV2", "ViT"],
                value="MOAT",
                label="Model",
            ),
            gr.Slider(
                0,
                1,
                step=args.score_slider_step,
                value=args.score_general_threshold,
                label="General Tags Threshold",
            ),
            gr.Slider(
                0,
                1,
                step=args.score_slider_step,
                value=args.score_character_threshold,
                label="Character Tags Threshold",
            ),
        ],
        outputs=[
            gr.Textbox(label="Output (string)"),
            gr.Textbox(label="Output (raw string)"),
            gr.Textbox(label="Output (booru string)"),
            gr.Label(label="Rating"),
            gr.Label(label="Output (characters)"),
            gr.Label(label="Output (tags)"),
            gr.HTML(),
        ],
        examples=[["power.jpg", "MOAT", 0.1, 0.85]],
        title=TITLE,
        description=DESCRIPTION,
        allow_flagging="never",
    ).launch(
        enable_queue=True,
        share=args.share,
    )
286
-
287
-
288
- if __name__ == "__main__":
289
- main()