parquet-converter committed on
Commit b7db385 · 1 parent: 33e13ec

Update parquet files (step 38 of 249)

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bandicam 4.4 Crack Full Version [32-bit 64-bit] [NEW].md +0 -153
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Hard Sentinel Today and Boost Your Hard Drive Performance and Reliability.md +0 -50
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download bios folder for ps3 Improve your PS3 performance and security with the latest system software update.md +0 -115
  4. spaces/1gistliPinn/ChatGPT4/Examples/2 Unlimited Discography (5 Albums 4 Singles) 1992 1998 FLAC LOSSLESS.md +0 -6
  5. spaces/1gistliPinn/ChatGPT4/Examples/CROCODILE PHYSICS 605 Torrent.md +0 -10
  6. spaces/1phancelerku/anime-remove-background/Cloneapp Messenger APK A Powerful Tool for WhatsApp Cloning and Direct Chatting.md +0 -94
  7. spaces/1phancelerku/anime-remove-background/Conquest 2 APK A Game with Hidden Treasures Fierce Enemies and New Weapons.md +0 -82
  8. spaces/1phancelerku/anime-remove-background/Destroy the Planet with Solar Smash APK - Free Download for Android.md +0 -168
  9. spaces/801artistry/RVC801/demucs/wav.py +0 -174
  10. spaces/A1draw-12196y/DeepDanbooru_string/app.py +0 -185
  11. spaces/A666sxr/Genshin_TTS/text/symbols.py +0 -13
  12. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/syntaspeech/syntactic_graph_buider.py +0 -294
  13. spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/portaspeech/portaspeech.py +0 -233
  14. spaces/Abdullahw72/bark-voice-cloning/hubert/__init__.py +0 -0
  15. spaces/AfrodreamsAI/afrodreams/Home.py +0 -164
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/dialog-quest/QuestMethods.js +0 -18
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/checkbox/Factory.d.ts +0 -19
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/skew/Skew.js +0 -2
  19. spaces/Akmyradov/TurkmenSpeechRecogntion/README.md +0 -12
  20. spaces/AlekseyKorshuk/thin-plate-spline-motion-model/run.py +0 -89
  21. spaces/Aloento/9Nine-PITS/data_utils.py +0 -358
  22. spaces/Amrrs/hubble-jwst-compare/README.md +0 -13
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/conceptual/philosophy.md +0 -110
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_pndm.py +0 -242
  25. spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py +0 -13
  26. spaces/Andy1621/uniformer_image_detection/mmdet/core/evaluation/mean_ap.py +0 -469
  27. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/_distutils_hack/__init__.py +0 -222
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/contrib/socks.py +0 -216
  29. spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/factory.py +0 -277
  30. spaces/BOXNYC/shirley/README.md +0 -17
  31. spaces/Banbri/zcvzcv/src/app/interface/bottom-bar/index.tsx +0 -187
  32. spaces/Bart92/RVC_HF/extract_locale.py +0 -34
  33. spaces/Benson/text-generation/Examples/Camin Simulador ltimo Coche Descarga Apk.md +0 -48
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py +0 -277
  35. spaces/Boadiwaa/Recipes/openai/api_resources/edit.py +0 -32
  36. spaces/BwayKC/prompthero-openjourney-v2/README.md +0 -14
  37. spaces/CVPR/LIVE/pybind11/include/pybind11/detail/common.h +0 -837
  38. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/uninitialized_copy.h +0 -23
  39. spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py +0 -14
  40. spaces/Cartinoe5930/LLMAgora/result/MMLU/README.md +0 -0
  41. spaces/Catmeow/Face2Painting_From_Photo/face_detection.py +0 -140
  42. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/PpmImagePlugin.py +0 -347
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/TiffTags.py +0 -560
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_server.py +0 -62
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/roundTools.py +0 -109
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/inference/_text_generation.py +0 -479
  47. spaces/Dinoking/Guccio-AI-Designer/models/stylegan2/stylegan2-pytorch/distributed.py +0 -126
  48. spaces/ECCV2022/bytetrack/yolox/data/dataloading.py +0 -178
  49. spaces/Eddycrack864/Applio-Inference/venv.sh +0 -1
  50. spaces/EronSamez/RVC_HFmeu/Applio-RVC-Fork/utils/clonerepo_experimental.py +0 -253
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bandicam 4.4 Crack Full Version [32-bit 64-bit] [NEW].md DELETED
@@ -1,153 +0,0 @@
-
- <h1>Bandicam 4.4 Crack Full Version [32-bit 64-bit]</h1>
- <p>Do you want to record your screen activities with high quality and low file size? Do you want to capture your gaming sessions, video chats, webinars, or tutorials with ease? Do you want to edit and share your videos without any hassle? If you answered yes to any of these questions, then you need <strong>Bandicam 4.4 Crack Full Version</strong>, a powerful and versatile screen recorder for Windows.</p>
- <h2>Bandicam 4.4 Crack Full Version [32-bit 64-bit]</h2><br /><p><b><b>Download File</b> ->->->-> <a href="https://byltly.com/2uKAd3">https://byltly.com/2uKAd3</a></b></p><br /><br />
- <h2>What is Bandicam and why do you need it?</h2>
- <p>Bandicam is a lightweight screen recorder that allows you to record your screen activities to a video file. It has three recording modes: game recording, screen recording, and device recording. You can use Bandicam to record anything on your PC, such as games, videos, webcams, desktops, HDMI devices, and more.</p>
- <h3>Bandicam is a lightweight screen recorder for Windows</h3>
- <p>One of the main advantages of Bandicam is that it is very light on your system resources. It uses much lower CPU/GPU/RAM usage than other similar software, which means it causes less lag and does not affect your PC performance. Bandicam also compresses the video while recording, which results in smaller file sizes and faster upload speeds.</p>
- <h3>Bandicam can record games, videos, and desktop activities</h3>
- <p>Another advantage of Bandicam is that it can record various types of content on your PC. You can use the game recording mode to capture your gameplay with high FPS and HD quality. You can use the screen recording mode to record any area of your screen, such as web browsers, PowerPoint presentations, Skype calls, etc. You can also use the device recording mode to record external devices connected to your PC, such as webcams, smartphones, game consoles, etc.</p>
- <h3>Bandicam has many features and benefits for users</h3>
- <p>Bandicam also has many features and benefits that make it a great choice for users who want to record their screen activities. Some of these features are:</p>
- <p>Bandicam 4.4 full version free download with crack<br />
- How to activate Bandicam 4.4 with crack for lifetime<br />
- Bandicam 4.4 screen recorder crack download for Windows<br />
- Bandicam 4.4 crack serial key generator<br />
- Bandicam 4.4 crack patch keygen<br />
- Bandicam 4.4 crack license key activation code<br />
- Bandicam 4.4 crack registration key product key<br />
- Bandicam 4.4 crack no watermark no lag<br />
- Bandicam 4.4 crack full features unlocked<br />
- Bandicam 4.4 crack latest version updated<br />
- Bandicam 4.4 crack for Mac OS X<br />
- Bandicam 4.4 crack for Linux Ubuntu<br />
- Bandicam 4.4 crack portable edition<br />
- Bandicam 4.4 crack offline installer setup<br />
- Bandicam 4.4 crack online activation tool<br />
- Bandicam 4.4 crack working method 2023<br />
- Bandicam 4.4 crack review and tutorial<br />
- Bandicam 4.4 crack comparison with other screen recorders<br />
- Bandicam 4.4 crack best settings for high quality recording<br />
- Bandicam 4.4 crack tips and tricks to improve performance<br />
- Bandicam 4.4 crack alternatives and competitors<br />
- Bandicam 4.4 crack pros and cons advantages and disadvantages<br />
- Bandicam 4.4 crack system requirements and compatibility<br />
- Bandicam 4.4 crack technical support and customer service<br />
- Bandicam 4.4 crack refund policy and money back guarantee<br />
- Bandicam 4.4 crack discount coupon code and promo offer<br />
- Bandicam 4.4 crack testimonials and user feedback<br />
- Bandicam 4.4 crack FAQs and solutions to common problems<br />
- Bandicam 4.4 crack download link and installation guide<br />
- Bandicam 4.4 crack virus scan and malware check<br />
- Bandicam 4.4 crack safe and secure download source<br />
- Bandicam 4.4 crack legal and ethical issues<br />
- Bandicam 4.4 crack risks and consequences of using cracked software<br />
- Bandicam 4.4 crack benefits and advantages of using original software<br />
- Bandicam 4.</p>
- <ul>
- <li>You can record up to 4K Ultra HD video at resolutions up to 3840 x 2160 pixels.</li>
- <li>You can record for over 24 hours without stopping with the auto-complete recording function.</li>
- <li>You can upload your video to YouTube without converting it (720p/1080p/2160p full HD video can be made).</li>
- <li>You can use hardware acceleration to improve the recording performance (Nvidia NVENC/CUDA, Intel Quick Sync Video, AMD APP).</li>
- <li>You can use real-time drawing and mouse effects to enhance your video (draw lines, boxes, highlights, cursor effects).</li>
- <li>You can edit and save your recorded file in various formats (AVI, MP4) and codecs (MPEG-1, Xvid, MJPEG, MP2, PCM).</li>
- </ul>
- <h2>How to download and install Bandicam 4.4 Crack Full Version</h2>
- <p>If you want to enjoy all the features and benefits of Bandicam without any limitations or watermarks, you need to download and install Bandicam 4.4 Crack Full Version on your PC. Here are the steps you need to follow:</p>
- <h3>Download Bandicam 4.4 Crack from a reliable source</h3>
- <p>The first step is to download Bandicam 4.4 Crack from a reliable source on the internet. You can find many websites that offer Bandicam 4.4 Crack for free download, but be careful not to download any malware or viruses along with it. One of the trusted sources you can use is <a href="https://www.techidea.net/bandicam-cracked/">Tech Idea</a>, which provides a safe and secure download link for Bandicam 4.4 Crack. You can also check out other sources like <a href="https://www.filehorse.com/download-bandicam/">FileHorse</a> or <a href="https://www.bandicam.com/32bit/">Bandicam official website</a> if you have a 32-bit Windows system.</p>
- <h3>Install Bandicam 4.4 Crack on your PC</h3>
- <p>The next step is to install Bandicam 4.4 Crack on your PC. To do this, you need to follow these simple steps:</p>
- <ol>
- <li>Extract the downloaded file using WinRAR or any other extraction tool.</li>
- <li>Run the setup file as administrator and follow the installation wizard.</li>
- <li>Select the destination folder where you want to install Bandicam.</li>
- <li>Click on install and wait for the installation process to complete.</li>
- <li>Do not run Bandicam after installation.</li>
- </ol>
- <h3>Activate Bandicam 4.4 Crack with the serial number</h3>
- <p>The final step is to activate Bandicam 4.4 Crack with the serial number that comes with the crack file. To do this, you need to follow these simple steps:</p>
- <ol>
- <li>Copy the crack file from the extracted folder and paste it into the installation folder of Bandicam.</li>
- <li>Run Bandicam as administrator and click on register.</li>
- <li>Enter any email address and paste the serial number from the crack file.</li>
- <li>Click on register and enjoy Bandicam 4.4 Crack Full Version.</li>
- </ol>
- <h2>How to use Bandicam 4.4 Crack Full Version to record your screen</h2>
- <p>Now that you have downloaded and installed Bandicam 4.4 Crack Full Version on your PC, you are ready to use it to record your screen activities. Here are some tips on how to use Bandicam 4.4 Crack Full Version effectively:</p>
- <h3>Select the recording mode and area</h3>
- <p>The first thing you need to do is select the recording mode that suits your needs. You can choose between game recording mode (for capturing games), screen recording mode (for capturing any area of your screen), or device recording mode (for capturing external devices). To select a mode, click on one of the icons at the top of the main window of Bandicam.</p>
- <p>The next thing you need to do is select the area that you want to record. You can either choose a full-screen window (for DirectX/OpenGL games) or a user-defined area (for other applications). To select an area, click on one of the buttons at the top-left corner of the main window of Bandicam.</p>
- <h3>Adjust the settings and options</h3>
- <p>The next thing you need to do is adjust the settings and options that affect the quality and performance of your recording. You can access these settings by clicking on one of the buttons at the top-right corner of the main window of Bandicam.</p>
- <p>Some of the settings and options that you can adjust are:</p>
- <ul>
- <li>The video format (AVI or MP4) and codec (MPEG-1, Xvid, MJPEG) that determine the size and compatibility of your recorded file.</li>
- <li>The FPS (frames per second) that determines how smooth your video will be.</li>
- <li>The quality (bitrate) that determines how clear your video will be.</li></ul>
- <h3>Start and stop the recording</h3>
- <p>The next thing you need to do is start and stop the recording. To do this, you need to follow these simple steps:</p>
- <ol>
- <li>Click on the red record button at the top-right corner of the main window of Bandicam or press the hotkey (F12 by default) to start the recording.</li>
- <li>You will see a green FPS number on the screen indicating that the recording is in progress.</li>
- <li>Click on the same button or press the same hotkey again to stop the recording.</li>
- <li>You will see a message saying that the recording has been completed and saved.</li>
- </ol>
- <h3>Edit and save the recorded file</h3>
- <p>The last thing you need to do is edit and save the recorded file. To do this, you need to follow these simple steps:</p>
- <ol>
- <li>Click on the edit button at the top-right corner of the main window of Bandicam or open Bandicut, a fast and lossless video cutter that comes with Bandicam.</li>
- <li>Select the recorded file from the list and click on open.</li>
- <li>You can use Bandicut to cut, split, join, or trim your video as you wish.</li>
- <li>Click on start and select the encoding mode or high-speed mode depending on your preference.</li>
- <li>Click on encoding settings and adjust the format, codec, quality, and FPS of your video.</li>
- <li>Click on start again and wait for the editing process to finish.</li>
- <li>You can find your edited file in the output folder of Bandicut.</li>
- </ol>
- <h2>Tips and tricks to get the best out of Bandicam 4.4 Crack Full Version</h2>
- <p>Bandicam 4.4 Crack Full Version is a powerful and versatile screen recorder that can help you record your screen activities with high quality and low file size. However, there are some tips and tricks that can help you get even better results with Bandicam 4.4 Crack Full Version. Here are some of them:</p>
- <h3>Use hardware acceleration to improve performance</h3>
- <p>One of the tips that can help you improve the performance of Bandicam 4.4 Crack Full Version is to use hardware acceleration. Hardware acceleration is a feature that allows Bandicam to use your GPU (graphics card) instead of your CPU (processor) to encode your video. This can reduce the CPU usage and increase the FPS of your recording. To use hardware acceleration, you need to follow these simple steps:</p>
- <ol>
- <li>Click on settings under the video tab of Bandicam.</li>
- <li>Select H264 (Nvidia NVENC), H264 (Intel Quick Sync Video), or H264 (AMD APP) as your codec depending on your GPU model.</li>
- <li>Click on OK and enjoy faster and smoother recording with Bandicam.</li>
- </ol>
- <h3>Use real-time drawing and mouse effects to enhance your video</h3>
- <p>Another tip that can help you enhance your video with Bandicam 4.4 Crack Full Version is to use real-time drawing and mouse effects. Real-time drawing and mouse effects are features that allow you to draw lines, boxes, highlights, cursor effects, or text overlays on your screen while recording. This can help you emphasize important points, add annotations, or create tutorials with Bandicam. To use real-time drawing and mouse effects, you need to follow these simple steps:</p>
- <ol>
- <li>Click on settings under the video tab of Bandicam.</li>
- <li>Select show FPS overlay / show mouse cursor / highlight mouse cursor / add mouse click effect / add webcam overlay / add logo / add text as you wish.</li>
- <li>Click on OK and start recording with Bandicam.</li>
- <li>You can use hotkeys (Ctrl+Alt+1/2/3/4 by default) to draw lines, boxes, highlights, or text overlays on your screen while recording.</li>
- <li>You can also use hotkeys (Ctrl+Alt+H by default) to hide or show all drawings while recording.</li>
- </ol>
- <h3>Use the auto-complete recording function to record for a long time</h3>
- <p>The last tip that can help you record for a long time with Bandicam 4.4 Crack Full Version is to use the auto-complete recording function. Auto-complete recording is a feature that allows Bandicam to automatically stop or split your recording after a certain time or file size. This can help you avoid recording videos that are too long and hard to edit or upload. To use auto-complete recording, you need to follow these simple steps:</p>
- <ol>
- <li>Click on settings under the general tab of Bandicam.</li>
- <li>Select auto complete recording after / split file every as you wish.</li>
- <li>Enter the time or file size limit for your recording.</li>
- <li>Click on OK and start recording with Bandicam.</li>
- <li>Bandicam will automatically stop or split your recording after reaching the limit you set.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>In conclusion, Bandicam 4.4 Crack Full Version is a powerful and versatile screen recorder that can help you record your screen activities with high quality and low file size. It has many features and benefits for users who want to capture their gameplay, video chats, webinars, tutorials, or anything else on their PC. There are also some tips and tricks that can help you get even better results with Bandicam 4.4 Crack Full Version. If you want to enjoy all these features and benefits without any limitations or watermarks, you need to download and install Bandicam 4.4 Crack Full Version from a reliable source on the internet. You also need to activate it with the serial number that comes with the crack file. Then, you can use it to record your screen activities with ease and share them with others without any hassle.</p>
- <h2>Frequently Asked Questions</h2>
- <p>Here are some frequently asked questions about Bandicam 4.4 Crack Full Version:</p>
- <ol>
- <li><strong>Is Bandicam 4.4 Crack Full Version safe?</strong></li>
- <p>Bandicam 4.4 Crack Full Version is safe if you download it from a reliable source on the internet. However, be careful not to download any malware or viruses along with it. You should also scan your PC with an antivirus program after installing it.</p>
- <li><strong>Is Bandicam 4.4 Crack Full Version legal?</strong></li>
- <p>Bandicam 4.4 Crack Full Version is not legal because it violates the terms and conditions of Bandicam Company, which owns the rights to Bandicam software. You should buy a license from the <a href="https://www.bandicam.com/buy/">Bandicam official website</a> if you want to support them and use their software legally.</p>
- <li><strong>What are some alternatives to Bandicam 4.4 Crack Full Version?</strong></li>
- <p>If you don't want to use Bandicam 4.4 Crack Full Version for any reason, there are some alternatives that you can try instead. Some of them are OBS Studio (free), Camtasia Studio (paid), Fraps (paid), ScreenFlow (paid), etc.</p>
- <li><strong>How can I contact Bandicam support?</strong></li>
- <p>If you have any questions or issues related to Bandicam software, you can contact <a href="https://www.bandisoft.com/support/">Bandisoft support</a> by email or forum. They will try their best to help you solve your problems.</p>
- <li><strong>How can I learn more about Bandicam?</strong></li>
- <p>If you want to learn more about Bandicam software, you can visit the <a href="https://www.bandisoft.com/">Bandisoft website</a> where you can find more information about their products, features, tutorials, reviews, etc.</p>
- </ol>
- </p><br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Hard Sentinel Today and Boost Your Hard Drive Performance and Reliability.md DELETED
@@ -1,50 +0,0 @@
-
- <h1>How to Download Hard Sentinel and Monitor Your Hard Drive Health</h1>
- <p>Hard Sentinel is a program that helps you check and monitor the health of your hard drive. It can detect and report any potential problems, such as bad sectors, temperature, performance, and SMART attributes. It can also alert you if your hard drive is failing or needs to be replaced.</p>
- <h2>download hard sentinel</h2><br /><p><b><b>Download</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://byltly.com/2uKwjc">https://byltly.com/2uKwjc</a></b></p><br /><br />
- <p>If you want to download Hard Sentinel and use it to keep an eye on your hard drive health, here are the steps you need to follow:</p>
- <ol>
- <li>Go to the official website of Hard Sentinel and click on the Download button. You can choose between the free trial version or the pro version that offers more features and benefits.</li>
- <li>Save the installer file on your computer and run it. Follow the instructions on the screen to complete the installation process.</li>
- <li>Launch Hard Sentinel and select the hard drive you want to monitor. You can see the status, temperature, performance, and health of your hard drive on the main window.</li>
- <li>If you want to configure the settings and options of Hard Sentinel, click on the Menu button and choose Preferences. You can adjust the alert levels, notification methods, test options, and more.</li>
- <li>If you want to run a test on your hard drive, click on the Test button and choose the type of test you want to perform. You can run a short self-test, an extended self-test, or a random seek test.</li>
- <li>If you want to view the detailed information and SMART attributes of your hard drive, click on the Report button and choose Show All Information. You can also save or print the report for future reference.</li>
- </ol>
- <p>By downloading Hard Sentinel and using it regularly, you can ensure that your hard drive is in good condition and prevent any data loss or damage. You can also improve the performance and lifespan of your hard drive by following some simple tips, such as defragmenting your disk, cleaning up your files, and updating your drivers.</p>
-
- <h2>Why You Need Hard Sentinel to Monitor Your Hard Drive Health</h2>
- <p>Your hard drive is one of the most important components of your computer. It stores all your data, such as your documents, photos, videos, music, and programs. However, your hard drive is also prone to various problems and failures that can cause data loss or corruption. Some of the common causes of hard drive problems are:</p>
- <ul>
- <li>Physical damage, such as shocks, drops, or spills.</li>
- <li>Logical errors, such as bad sectors, file system corruption, or virus infection.</li>
- <li>Mechanical wear and tear, such as aging, overheating, or power surges.</li>
- <li>Human errors, such as accidental deletion, formatting, or partitioning.</li>
- </ul>
- <p>These problems can affect the performance and reliability of your hard drive. They can also lead to data loss or corruption, which can be devastating and costly. That's why you need Hard Sentinel to monitor your hard drive health and prevent any potential disasters.</p>
-
- <h2>How Hard Sentinel Works to Monitor Your Hard Drive Health</h2>
- <p>Hard Sentinel is a program that uses the SMART (Self-Monitoring, Analysis, and Reporting Technology) feature of your hard drive to monitor its health. SMART is a built-in function that tracks various parameters and attributes of your hard drive, such as:</p>
- <p></p>
- <ul>
- <li>Error rate</li>
- <li>Spin-up time</li>
- <li>Reallocated sectors count</li>
- <li>Power-on hours</li>
- <li>Temperature</li>
- <li>And more</li>
- </ul>
- <p>These parameters and attributes can indicate the current and future status of your hard drive. They can also help you identify any potential problems or failures before they become serious. Hard Sentinel analyzes the SMART data and displays it in an easy-to-understand way. It also assigns a health percentage and a performance percentage to your hard drive based on the SMART data. It can alert you if your hard drive is in danger or needs to be replaced.</p>
-
- <h2>The Benefits of Using Hard Sentinel to Monitor Your Hard Drive Health</h2>
- <p>By using Hard Sentinel to monitor your hard drive health, you can enjoy the following benefits:</p>
- <ul>
- <li>You can prevent data loss or corruption by detecting and fixing any problems before they become worse.</li>
- <li>You can improve the performance and speed of your hard drive by optimizing its settings and options.</li>
- <li>You can extend the lifespan of your hard drive by avoiding unnecessary stress and damage.</li>
- <li>You can save time and money by avoiding costly repairs or replacements.</li>
- <li>You can have peace of mind knowing that your data is safe and secure.</li>
- </ul>
- <p>Hard Sentinel is a must-have tool for anyone who cares about their hard drive and their data. It is easy to use, reliable, and affordable. You can download Hard Sentinel today and start monitoring your hard drive health in minutes.</p><br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download bios folder for ps3 Improve your PS3 performance and security with the latest system software update.md DELETED
@@ -1,115 +0,0 @@
-
- <h1>Download Bios Folder for PS3: A Complete Guide</h1>
- <p>If you own a PlayStation 3 (PS3) console, you may have heard of bios or system software. But what is bios and why do you need it? And how can you download bios folder for ps3? In this article, we will answer these questions and provide you with a step-by-step guide on how to download bios folder for ps3 using two different methods: using the internet or using a computer. We will also show you how to reinstall the system software if you ever need to.</p>
- <h2>download bios folder for ps3</h2><br /><p><b><b>Download File</b> &#10040; <a href="https://byltly.com/2uKyEH">https://byltly.com/2uKyEH</a></b></p><br /><br />
- <h2>What is Bios and Why Do You Need It?</h2>
- <p>Bios stands for Basic Input/Output System. It is firmware that controls the hardware and software of your PS3 console. It is stored in a chip on the motherboard of your console and it is loaded into memory when you turn on your console.</p>
- <h3>Bios stands for Basic Input/Output System</h3>
- <p>Bios is responsible for initializing and testing the hardware components of your console, such as the CPU, GPU, RAM, hard disk drive, optical drive, etc. It also provides an interface between the hardware and the operating system (OS) of your console, which is stored on the hard disk drive. The OS allows you to run games, apps, media, and other features on your console.</p>
- <h3>Bios is essential for booting up your PS3 console</h3>
- <p>Without bios, your PS3 console would not be able to start up or function properly. Bios checks if all the hardware components are working correctly and if there are any errors or problems. If everything is OK, bios loads the OS from the hard disk drive into memory and transfers control to it. If there are any issues, bios displays an error message on the screen or flashes a red light on your console.</p>
- <h3>Bios can be updated to improve system performance and security</h3>
- <p>Sony Interactive Entertainment (SIE) regularly releases updates for the bios or system software of your PS3 console. These updates can improve the quality, stability, performance, and security of your console. They can also add new features, settings, options, and compatibility with new games and devices.</p>
- <p>How to download bios folder for ps3 emulator<br />
- Download bios folder for ps3 games on pc<br />
- Where to download bios folder for ps3 rpcs3<br />
- Download bios folder for ps3 iso files<br />
- Download bios folder for ps3 free and easy<br />
- Download bios folder for ps3 windows 10<br />
- Download bios folder for ps3 mac os<br />
- Download bios folder for ps3 linux<br />
- Download bios folder for ps3 android<br />
- Download bios folder for ps3 online<br />
- Download bios folder for ps3 rar<br />
- Download bios folder for ps3 zip<br />
- Download bios folder for ps3 utorrent<br />
- Download bios folder for ps3 mega<br />
- Download bios folder for ps3 mediafire<br />
- Download bios folder for ps3 google drive<br />
- Download bios folder for ps3 dropbox<br />
- Download bios folder for ps3 no survey<br />
- Download bios folder for ps3 no password<br />
- Download bios folder for ps3 no virus<br />
- Download bios folder for ps3 legit<br />
- Download bios folder for ps3 working<br />
- Download bios folder for ps3 updated<br />
- Download bios folder for ps3 latest version<br />
- Download bios folder for ps3 2021<br />
- Download bios folder for ps3 2022<br />
- Download bios folder for ps3 2023<br />
- Download bios folder for ps3 4k resolution<br />
- Download bios folder for ps3 60 fps<br />
- Download bios folder for ps3 best settings<br />
- Download bios folder for ps3 full speed<br />
- Download bios folder for ps3 high compatibility<br />
- Download bios folder for ps3 low end pc<br />
- Download bios folder for ps3 high end pc<br />
- Download bios folder for ps3 laptop<br />
- Download bios folder for ps3 desktop<br />
- Download bios folder for ps3 tutorial<br />
- Download bios folder for ps3 guide<br />
- Download bios folder for ps3 step by step<br />
- Download bios folder for ps3 video<br />
- Download bios folder for ps3 youtube<br />
- Download bios folder for ps3 reddit<br />
- Download bios folder for ps3 quora<br />
- Download bios folder for ps3 forum<br />
- Download bios folder for ps3 blog<br />
- Download bios folder for ps3 website<br />
- Download bios folder for ps3 link<br />
- Download bios folder for ps3 file size<br />
- Download bios folder for ps3 checksum</p>
- <p>It is recommended that you always update your PS3 console to the latest version of the system software. By updating, you can enjoy additional benefits, improved usability, and enhanced security. You can also renew the Blu-ray player encryption key, which is required to play Blu-ray discs on your console.</p>
- <h2>How to Download Bios Folder for PS3 Using the Internet</h2>
- <p>One of the easiest ways to download bios folder for ps3 is using the internet. This method requires a USB drive formatted as FAT32 and a PC or Mac with an internet connection. Here are the steps you need to follow:</p>
- <h3>You need a USB drive formatted as FAT32 and a PC or Mac</h3>
- <p>The first thing you need is a USB drive that has at least 200MB of free space and that is formatted as FAT32. FAT32 is a file system that allows your USB drive to be compatible with both Windows and Mac computers. To format your USB drive as FAT32, you can use tools such as Disk Utility on Mac or Disk Management on Windows.</p>
- <p>You also need a PC or Mac that has an internet connection and that can access the official SIE website. You can use any web browser such as Chrome, Firefox, Safari, etc.</p>
- <h3>You need to create a folder named "PS3" and another folder named "UPDATE" inside it</h3>
- <p>The next thing you need to do is create two folders on your USB drive: one named "PS3" and another one named "UPDATE". These folders are necessary for storing the update file that you will download from the SIE website.</p>
- <p>To create these folders, you can use any file manager such as Finder on Mac or File Explorer on Windows. Simply right-click on your USB drive icon and select New Folder. Name the first folder "PS3" (without quotation marks) and then open it. Inside it, create another folder named "UPDATE" (without quotation marks).</p>
- <h3>You need to download the latest PS3 system software update file and save it as "PS3UPDAT.PUP" in the "UPDATE" folder</h3>
- <p>The final thing you need to do is download the latest PS3 system software update file from the SIE website and save it in the "UPDATE" folder that you created on your USB drive. The update file has a name like "PS3UPDAT.PUP" (without quotation marks) and has a size of about 200MB.</p>
- <p>To download this file, you can use any web browser such as Chrome, Firefox, Safari, etc. Go to this link: https://www.playstation.com/en-us/support/hardware/ps3/system-software/ . This is the official SIE website that provides information and downloads for PS3 system software updates.</p>
- <p>On this website, scroll down until you see a section titled "Update using a computer". Click on this section to expand it. Then click on "Download now". This will start downloading the update file to your computer.</p>
- <p>Once the download is complete, locate the update file on your computer. It should be in your Downloads folder by default. Then copy or drag-and-drop this file into the "UPDATE" folder that you created on your USB drive. Make sure that you rename this file as "PS3UPDAT.PUP" (without quotation marks) if it has a different name.</p>
- <h3>You need to plug the USB device into your PS3 console and follow the on-screen instructions</h3>
- <p>The last thing you need to do is plug your USB device into one of the USB ports of your PS3 console and follow the on-screen instructions to install the update.</p>
- <p>To do this, turn on your PS3 console and go to Settings > System Update > [Update via Storage Media]. The system automatically searches for and finds the update data saved on the storage media or USB device. Press the X button to start the update. Follow the on-screen instructions to complete the update.</p>
- <p>Please note, during an update, do not turn off the system or remove the storage media or USB device. Doing so may cause damage to your system. Also, do not use the network features of your system until all of the update data has been installed.</p>
- <h2>How to Download Bios Folder for PS3 Using a Computer</h2>
- <p>Another way to download bios folder for ps3 is using a computer. This method requires a USB drive formatted as FAT32 and a PC or Mac with a USB cable. Here are the steps you need to follow:</p>
- <h3>You need a USB drive formatted as FAT32 and a PC or Mac</h3>
- <p>The first thing you need is a USB drive that has at least 200MB of free space and that is formatted as FAT32. FAT32 is a file system that allows your USB drive to be compatible with both Windows and Mac computers. To format your USB drive as FAT32, you can use tools such as Disk Utility on Mac or Disk Management on Windows.</p>
- <p>You also need a PC or Mac that has a USB cable that can connect to your PS3 console. You can use any USB cable that has a Type A connector on one end and a Mini-B connector on the other end.</p>
- <h3>You need to download the latest PS3 system software update file and save it as "PS3UPDAT.PUP" on your computer</h3>
- <p>The next thing you need to do is download the latest PS3 system software update file from the SIE website and save it on your computer. The update file has a name like "PS3UPDAT.PUP" (without quotation marks) and has a size of about 200MB.</p>
- <p>To download this file, you can use any web browser such as Chrome, Firefox, Safari, etc. Go to this link: https://www.playstation.com/en-us/support/hardware/ps3/system-software/ . This is the official SIE website that provides information and downloads for PS3 system software updates.</p>
- <p>On this website, scroll down until you see a section titled "Update using a computer". Click on this section to expand it. Then click on "Download now". This will start downloading the update file to your computer.</p>
- <p>Once the download is complete, locate the update file on your computer. It should be in your Downloads folder by default.</p>
- <h3>You need to connect your PS3 console to your computer using a USB cable</h3>
- <p>The next thing you need to do is connect your PS3 console to your computer using a USB cable. Make sure that both your console and your computer are turned off before you do this.</p>
- <p>Plug one end of the USB cable into one of the USB ports of your PS3 console. Plug the other end of the USB cable into one of the USB ports of your computer.</p>
- <h3>You need to start the PS3 system in Safe Mode and select [6] System Update</h3>
- <p>The final thing you need to do is start the PS3 system in Safe Mode and select [6] System Update to install the update from your computer.</p>
- <p>To do this, turn on your PS3 console by pressing and holding the power button until you hear three beeps. The first beep tells you that the PS3 is powering on. Keep holding. After about 5 seconds, the second beep signifies the video reset. After another 5 seconds, the third beep will be a double-beep; you should see this screen:</p>
- <p><img src="https://www.wikihow.com/images/thumb/8/8e/Enter-Safe-Mode-on-a-PlayStation-3-Step-4.jpg/v4-460px-Enter-Safe-Mode-on-a-PlayStation-3-Step-4.jpg.webp" alt="Safe Mode screen" width="460" height="345"></p>
- <p>Connect your controller to the PS3 and press the PS button. The PS3 will proceed to the next screen.</p>
- <p>On this screen, select [6] System Update. The system will search for and find the update data saved on your computer. Press the X button to start the update. Follow the on-screen instructions to complete the update.</p>
- <p>Please note, during an update, do not turn off the system or disconnect the USB cable. Doing so may cause damage to your system. Also, do not use the network features of your system until all of the update data has been installed.</p>
- <h2>How to Reinstall the PS3 Console System Software</h2>
- <p>In some cases, such as after initializing your console, or encountering an error, you may need to reinstall the system software. This is a complete restoration of your system, back to the state it was in when you bought it. You will lose all data if you use this option.</p>
- <p>To reinstall the system software, you need to follow the same steps as downloading bios folder for ps3 using a computer. However, instead of selecting [6] System Update, you need to select [5] Restore PS3 System. This will erase everything on your hard disk drive and install a new copy of the system software. Follow the on-screen instructions to complete the process.</p>
- <h2>Conclusion</h2>
- <p>Downloading bios folder for ps3 is easy and beneficial. It can help you improve your system performance and security, as well as fix any issues that may prevent your console from starting up properly. You can choose between two methods: using the internet or using a computer. You can also reinstall the system software if needed. However, follow the instructions carefully so that you do not lose any data or damage your system. We hope this article was helpful and informative. Happy gaming!</p>
- <h4>FAQs</h4>
- <ul>
- <li>Q: What is the latest version of the PS3 system software? A: The latest version of the PS3 system software as of May 2023 is 4.90. It was released on February 28, 2023 and it improves system performance.</li>
- <li>Q: How can I check the version of my PS3 system software? A: You can check the version of your PS3 system software by going to Settings > System Settings > [System Information]. You will see the version number displayed on the screen.</li>
- <li>Q: How can I back up my data before using safe mode? A: You can back up your data by using the Backup Utility feature in Settings > System Settings > [Backup Utility]. You will need an external storage device such as a USB drive or an external hard drive to store your backup data.</li>
- <li>Q: How can I restore my data after using safe mode? A: You can restore your data by using the Restore Utility feature in Settings > System Settings > [Restore Utility]. You will need an external storage device that contains your backup data to restore it to your console.</li>
- <li>Q: How can I contact SIE for support or service? A: You can contact SIE for support or service by visiting their website at https://www.playstation.com/en-us/support/ . You can also call them at 1-800-345-7669 (US) or 1-877-971-7669 (Canada).</li>
- </ul>
- </p><br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/2 Unlimited Discography (5 Albums 4 Singles) 1992 1998 FLAC LOSSLESS.md DELETED
@@ -1,6 +0,0 @@
- <h2>2 Unlimited Discography (5 Albums 4 Singles) 1992 1998 FLAC, LOSSLESS</h2><br /><p><b><b>Download</b> &middot;&middot;&middot;&middot;&middot; <a href="https://imgfil.com/2uxZWo">https://imgfil.com/2uxZWo</a></b></p><br /><br />
-
- Official site flacattack lossless-music - download Document One ... 5 MB / 1. compression: 73 %) Time: 54:19 Total Size: 2. funk flac results 1 - 23 ... Best Of Sugar Hill Records (CD) (1998) (FLAC + 320 kbps) Rick James - Cold ... How to split & convert single-file FLAC Album into tracks. , and “Sing a Simple ...<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/CROCODILE PHYSICS 605 Torrent.md DELETED
@@ -1,10 +0,0 @@
-
- <p>Anyone who has seen the Crocodile films (or has at least liked the concept) shouldn't miss this one. What you get is a very long, very creepy, and very funny ride through the countryside of Arkansas. Crocodile all the way, baby!</p>
- <h2>CROCODILE PHYSICS 605 Torrent</h2><br /><p><b><b>Download</b> &middot;&middot;&middot; <a href="https://imgfil.com/2uy0Sp">https://imgfil.com/2uy0Sp</a></b></p><br /><br />
- <p>Finally, students also studied how crocodilians use the behaviour of other animals to their advantage, such as the adult crocodiles who patrol the area in order to deter hunting. The adult male who defends his home from intruders is a familiar sight around the lakes that have crocodiles. There are lots of patterns that the young crocodiles also learn from the adults, such as how to make the warning call on mating.</p>
- <p>The week really got my students thinking about the environment and the natural world. We saw the direct and indirect impact of deforestation on crocodiles, and discussed the huge amount of resources it takes to raise the meat trade. We highlighted the predatory nature of crocodiles, and the importance of conserving the environment in which these animals live. We explained the value of conserving animal and plant species, their use to people, and how the meat trade is unsustainable in the long-term.</p>
- <p>This week was our first introduction to Crocodile Physics, and it was a phenomenal success. We found it incredibly useful, and all of my students who participated were very impressed. They highlighted a few issues that we can address in future versions. It is currently very difficult to take the device out of the water, so it may not make it into the next release.</p>
- <p></p>
- <p>Thousands of years of civilisation, gone. Never to return. All that remains of these latest civilisations is a dense cloud of dust in the dim recesses of outer space. A rogue molecule, that defies all known laws of physics, breaks the bonds that lock together the elements of matter, and all that is left in the shattered remnants of the dead worlds are billions of cosmic point particles. Will these strange new mutations slowly learn to acclimatise to their new environments, or will they become extinct just as they arrived?</p><br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Cloneapp Messenger APK A Powerful Tool for WhatsApp Cloning and Direct Chatting.md DELETED
@@ -1,94 +0,0 @@
1
-
2
- <h1>Clone Messenger APK: What Is It and How to Use It</h1>
3
- <p>If you are looking for a way to use multiple accounts of the same app on your Android device, you might have heard of clone messenger apk. This is a utility app that allows you to clone your personal WhatsApp account into another phone. But what exactly is clone messenger apk, how does it work, and what are the pros and cons of using it? In this article, we will answer these questions and show you how to download, install, and use clone messenger apk on your Android device. We will also introduce you to another app called App Cloner, which can help you clone other apps besides WhatsApp.</p>
4
- <h2>Introduction</h2>
5
- <p>Clone messenger apk is an app developed by BlueSoft Digital that lets you create a duplicate version of your WhatsApp account on another phone. This way, you can have a single account on two different devices, without logging out from one or the other. This can be useful if you want to separate your personal and professional chats, or if you want to have a backup account in case of emergencies.</p>
6
- <h2>clone messenger apk</h2><br /><p><b><b>Download</b> &bull;&bull;&bull; <a href="https://jinyurl.com/2uNPSe">https://jinyurl.com/2uNPSe</a></b></p><br /><br />
7
- <p>However, clone messenger apk is not an official app from WhatsApp, and it may not work properly with some features or updates. It also requires you to grant some permissions and access settings that may compromise your privacy or security. Moreover, it only works with WhatsApp, so if you want to clone other apps, you will need another tool.</p>
8
- <h2>How to Download and Install Clone Messenger APK</h2>
9
- <p>Since clone messenger apk is not available in the Google Play Store, you will need to download it from a third-party source and sideload it on your device. Here are the steps to do so:</p>
10
- <ol>
11
- <li>Go to <a href="(^1^)">this website</a> and download the latest version of clone messenger apk.</li>
12
- <li>Open the downloaded file and tap on Install. You may need to enable Unknown Sources in your security settings first.</li>
13
- <li>Once the installation is complete, open the app and grant the necessary permissions.</li>
14
- <li>Go to Settings > Accessibility > Cloneapp Service and turn it on.</li>
15
- <li>Go back to the app and tap on Start Cloning.</li>
16
- </ol>
17
- <h2>How to Clone WhatsApp Using Clone Messenger APK</h2>
18
- <p>After installing clone messenger apk, you can start cloning your WhatsApp account by following these steps:</p>
19
- <ol>
20
- <li>On your original phone, open WhatsApp and go to Settings > WhatsApp Web/Desktop > Scan QR Code.</li>
21
- <li>On your other phone, open clone messenger apk and wait for the QR code to appear.</li>
22
- <li>Scan the QR code with your original phone and wait for the connection to be established.</li>
23
- <li>You should now see your WhatsApp account duplicated on your other phone. You can use it as normal, with all the features such as chats, calls, media, etc.</li>
24
- </ol>
25
- <p>One of the advantages of clone messenger apk is that it also comes with some extra features that are not available in the official WhatsApp app. For example, You can use the direct chat and story saver features of the cloned app. The direct chat feature allows you to chat with any WhatsApp user without saving their number in your contact list. You just have to enter their number in the direct chat tab and start your conversation. The story saver feature allows you to save WhatsApp stories to your device to view them offline or re-share them with your friends and family . These features are not available in the official WhatsApp app, so they can make your communication more convenient and fun.</p>
26
- <p>clone whatsapp messenger apk<br />
27
- cloneapp messenger pro apk<br />
28
- clone messenger for web and status saver apk<br />
29
- clone app messenger dual account apk<br />
30
- clone messenger apk free download<br />
31
- cloneapp messenger latest version apk<br />
32
- clone messenger apk mod<br />
33
- cloneapp messenger premium apk<br />
34
- clone messenger apk for android<br />
35
- cloneapp messenger app cloner apk<br />
36
- clone messenger apk old version<br />
37
- cloneapp messenger story saver apk<br />
38
- clone messenger apk no ads<br />
39
- cloneapp messenger direct chat apk<br />
40
- clone messenger apk 2021<br />
41
- cloneapp messenger whatsapp web apk<br />
42
- clone messenger apk offline<br />
43
- cloneapp messenger online apk<br />
44
- clone messenger apk update<br />
45
- cloneapp messenger backup apk<br />
46
- clone messenger apk 2020<br />
47
- cloneapp messenger new update apk<br />
48
- clone messenger apk 2019<br />
49
- cloneapp messenger original apk<br />
50
- clone messenger apk 2018<br />
51
- cloneapp messenger beta apk<br />
52
- clone messenger apk 2017<br />
53
- cloneapp messenger cracked apk<br />
54
- clone messenger apk 2016<br />
55
- cloneapp messenger hack apk<br />
56
- clone messenger apk 2015<br />
57
- cloneapp messenger full version apk<br />
58
- clone messenger lite apk<br />
59
- cloneapp messenger plus apk<br />
60
- superclone app cloner for multiple accounts - dual space & parallel app - whatsapp, facebook, instagram, snapchat, twitter, telegram, line, wechat, imo, viber, zalo, kakaotalk, hike, signal, skype, gmail, youtube, tiktok, likee, bigo live, vmate, helo and more social media apps - support 64bit - support android 10 - support dark mode - support app lock - support custom icon and label - support notification badge - support multiple accounts and dual space - support incognito installation and private cloning - support speed mode and power saving mode - support task manager and app uninstaller - support cloning game apps such as pubg mobile lite, free fire and more - support cloning vpn apps such as turbo vpn and more - support cloning browser apps such as chrome and more - support cloning video player apps such as mx player and more - support cloning photo editor apps such as picsart and more - support cloning music player apps such as spotify and more - support cloning file manager apps such as es file explorer and more - support cloning launcher apps such as nova launcher and more - support cloning keyboard apps such as gboard and more - support cloning utility apps such as flashlight and more - support cloning productivity apps such as evernote and more - support cloning education apps such as duolingo and more</p>
61
- <h2>How to Clone Other Apps Using App Cloner</h2>
62
- <p>If you want to clone other apps besides WhatsApp, you will need another tool called App Cloner. App Cloner is an app that lets you create and install multiple copies of any Android app. App Cloner is different from Clone Messenger APK because it does not require you to scan a QR code or use the same account on two devices. Instead, it creates independent and customizable clones that can have different names, icons, settings, and features .</p>
63
- <p>Here is how you can use App Cloner to clone other apps on your Android device:</p>
64
- <ol>
65
- <li>Download App Cloner from <a href="(^4^)">this website</a> and install it on your device.</li>
66
- <li>Open App Cloner and select the app you want to clone from the list of installed apps.</li>
67
- <li>Tap on the pencil icon to edit the name and icon of the cloned app. You can also change the color, rotation, shape, and badge of the icon.</li>
68
- <li>Tap on the cog icon to access more options for customizing the cloned app. You can change the display, privacy, storage, network, automation, and launch options of the app. You can also enable or disable some features such as notifications, permissions, widgets, etc.</li>
69
- <li>Tap on the tick icon to confirm your changes and create the cloned app.</li>
70
- <li>Tap on Install to install the cloned app on your device. You may need to enable Unknown Sources in your security settings first.</li>
71
- <li>Open the cloned app and use it as normal. You can have different accounts, settings, and data on the cloned app and the original app.</li>
72
- </ol>
73
- <h2>Conclusion</h2>
74
- <p>Cloning apps on Android can be a useful way to use multiple accounts, backup your data, or customize your apps. Clone Messenger APK and App Cloner are two tools that can help you clone WhatsApp and other apps on your Android device. However, you should be aware of the potential risks and limitations of using cloned apps, such as compatibility issues, privacy concerns, or legal implications. You should also respect the terms and conditions of the original apps and use cloned apps responsibly and ethically.</p>
75
- <h2>FAQs</h2>
76
- <h3>What are some common issues or errors when cloning apps on Android?</h3>
77
- <p>Some common issues or errors when cloning apps on Android are:</p>
78
- <ul>
79
- <li>The cloned app may not work properly with some features or updates of the original app.</li>
80
- <li>The cloned app may crash or freeze frequently or consume more battery or memory than the original app.</li>
81
- <li>The cloned app may not be compatible with some devices or operating systems.</li>
82
- <li>The cloned app may cause conflicts or errors with the original app or other apps on your device.</li>
83
- <li>The cloned app may violate some policies or regulations of the original app or its developer.</li>
84
- </ul>
85
- <h3>Can I clone any app on Android using Clone Messenger APK or App Cloner?</h3>
86
- <p>No, you cannot clone any app on Android using Clone Messenger APK or App Cloner. Clone Messenger APK only works with WhatsApp, while App Cloner may not work with some apps that have anti-cloning measures or special requirements. Some examples of apps that cannot be cloned are Google Play Services, Google Play Store, Gmail, YouTube, Facebook Messenger, Snapchat, TikTok, etc.</p>
87
- <h3>Is cloning apps on Android legal and safe?</h3>
88
- <p>Cloning apps on Android may not be legal or safe depending on how you use them and what apps you clone. Some apps may have terms and conditions that prohibit cloning or modifying their apps without their permission. Some apps may also have security features that prevent cloning or detect cloned apps and block them. Cloning apps may also expose your personal information or data to third parties or hackers. Therefore, you should always check the legality and safety of cloning apps before doing so and use them at your own risk.</p>
89
- <h3>How can I switch between cloned apps and original apps on Android?</h3>
90
- <p>You can switch between cloned apps and original apps on Android by using the app switcher button on your device or by tapping on the app icons on your home screen or app drawer. The cloned apps and original apps have different icons, names, and colors, so you can easily distinguish them. You can also rename or change the icons of the cloned apps using App Cloner to make them more recognizable.</p>
91
- <h3>How can I delete or uninstall cloned apps on Android?</h3>
92
- <p>You can delete or uninstall cloned apps on Android by following the same steps as deleting or uninstalling any other app on your device. You can either long-press on the app icon and drag it to the trash bin, or go to Settings > Apps and select the app you want to delete or uninstall. You may also need to clear the cache and data of the app before deleting or uninstalling it.</p>
 
spaces/1phancelerku/anime-remove-background/Conquest 2 APK A Game with Hidden Treasures Fierce Enemies and New Weapons.md DELETED
@@ -1,82 +0,0 @@
1
-
2
- <h1>Conquest 2 Apk Download: How to Enjoy This Epic Strategy Game on Your Android or PC</h1>
3
- <p>If you are a fan of strategy games, you might have heard of Conquest 2, a thrilling sci-fi game that features large-scale fleet battles, intelligent admirals, and deep space exploration. Conquest 2 is the sequel to Conquest: Frontier Wars, a classic RTS game that was released in 2001. In this article, we will show you how to download Conquest 2 apk for your Android device, and how to play it on your PC using an emulator.</p>
4
- <h2>conquest 2 apk download</h2><br /><p><b><b>Download</b> &#9999; &#9999; &#9999; <a href="https://jinyurl.com/2uNSJc">https://jinyurl.com/2uNSJc</a></b></p><br /><br />
5
- <h2>What is Conquest 2?</h2>
6
- <p>Conquest 2 is a real-time strategy game that takes place in a vast galaxy where three races compete for resources and territory: the humans, the insectoid Mantis, and the energy-based Celaerans. Each race has its own strengths, weaknesses, and unique units. You can choose to play as any of them in the single-player campaign mode, or challenge other players online in the multiplayer mode.</p>
7
- <p>Conquest 2 has a lot of features that make it stand out from other strategy games. For example, you can manage your supply lines while waging war in multiple maps simultaneously using wormholes. You can also command up to six highly intelligent fleet admirals who serve as hero units and have their own personalities and abilities. Moreover, you can customize your ships and research new technologies to gain an edge over your enemies.</p>
8
- <h2>How to download Conquest 2 apk for Android devices</h2>
9
- <p>If you want to play Conquest 2 on your Android device, you will need to download the apk file from a reliable source. Here are the steps to follow:</p>
10
- <ol>
11
- <li>Go to the Epic Conquest 2 APK or Art of Conquest 2: Infinity APK page on APKCombo and click on the download button.</li>
12
- <li>Wait for the apk file to be downloaded on your device.</li>
13
- <li>Go to your device settings and enable the installation of apps from unknown sources.</li>
14
- <li>Locate the apk file in your file manager and tap on it to install it.</li>
15
- <li>Launch the game and enjoy!</li>
16
- </ol>
17
- <p>Some tips and warnings before you download Conquest 2 apk:</p>
18
- <ul>
19
- <li>Make sure you have enough storage space on your device.</li>
20
- <li>Check the compatibility of the game with your device model and Android version.</li>
21
- <li>Beware of fake or malicious apk files that may harm your device or steal your data.</li>
22
- <li>Always update the game to get the latest features and bug fixes.</li>
23
- </ul>
24
- <h2>How to play Conquest 2 on PC using an emulator</h2>
25
- <p>If you prefer to play Conquest 2 on a bigger screen with better graphics and controls, you can use an emulator to run it on your PC. An emulator is a software that simulates an Android device on your computer, allowing you to play Android games and apps on your PC. Here are some benefits of playing Conquest 2 on PC using an emulator:</p>
26
- <ul>
27
- <li>You can enjoy a smoother and faster gameplay experience with higher FPS and resolution.</li>
28
- <li>You can use your keyboard and mouse or a gamepad to control the game more easily.</li>
29
- <li>You can record your gameplay or stream it online with built-in tools.</li>
30
- <li>You can access multiple games and apps at the same time with multi-instance features.</li>
31
- </ul>
32
- <p>To play Conquest 2 on PC using an emulator, you will need to follow these steps:</p>
33
- <ol>
34
- <li>Download and install an emulator of your choice. Some of the best emulators for Conquest 2 are BlueStacks, NoxPlayer, and LDPlayer.</li>
35
- <li>Launch the emulator and sign in with your Google account.</li>
36
- <li>Go to the Google Play Store and search for Conquest 2. Alternatively, you can download the apk file from the links mentioned above and drag and drop it into the emulator.</li>
37
- <li>Install the game and open it.</li>
38
- <li>Adjust the settings and controls according to your preference.</li>
39
- <li>Start playing and have fun!</li>
40
- </ol>
41
- <h2>Conclusion</h2>
42
- <p>Conquest 2 is an amazing strategy game that will keep you hooked for hours with its immersive gameplay, stunning graphics, and challenging missions. Whether you want to play it on your Android device or your PC, you can easily download Conquest 2 apk from the links we provided and follow our simple guide. Don't miss this opportunity to experience one of the best sci-fi games ever made!</p>
43
- <p>If you liked this article, please share it with your friends and leave a comment below. Also, don't forget to check out our other articles on gaming, technology, and more. Thanks for reading!</p>
44
- <p>Epic Conquest 2 Android game free download<br />
45
- Epic Conquest 2 latest version XAPK download<br />
46
- Epic Conquest 2 open world adventure game APK<br />
47
- How to install Epic Conquest 2 on Android device<br />
48
- Epic Conquest 2 by Gaco Games APK for Android<br />
49
- Epic Conquest 2 character customization and skills APK<br />
50
- Epic Conquest 2 offline RPG game APK download<br />
51
- Epic Conquest 2 mod APK unlimited money and gems<br />
52
- Epic Conquest 2 review and gameplay APK download<br />
53
- Epic Conquest 2 APK download for PC Windows 10<br />
54
- Download Epic Conquest 2 from APKCombo website<br />
55
- Download Epic Conquest 2 from Softonic website<br />
56
- Download Epic Conquest 2 from Google Play Store<br />
57
- Epic Conquest 2 APK file size and requirements<br />
58
- Epic Conquest 2 APK update and patch notes<br />
59
- Epic Conquest 2 tips and tricks APK download<br />
60
- Epic Conquest 2 best characters and builds APK<br />
61
- Epic Conquest 2 cheats and hacks APK download<br />
62
- Epic Conquest 2 story and lore APK download<br />
63
- Epic Conquest 2 multiplayer and co-op mode APK<br />
64
- Epic Conquest 2 graphics and sound quality APK<br />
65
- Epic Conquest 2 achievements and rewards APK<br />
66
- Epic Conquest 2 bugs and issues APK download<br />
67
- Epic Conquest 2 fan art and community APK<br />
68
- Epic Conquest 2 alternatives and similar games APK</p>
69
- <h3>Frequently Asked Questions</h3>
70
- <p>Here are some of the most common questions that people ask about Conquest 2 apk download:</p>
71
- <h4>Is Conquest 2 free to play?</h4>
72
- <p>Yes, Conquest 2 is free to play, but it may contain some in-app purchases and ads.</p>
73
- <h4>Is Conquest 2 safe to download?</h4>
74
- <p>Yes, Conquest 2 is safe to download as long as you use a trusted source like the ones we recommended. However, you should always scan any apk file before installing it on your device or PC.</p>
75
- <h4>Is Conquest 2 compatible with my device?</h4>
76
- <p>Conquest 2 requires Android 4.1 or higher to run on your device. You can check your device's Android version by going to Settings > About Phone > Software Information. If your device meets the minimum requirements, you should be able to play Conquest 2 without any issues.</p>
77
- <h4>How can I update Conquest 2?</h4>
78
- <p>You can update Conquest 2 by going to the Google Play Store and tapping on the Update button. Alternatively, you can download the latest apk file from the links we provided and install it over the existing one.</p>
79
- <h4>How can I contact the developers of Conquest 2?</h4>
80
- <p>You can contact the developers of Conquest 2 by visiting their official website at [Conquest Games] or by sending them an email at [email protected]. You can also follow them on social media platforms like Facebook, Twitter, and Instagram for news and updates.</p>
 
spaces/1phancelerku/anime-remove-background/Destroy the Planet with Solar Smash APK - Free Download for Android.md DELETED
@@ -1,168 +0,0 @@
1
-
2
- <h1>Solar Smash: How to Download and Play the Ultimate Planet Destruction Simulator</h1>
3
- <p>Have you ever wondered what it would be like to unleash your inner villain and destroy planets with various weapons and disasters? If so, then you might want to check out Solar Smash, a game that lets you do just that. Solar Smash is a planet destruction simulator that allows you to use a variety of different weapons to destroy the planet. These include nuclear missiles, lasers, asteroids, aliens, black holes, and more. You can also customize your own planet or choose from a list of preset ones, such as Earth, Mars, Jupiter, or even a giant pumpkin. The game has stunning graphics, realistic physics, and satisfying sound effects that make you feel like a powerful cosmic force.</p>
4
- <h2>solar smash download apk</h2><br /><p><b><b>DOWNLOAD</b> &#9989; <a href="https://jinyurl.com/2uNOTw">https://jinyurl.com/2uNOTw</a></b></p><br /><br />
5
- <p>In this article, we will show you how to download and play Solar Smash on your Android device or PC, as well as some tips and tricks to help you have the best destruction experience possible. We will also introduce you to some alternatives to Solar Smash that you might enjoy if you are looking for more games like this one. So, without further ado, let's get started!</p>
6
- <h2>What is Solar Smash?</h2>
7
- <h3>A brief introduction to the game and its features</h3>
8
- <p>Solar Smash is a game developed by Paradyme Games, an indie studio based in Australia. The game was released in 2020 and has since gained over 100 million downloads on Google Play Store. The game is rated 4.6 out of 5 stars by more than 1.4 million users who praise its graphics, gameplay, and variety of weapons.</p>
9
- <p>The game has two main modes: Planet Smash and System Smash. In Planet Smash mode, you can choose a single planet to destroy with different weapons and scenarios. You can also customize your own planet by drawing on it or changing its size, color, atmosphere, and gravity. In System Smash mode, you can destroy an entire solar system with multiple planets and stars. You can also create your own system by adding or removing planets and stars.</p>
10
- <p>The game has a wide range of weapons and disasters that you can use to destroy planets. Some of them are realistic, such as nuclear missiles, lasers, asteroids, comets, volcanoes, earthquakes, tsunamis, etc. Some of them are fictional or fantastical, such as aliens, UFOs, black holes, wormholes, antimatter bombs, giant balls, etc. Each weapon has its own effect and damage level on the planet. You can also combine different weapons to create more devastating effects.</p>
11
- <p>How to download solar smash apk for free<br />
12
- Solar smash apk mod unlimited money<br />
13
- Solar smash planet destruction simulator apk<br />
14
- Solar smash apk latest version download<br />
15
- Solar smash apk for pc windows 10<br />
16
- Solar smash apk online play<br />
17
- Solar smash apk no ads<br />
18
- Solar smash apk hack download<br />
19
- Solar smash apk game review<br />
20
- Solar smash apk offline mode<br />
21
- Solar smash apk cheats and tips<br />
22
- Solar smash apk best weapons<br />
23
- Solar smash apk custom planets<br />
24
- Solar smash apk multiplayer mode<br />
25
- Solar smash apk fun and addictive<br />
26
- Solar smash apk realistic physics<br />
27
- Solar smash apk graphics settings<br />
28
- Solar smash apk file size<br />
29
- Solar smash apk requirements and compatibility<br />
30
- Solar smash apk update and new features<br />
31
- Solar smash apk alternatives and similar games<br />
32
- Solar smash apk download error and fix<br />
33
- Solar smash apk safe and secure<br />
34
- Solar smash apk ratings and feedback<br />
35
- Solar smash apk developer contact and support</p>
36
- <p>The game also has a list of achievements that you can complete by destroying planets in certain ways or using certain weapons. Some of them are easy, such as destroying Earth with a nuclear missile or destroying Mars with an asteroid. Some of them are hard, such as destroying Jupiter with a black hole or destroying Saturn with a ring breaker. Completing achievements will give you a sense of accomplishment and challenge.</p>
37
- <h3>How to download and install Solar Smash APK on Android devices</h3>
38
- <p>If you want to play Solar Smash on your Android device, you can download it for free from Google Play Store. However, if for some reason you cannot access the Play Store or want to get the latest version of the game before it is officially released, you can also download the APK file from other sources online. APK stands for Android Package Kit and it is a file format that contains all the necessary files for installing an app on an Android device.</p>
39
- <p>One of the websites that offer the APK file for Solar Smash is APKPure. To download and install the APK file from this website, follow these steps:</p>
40
- <ol>
41
- <li>Go to the APKPure website and search for Solar Smash in the search bar.</li>
42
- <li>Click on the Solar Smash icon and then click on the Download APK button.</li>
43
- <li>Wait for the download to finish and then open the APK file on your device.</li>
44
- <li>If you see a warning message that says "For your security, your phone is not allowed to install unknown apps from this source", go to your device settings and enable the option to allow installation from unknown sources.</li>
45
- <li>Follow the instructions on the screen to install the app on your device.</li>
46
- <li>Enjoy playing Solar Smash!</li>
47
- </ol>
48
- <p>Note: Downloading and installing APK files from unknown sources may pose some risks to your device and data. Make sure you trust the source and scan the file for viruses before installing it. We are not responsible for any damage or loss caused by using APK files.</p>
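<p>For readers who download the APK to a computer first, the same sideload can also be scripted over adb rather than tapped through on the phone. The snippet below is a minimal sketch, not an official tool: it assumes the Android platform tools (adb) are installed and on PATH, USB debugging is enabled on the device, and it uses "solar_smash.apk" as a purely illustrative filename.</p>

```python
# Hypothetical sketch: sideload a downloaded APK over adb.
# Assumes adb is on PATH and a device is connected with USB debugging enabled.
import subprocess

def sideload(apk_path: str) -> None:
    # "-r" replaces the app if an older build is already installed
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

sideload("solar_smash.apk")  # illustrative filename, not an official release name
```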
49
- <h3>How to play Solar Smash on PC with an emulator</h3>
50
- <p>If you want to play Solar Smash on your PC, you will need an emulator that can run Android apps on your computer. An emulator is a software that mimics the functions of another device or system. There are many emulators available online, but one of the most popular and reliable ones is BlueStacks. BlueStacks is a free emulator that allows you to play Android games and apps on your PC with ease. To play Solar Smash on PC with BlueStacks, follow these steps:</p>
51
- <ol>
52
- <li>Go to the BlueStacks website and download the installer for your PC.</li>
53
- <li>Run the installer and follow the instructions on the screen to install BlueStacks on your PC.</li>
54
- <li>Launch BlueStacks and sign in with your Google account or create a new one.</li>
55
- <li>Go to the Google Play Store app on BlueStacks and search for Solar Smash in the search bar.</li>
56
- <li>Click on the Solar Smash icon and then click on the Install button.</li>
57
- <li>Wait for the installation to finish and then click on the Open button.</li>
58
- <li>Enjoy playing Solar Smash on your PC!</li>
59
- </ol>
60
- <p>Note: Playing Solar Smash on PC may require more resources than playing it on your mobile device. Make sure you have enough RAM, CPU, and disk space to run BlueStacks smoothly. You can also adjust the settings of BlueStacks to optimize its performance and compatibility with Solar Smash.</p>
61
- <h2>Tips and Tricks for Playing Solar Smash</h2>
62
- <h3>How to complete the achievements in the game</h3>
63
- <p>Solar Smash has a list of achievements that you can complete by destroying planets in certain ways or using certain weapons. Completing achievements will give you a sense of accomplishment and challenge. Some of them are easy, such as destroying Earth with a nuclear missile or destroying Mars with an asteroid. Some of them are hard, such as destroying Jupiter with a black hole or destroying Saturn with a ring breaker. Here are some tips and tricks for completing some of the achievements in the game:</p>
64
- <ul>
65
- <li>To destroy Earth with a nuclear missile, go to Planet Smash mode and select Earth as your planet. Then, select Nuclear Missile as your weapon and aim at any spot on Earth. Press the fire button and watch as Earth explodes in a fiery blast.</li>
66
- <li>To destroy Mars with an asteroid, go to Planet Smash mode and select Mars as your planet. Then, select Asteroid as your weapon and adjust its size, speed, and angle. Aim at any spot on Mars and press the fire button. Watch as Mars gets hit by a massive rock and crumbles into pieces.</li>
67
- <li>To destroy Jupiter with a black hole, go to Planet Smash mode and select Jupiter as your planet. Then, select Black Hole as your weapon and adjust its size and speed. Aim at any spot on Jupiter and press the fire button. Watch as Jupiter gets sucked into a dark abyss and disappears.</li>
68
- <li>To destroy Saturn with a ring breaker, go to Planet Smash mode and select Saturn as your planet. Then, select Ring Breaker as your weapon and adjust its size, speed, and angle. Aim at any spot on Saturn's rings and press the fire button. Watch as Saturn's rings get shattered by a powerful beam of energy.</li>
69
- </ul>
70
- <p>You can check your progress on the achievements by clicking on the trophy icon on the top right corner of the screen. You can also see how many times you have used each weapon by clicking on the weapon icon on the top left corner of the screen.</p>
71
- <h3>How to hit the right spots to destroy planets faster</h3>
72
- <p>Solar Smash is a game that requires some skill and strategy to destroy planets efficiently. You can't just spam the fire button and hope for the best. You have to aim at the right spots to cause the most damage and destruction. Here are some tips and tricks for hitting the right spots to destroy planets faster:</p>
73
- <ul>
74
- <li>Use the zoom in and zoom out buttons to adjust your view and find the best angle to fire your weapon.</li>
75
- <li>Use the pause button to freeze the planet and plan your next move.</li>
76
- <li>Use the slow motion button to slow down the planet and see the effects of your weapon more clearly.</li>
77
- <li>Use the rewind button to undo your last move if you are not satisfied with it.</li>
78
- <li>Use the reset button to start over if you want to try a different weapon or scenario.</li>
79
- </ul>
80
- <p>Some of the weapons have specific spots that can cause more damage than others. For example, if you use the nuclear missile, you can aim at the major cities or landmarks on Earth, such as New York, London, Paris, Tokyo, etc. If you use the laser, you can aim at the poles or the equator of the planet, where the temperature difference is higher. If you use the asteroid, you can aim at the oceans or the continents, depending on whether you want to cause more water or land damage. If you use the black hole, you can aim at the center of the planet, where the gravity is stronger.</p>
81
- <p>You can also experiment with different combinations of weapons and scenarios to see what happens. For example, you can use the alien invasion scenario and then use the antimatter bomb to destroy both the aliens and the planet. Or you can use the giant ball scenario and then use the ring breaker to destroy both the ball and Saturn's rings. The possibilities are endless!</p>
82
- <h3>How to unlock all the secret planets in the game</h3>
83
- <p>Solar Smash has a list of preset planets that you can choose from in Planet Smash mode. These include Earth, Mars, Jupiter, Saturn, Uranus, Neptune, Pluto, Mercury, Venus, Moon, Sun, and Pumpkin. However, there are also some secret planets that are not shown on the list. These are hidden planets that you can unlock by completing certain tasks or using certain weapons in the game. Here are some tips and tricks for unlocking all the secret planets in Solar Smash:</p>
84
- <ul>
85
- <li>To unlock Earth 2, go to Planet Smash mode and select Earth as your planet. Then, use any weapon to destroy Earth completely. You will see a message that says "Earth 2 unlocked". Earth 2 is a replica of Earth but with different continents and countries.</li>
86
- <li>To unlock Mars 2, go to Planet Smash mode and select Mars as your planet. Then, use any weapon to destroy Mars completely. You will see a message that says "Mars 2 unlocked". Mars 2 is a replica of Mars but with water and vegetation.</li>
87
- <li>To unlock Jupiter 2, go to Planet Smash mode and select Jupiter as your planet. Then, use any weapon to destroy Jupiter completely. You will see a message that says "Jupiter 2 unlocked". Jupiter 2 is a replica of Jupiter but with rings and moons.</li>
88
- <li>To unlock Saturn 2, go to Planet Smash mode and select Saturn as your planet. Then, use any weapon to destroy Saturn completely. You will see a message that says "Saturn 2 unlocked". Saturn 2 is a replica of Saturn but with different colors and patterns.</li>
89
- <li>To unlock Uranus 2, go to Planet Smash mode and select Uranus as your planet. Then, use any weapon to destroy Uranus completely. You will see a message that says "Uranus 2 unlocked". Uranus 2 is a replica of Uranus but with more tilt and rotation.</li>
90
- <li>To unlock Neptune 2, go to Planet Smash mode and select Neptune as your planet. Then, use any weapon to destroy Neptune completely. You will see a message that says "Neptune 2 unlocked". Neptune 2 is a replica of Neptune but with more storms and winds.</li>
91
- <li>To unlock Pluto 2, go to Planet Smash mode and select Pluto as your planet. Then, use any weapon to destroy Pluto completely. You will see a message that says "Pluto 2 unlocked". Pluto 2 is a replica of Pluto but with more ice and snow.</li>
92
- <li>To unlock Mercury 2, go to Planet Smash mode and select Mercury as your planet. Then, use any weapon to destroy Mercury completely. You will see a message that says "Mercury 2 unlocked". Mercury 2 is a replica of Mercury but with more craters and volcanoes.</li>
93
- <li>To unlock Venus 2, go to Planet Smash mode and select Venus as your planet. Then, use any weapon to destroy Venus completely. You will see a message that says "Venus 2 unlocked". Venus 2 is a replica of Venus but with more clouds and acid rain.</li>
94
- <li>To unlock Moon 2, go to Planet Smash mode and select Moon as your planet. Then, use any weapon to destroy Moon completely. You will see a message that says "Moon 2 unlocked". Moon 2 is a replica of Moon but with more color and life.</li>
95
- <li>To unlock Sun 2, go to Planet Smash mode and select Sun as your planet. Then, use any weapon to destroy Sun completely. You will see a message that says "Sun 2 unlocked". Sun 2 is a replica of Sun but with more flares and spots.</li>
96
- <li>To unlock Pumpkin 2, go to Planet Smash mode and select Pumpkin as your planet. Then, use any weapon to destroy Pumpkin completely. You will see a message that says "Pumpkin 2 unlocked". Pumpkin 2 is a replica of Pumpkin but with more faces and candles.</li>
97
- </ul>
98
- <p>You can check your progress on the secret planets by clicking on the planet icon on the top right corner of the screen. You can also see how many times you have destroyed each planet by clicking on the planet icon on the top left corner of the screen.</p>
99
- <h2>Alternatives to Solar Smash</h2>
100
- <h3>Other games that let you destroy planets or simulate space scenarios</h3>
101
- <p>If you enjoy playing Solar Smash, you might also like some other games that let you destroy planets or simulate space scenarios. Here are some of the best alternatives to Solar Smash that you can try:</p>
102
- <table>
103
- <tr>
104
- <th>Game</th>
105
- <th>Description</th>
106
- </tr>
107
- <tr>
108
- <td>Universe Sandbox</td>
109
- <td>Universe Sandbox is a physics-based space simulator that allows you to create, destroy, and interact with anything in the universe. You can explore the solar system, collide planets, create black holes, simulate gravity, and more. The game has realistic graphics, sound effects, and data that make you feel like a true cosmic explorer.</td>
110
- </tr>
111
- <tr>
112
- <td>Solar 2</td>
113
- <td>Solar 2 is a sandbox game that lets you play as an asteroid, a planet, a star, or a black hole. You can grow, evolve, and interact with other objects in the universe. You can also complete missions, challenges, and achievements that test your skills and creativity. The game has simple but beautiful graphics, relaxing music, and humorous narration.</td>
114
- </tr>
115
- <tr>
116
- <td>Planet Bomber</td>
117
- <td>Planet Bomber is a casual game that lets you bomb planets with different weapons and upgrades. You can choose from various types of bombs, such as cluster bombs, nuclear bombs, plasma bombs, etc. You can also upgrade your bomber's speed, power, accuracy, and more. The game has colorful graphics, addictive gameplay, and satisfying explosions.</td>
118
- </tr>
119
- <tr>
120
- <td>Solar Smash 2</td>
121
- <td>Solar Smash 2 is the sequel to Solar Smash that adds more features and improvements to the original game. You can enjoy new weapons, scenarios, planets, systems, modes, and more. You can also play online with other players or offline with bots. The game has enhanced graphics, physics, and sound effects that make it more realistic and fun.</td>
122
- </tr>
123
- </table>
124
- <h3>Pros and cons of Solar Smash compared to other games</h3>
125
- <p>Solar Smash is a great game for anyone who likes to destroy planets or simulate space scenarios. However, it is not perfect and it has some pros and cons compared to other games in the same genre. Here are some of the pros and cons of Solar Smash:</p>
126
- <ul>
127
- <li><b>Pros:</b></li>
128
- <li>It is free to play and easy to download.</li>
129
- <li>It has stunning graphics, realistic physics, and satisfying sound effects.</li>
130
- <li>It has a wide range of weapons and disasters that you can use to destroy planets.</li>
131
- <li>It has two main modes: Planet Smash and System Smash that offer different gameplay options.</li>
132
- <li>It has a list of achievements and secret planets that you can complete and unlock for more challenge and fun.</li>
133
- <li>It has a simple and intuitive user interface that makes it easy to control and navigate.</li>
134
- </ul>
135
- <ul>
136
- <li><b>Cons:</b></li>
137
- <li>It may contain ads or in-app purchases that may interrupt or limit your gameplay.</li>
138
- <li>It may require a good internet connection and a compatible device to run the game smoothly.</li>
139
- <li>It may become repetitive or boring after a while if you run out of weapons or scenarios to try.</li>
140
- <li>It may not be very educational or realistic as some of the weapons or disasters are fictional or exaggerated.</li>
141
- <li>It may not be very suitable for children or sensitive people as some of the weapons or disasters are violent or disturbing.</li>
142
- </ul>
143
- <p>Of course, these pros and cons are subjective and may vary depending on your personal preferences and expectations. You can always try the game for yourself and see if you like it or not. After all, the best way to judge a game is to play it!</p>
144
- <h2>Conclusion</h2>
145
- <h3>A summary of the main points and a call to action for the readers</h3>
146
- <p>Solar Smash is a planet destruction simulator that allows you to use a variety of different weapons to destroy the planet. You can also customize your own planet or choose from a list of preset ones, such as Earth, Mars, Jupiter, or even a giant pumpkin. The game has stunning graphics, realistic physics, and satisfying sound effects that make you feel like a powerful cosmic force.</p>
147
- <p>In this article, we have shown you how to download and play Solar Smash on your Android device or PC, as well as some tips and tricks to help you have the best destruction experience possible. We have also introduced you to some alternatives to Solar Smash that you might enjoy if you are looking for more games like this one.</p>
148
- <p>If you are interested in playing Solar Smash, you can download it for free from Google Play Store or from other sources online. You can also visit the official website or the Facebook page of Paradyme Games, the developer of Solar Smash, to learn more about the game and its updates.</p>
149
- <p>We hope you have enjoyed reading this article and found it useful and informative. If you have any questions, comments, or feedback, feel free to leave them below. We would love to hear from you!</p>
150
- <p>Now, go ahead and unleash your inner villain and destroy some planets with Solar Smash! Have fun!</p>
151
- <h2>FAQs</h2>
152
- <h3>Five unique questions and answers related to Solar Smash</h3>
153
- <ol>
154
- <li><b>Q: Is Solar Smash safe to play?</b></li>
155
- <li>A: Solar Smash is safe to play as long as you download it from a trusted source and scan it for viruses before installing it. However, the game may contain ads or in-app purchases that may require your permission or payment. The game may also contain violent or disturbing content that may not be suitable for children or sensitive people. You can always check the ratings and reviews of the game before playing it.</li>
156
- <li><b>Q: How can I play Solar Smash with my friends?</b></li>
157
- <li>A: Solar Smash has an online multiplayer mode that allows you to play with other players around the world. You can also play offline with bots if you prefer. To play online, you need to have a good internet connection and a compatible device. You can also create or join a room with your friends by using a code. To play offline, you need to select the offline mode in the settings menu.</li>
158
- <li><b>Q: How can I get more weapons or scenarios in Solar Smash?</b></li>
159
- <li>A: Solar Smash has a lot of weapons and scenarios that you can use to destroy planets. However, some of them are locked and require you to complete certain tasks or pay real money to unlock them. You can also get more weapons or scenarios by downloading updates or mods for the game. Updates are official releases by the developer that add new features or improvements to the game. Mods are unofficial modifications by other users that change or enhance the game in some way.</li>
160
- <li><b>Q: How can I contact the developer of Solar Smash?</b></li>
161
- <li>A: If you want to contact the developer of Solar Smash, Paradyme Games, you can visit their official website or their Facebook page. You can also send them an email at [email protected]. You can also follow them on Twitter or Instagram to get the latest news and updates about their games.</li>
162
- <li><b>Q: What are some other games by Paradyme Games?</b></li>
163
- <li>A: Paradyme Games is an indie studio based in Australia that develops games for Android devices. Some of their other games are Planet Miner, Planet Miner 2, Planet Miner Idle Tycoon, Planet Miner Clicker Game, and Planet Miner Space Simulator. You can find these games on Google Play Store or on their website.</li>
164
- </ol>
165
- <p>Thank you for reading this article and I hope you have learned something new and useful about Solar Smash. If you liked this article, please share it with your friends and family who might also enjoy playing Solar Smash. You can also leave a comment below and let me know what you think about the game or the article. I would love to hear your feedback and suggestions.</p>
166
- <p>Until next time, happy smashing!</p>
 
spaces/801artistry/RVC801/demucs/wav.py DELETED
@@ -1,174 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from collections import OrderedDict
8
- import hashlib
9
- import math
10
- import json
11
- from pathlib import Path
12
-
13
- import julius
14
- import torch as th
15
- from torch import distributed
16
- import torchaudio as ta
17
- from torch.nn import functional as F
18
-
19
- from .audio import convert_audio_channels
20
- from .compressed import get_musdb_tracks
21
-
22
- MIXTURE = "mixture"
23
- EXT = ".wav"
24
-
25
-
26
- def _track_metadata(track, sources):
27
- track_length = None
28
- track_samplerate = None
29
- for source in sources + [MIXTURE]:
30
- file = track / f"{source}{EXT}"
31
- info = ta.info(str(file))
32
- length = info.num_frames
33
- if track_length is None:
34
- track_length = length
35
- track_samplerate = info.sample_rate
36
- elif track_length != length:
37
- raise ValueError(
38
- f"Invalid length for file {file}: "
39
- f"expecting {track_length} but got {length}.")
40
- elif info.sample_rate != track_samplerate:
41
- raise ValueError(
42
- f"Invalid sample rate for file {file}: "
43
- f"expecting {track_samplerate} but got {info.sample_rate}.")
44
- if source == MIXTURE:
45
- wav, _ = ta.load(str(file))
46
- wav = wav.mean(0)
47
- mean = wav.mean().item()
48
- std = wav.std().item()
49
-
50
- return {"length": length, "mean": mean, "std": std, "samplerate": track_samplerate}
51
-
52
-
53
- def _build_metadata(path, sources):
54
- meta = {}
55
- path = Path(path)
56
- for file in path.iterdir():
57
- meta[file.name] = _track_metadata(file, sources)
58
- return meta
59
-
60
-
61
- class Wavset:
62
- def __init__(
63
- self,
64
- root, metadata, sources,
65
- length=None, stride=None, normalize=True,
66
- samplerate=44100, channels=2):
67
- """
68
- Waveset (or mp3 set for that matter). Can be used to train
69
- with arbitrary sources. Each track should be one folder inside of `path`.
70
- The folder should contain files named `{source}.{ext}`.
71
- Files will be grouped according to `sources` (each source is a list of
72
- filenames).
73
-
74
- Sample rate and channels will be converted on the fly.
75
-
76
- `length` is the sample size to extract (in samples, not duration).
77
- `stride` is how many samples to move by between each example.
78
- """
79
- self.root = Path(root)
80
- self.metadata = OrderedDict(metadata)
81
- self.length = length
82
- self.stride = stride or length
83
- self.normalize = normalize
84
- self.sources = sources
85
- self.channels = channels
86
- self.samplerate = samplerate
87
- self.num_examples = []
88
- for name, meta in self.metadata.items():
89
- track_length = int(self.samplerate * meta['length'] / meta['samplerate'])
90
- if length is None or track_length < length:
91
- examples = 1
92
- else:
93
- examples = int(math.ceil((track_length - self.length) / self.stride) + 1)
94
- self.num_examples.append(examples)
95
-
96
- def __len__(self):
97
- return sum(self.num_examples)
98
-
99
- def get_file(self, name, source):
100
- return self.root / name / f"{source}{EXT}"
101
-
102
- def __getitem__(self, index):
103
- for name, examples in zip(self.metadata, self.num_examples):
104
- if index >= examples:
105
- index -= examples
106
- continue
107
- meta = self.metadata[name]
108
- num_frames = -1
109
- offset = 0
110
- if self.length is not None:
111
- offset = int(math.ceil(
112
- meta['samplerate'] * self.stride * index / self.samplerate))
113
- num_frames = int(math.ceil(
114
- meta['samplerate'] * self.length / self.samplerate))
115
- wavs = []
116
- for source in self.sources:
117
- file = self.get_file(name, source)
118
- wav, _ = ta.load(str(file), frame_offset=offset, num_frames=num_frames)
119
- wav = convert_audio_channels(wav, self.channels)
120
- wavs.append(wav)
121
-
122
- example = th.stack(wavs)
123
- example = julius.resample_frac(example, meta['samplerate'], self.samplerate)
124
- if self.normalize:
125
- example = (example - meta['mean']) / meta['std']
126
- if self.length:
127
- example = example[..., :self.length]
128
- example = F.pad(example, (0, self.length - example.shape[-1]))
129
- return example
130
-
131
-
132
- def get_wav_datasets(args, samples, sources):
133
- sig = hashlib.sha1(str(args.wav).encode()).hexdigest()[:8]
134
- metadata_file = args.metadata / (sig + ".json")
135
- train_path = args.wav / "train"
136
- valid_path = args.wav / "valid"
137
- if not metadata_file.is_file() and args.rank == 0:
138
- train = _build_metadata(train_path, sources)
139
- valid = _build_metadata(valid_path, sources)
140
- json.dump([train, valid], open(metadata_file, "w"))
141
- if args.world_size > 1:
142
- distributed.barrier()
143
- train, valid = json.load(open(metadata_file))
144
- train_set = Wavset(train_path, train, sources,
145
- length=samples, stride=args.data_stride,
146
- samplerate=args.samplerate, channels=args.audio_channels,
147
- normalize=args.norm_wav)
148
- valid_set = Wavset(valid_path, valid, [MIXTURE] + sources,
149
- samplerate=args.samplerate, channels=args.audio_channels,
150
- normalize=args.norm_wav)
151
- return train_set, valid_set
152
-
153
-
154
- def get_musdb_wav_datasets(args, samples, sources):
155
- metadata_file = args.metadata / "musdb_wav.json"
156
- root = args.musdb / "train"
157
- if not metadata_file.is_file() and args.rank == 0:
158
- metadata = _build_metadata(root, sources)
159
- json.dump(metadata, open(metadata_file, "w"))
160
- if args.world_size > 1:
161
- distributed.barrier()
162
- metadata = json.load(open(metadata_file))
163
-
164
- train_tracks = get_musdb_tracks(args.musdb, is_wav=True, subsets=["train"], split="train")
165
- metadata_train = {name: meta for name, meta in metadata.items() if name in train_tracks}
166
- metadata_valid = {name: meta for name, meta in metadata.items() if name not in train_tracks}
167
- train_set = Wavset(root, metadata_train, sources,
168
- length=samples, stride=args.data_stride,
169
- samplerate=args.samplerate, channels=args.audio_channels,
170
- normalize=args.norm_wav)
171
- valid_set = Wavset(root, metadata_valid, [MIXTURE] + sources,
172
- samplerate=args.samplerate, channels=args.audio_channels,
173
- normalize=args.norm_wav)
174
- return train_set, valid_set
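A minimal sketch of driving `Wavset` directly, without the argparse-based helpers above. The `data/train` location and the four-stem layout are assumptions for illustration; each track folder is expected to hold `drums.wav`, `bass.wav`, `other.wav`, `vocals.wav` and `mixture.wav`.

```python
sources = ["drums", "bass", "other", "vocals"]
metadata = _build_metadata("data/train", sources)  # scans every track folder once
dataset = Wavset(
    "data/train", metadata, sources,
    length=44100 * 10,   # 10-second excerpts
    stride=44100,        # move 1 second between consecutive examples
    samplerate=44100, channels=2)
example = dataset[0]     # tensor of shape [num_sources, channels, length]
```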
 
spaces/A1draw-12196y/DeepDanbooru_string/app.py DELETED
@@ -1,185 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- from __future__ import annotations
4
-
5
- import argparse
6
- import functools
7
- import os
8
- import html
9
- import pathlib
10
- import tarfile
11
-
12
- import deepdanbooru as dd
13
- import gradio as gr
14
- import huggingface_hub
15
- import numpy as np
16
- import PIL.Image
17
- import tensorflow as tf
18
- import piexif
19
- import piexif.helper
20
-
21
- TITLE = 'DeepDanbooru String'
22
-
23
- TOKEN = os.environ['TOKEN']
24
- MODEL_REPO = 'CikeyQI/DeepDanbooru_string'
25
- MODEL_FILENAME = 'model-resnet_custom_v3.h5'
26
- LABEL_FILENAME = 'tags.txt'
27
-
28
-
29
- def parse_args() -> argparse.Namespace:
30
- parser = argparse.ArgumentParser()
31
- parser.add_argument('--score-slider-step', type=float, default=0.05)
32
- parser.add_argument('--score-threshold', type=float, default=0.5)
33
- parser.add_argument('--theme', type=str, default='dark-grass')
34
- parser.add_argument('--live', action='store_true')
35
- parser.add_argument('--share', action='store_true')
36
- parser.add_argument('--port', type=int)
37
- parser.add_argument('--disable-queue',
38
- dest='enable_queue',
39
- action='store_false')
40
- parser.add_argument('--allow-flagging', type=str, default='never')
41
- return parser.parse_args()
42
-
43
-
44
- def load_sample_image_paths() -> list[pathlib.Path]:
45
- image_dir = pathlib.Path('images')
46
- if not image_dir.exists():
47
- dataset_repo = 'hysts/sample-images-TADNE'
48
- path = huggingface_hub.hf_hub_download(dataset_repo,
49
- 'images.tar.gz',
50
- repo_type='dataset',
51
- use_auth_token=TOKEN)
52
- with tarfile.open(path) as f:
53
- f.extractall()
54
- return sorted(image_dir.glob('*'))
55
-
56
-
57
- def load_model() -> tf.keras.Model:
58
- path = huggingface_hub.hf_hub_download(MODEL_REPO,
59
- MODEL_FILENAME,
60
- use_auth_token=TOKEN)
61
- model = tf.keras.models.load_model(path)
62
- return model
63
-
64
-
65
- def load_labels() -> list[str]:
66
- path = huggingface_hub.hf_hub_download(MODEL_REPO,
67
- LABEL_FILENAME,
68
- use_auth_token=TOKEN)
69
- with open(path) as f:
70
- labels = [line.strip() for line in f.readlines()]
71
- return labels
72
-
73
- def plaintext_to_html(text):
74
- text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
75
- return text
76
-
77
- def predict(image: PIL.Image.Image, score_threshold: float,
78
- model: tf.keras.Model, labels: list[str]) -> dict[str, float]:
79
- rawimage = image
80
- _, height, width, _ = model.input_shape
81
- image = np.asarray(image)
82
- image = tf.image.resize(image,
83
- size=(height, width),
84
- method=tf.image.ResizeMethod.AREA,
85
- preserve_aspect_ratio=True)
86
- image = image.numpy()
87
- image = dd.image.transform_and_pad_image(image, width, height)
88
- image = image / 255.
89
- probs = model.predict(image[None, ...])[0]
90
- probs = probs.astype(float)
91
- res = dict()
92
- for prob, label in zip(probs.tolist(), labels):
93
- if prob < score_threshold:
94
- continue
95
- res[label] = prob
96
- b = dict(sorted(res.items(),key=lambda item:item[1], reverse=True))
97
- a = ', '.join(list(b.keys())).replace('_',' ').replace('(','\(').replace(')','\)')
98
- c = ', '.join(list(b.keys()))
99
-
100
- items = rawimage.info
101
- geninfo = ''
102
-
103
- if "exif" in rawimage.info:
104
- exif = piexif.load(rawimage.info["exif"])
105
- exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'')
106
- try:
107
- exif_comment = piexif.helper.UserComment.load(exif_comment)
108
- except ValueError:
109
- exif_comment = exif_comment.decode('utf8', errors="ignore")
110
-
111
- items['exif comment'] = exif_comment
112
- geninfo = exif_comment
113
-
114
- for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
115
- 'loop', 'background', 'timestamp', 'duration']:
116
- items.pop(field, None)
117
-
118
- geninfo = items.get('parameters', geninfo)
119
-
120
- info = f"""
121
- <p><h4>PNG Info</h4></p>
122
- """
123
- for key, text in items.items():
124
- info += f"""
125
- <div>
126
- <p><b>{plaintext_to_html(str(key))}</b></p>
127
- <p>{plaintext_to_html(str(text))}</p>
128
- </div>
129
- """.strip()+"\n"
130
-
131
- if len(info) == 0:
132
- message = "Nothing found in the image."
133
- info = f"<div><p>{message}<p></div>"
134
-
135
- return (a,c,res,info)
136
-
137
-
138
- def main():
139
- args = parse_args()
140
- model = load_model()
141
- labels = load_labels()
142
-
143
- func = functools.partial(predict, model=model, labels=labels)
144
- func = functools.update_wrapper(func, predict)
145
-
146
- gr.Interface(
147
- func,
148
- [
149
- gr.inputs.Image(type='pil', label='Input'),
150
- gr.inputs.Slider(0,
151
- 1,
152
- step=args.score_slider_step,
153
- default=args.score_threshold,
154
- label='Score Threshold'),
155
- ],
156
- [
157
- gr.outputs.Textbox(label='Output (string)'),
158
- gr.outputs.Textbox(label='Output (raw string)'),
159
- gr.outputs.Label(label='Output (label)'),
160
- gr.outputs.HTML()
161
- ],
162
- examples=[
163
- ['miku.jpg',0.5],
164
- ['miku2.jpg',0.5]
165
- ],
166
- title=TITLE,
167
- description='''
168
- Demo for [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) with "ready to copy" prompt and a prompt analyzer.
169
-
170
- Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru)
171
-
172
- PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
173
- ''',
174
- theme=args.theme,
175
- allow_flagging=args.allow_flagging,
176
- live=args.live,
177
- ).launch(
178
- enable_queue=args.enable_queue,
179
- server_port=args.port,
180
- share=args.share,
181
- )
182
-
183
-
184
- if __name__ == '__main__':
185
- main()
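A minimal sketch of running the tagger once without starting the Gradio app, reusing the helpers defined above. It assumes a valid Hugging Face token in the `TOKEN` environment variable (required at import time) and a hypothetical local image `sample.jpg`.

```python
model = load_model()
labels = load_labels()
tags, raw_tags, scores, info_html = predict(
    PIL.Image.open("sample.jpg"), 0.5, model=model, labels=labels)
print(tags)  # prompt-ready tag string: underscores removed, parentheses escaped
```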
 
spaces/A666sxr/Genshin_TTS/text/symbols.py DELETED
@@ -1,13 +0,0 @@
1
- '''
2
- Defines the set of symbols used in text input to the model.
3
- '''
4
- _pad = '_'
5
- _punctuation = ';:,.!?¡¿—…"«»“” '
6
- _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
7
- _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
8
-
9
-
10
- # Export all symbols:
11
- symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
12
- # Special symbol ids
13
- SPACE_ID = symbols.index(" ")
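For context, a typical consumer of this table maps cleaned text to integer ids and back. The two lookup dicts below are an illustrative sketch, not part of the original file.

```python
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}

ids = [_symbol_to_id[s] for s in "hello world!"]
text = "".join(_id_to_symbol[i] for i in ids)  # round-trips to "hello world!"
```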
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/syntaspeech/syntactic_graph_buider.py DELETED
@@ -1,294 +0,0 @@
1
- from copy import deepcopy
2
- import torch
3
- import dgl
4
- import stanza
5
- import networkx as nx
6
-
7
- class Sentence2GraphParser:
8
- def __init__(self, language='zh', use_gpu=False, download=False):
9
- self.language = language
10
- if download:
11
- self.stanza_parser = stanza.Pipeline(lang=language, use_gpu=use_gpu)
12
- else:
13
- self.stanza_parser = stanza.Pipeline(lang=language, use_gpu=use_gpu, download_method=None)
14
-
15
- def parse(self, clean_sentence=None, words=None, ph_words=None):
16
- if self.language == 'zh':
17
- assert words is not None and ph_words is not None
18
- ret = self._parse_zh(words, ph_words)
19
- elif self.language == 'en':
20
- assert clean_sentence is not None
21
- ret = self._parse_en(clean_sentence)
22
- else:
23
- raise NotImplementedError
24
- return ret
25
-
26
- def _parse_zh(self, words, ph_words, enable_backward_edge=True, enable_recur_edge=True,
27
- enable_inter_sentence_edge=True, sequential_edge=False):
28
- """
29
- words: <List of str>, each character in chinese is one item
30
- ph_words: <List of str>, each character in chinese is one item, represented by the phoneme
31
- Example:
32
- text1 = '宝马配挂跛骡鞍,貂蝉怨枕董翁榻.'
33
- words = ['<BOS>', '宝', '马', '配', '挂', '跛', '骡', '鞍', ','
34
- , '貂', '蝉', '怨', '枕', '董', '翁', '榻', '<EOS>']
35
- ph_words = ['<BOS>', 'b_ao3_|', 'm_a3_#', 'p_ei4_|', 'g_ua4_#',
36
- 'b_o3_#', 'l_uo2_|', 'an1', ',', 'd_iao1_|',
37
- 'ch_an2_#', 'van4_#', 'zh_en3_#', 'd_ong3_|', 'ueng1_#', 't_a4', '<EOS>']
38
- """
39
- words, ph_words = words[1:-1], ph_words[1:-1] # delete <BOS> and <EOS>
40
- for i, p_w in enumerate(ph_words):
41
- if p_w == ',':
42
- # change english ',' into chinese
43
- # we found it necessary in stanza's dependency parsing
44
- words[i], ph_words[i] = ',', ','
45
- tmp_words = deepcopy(words)
46
- num_added_space = 0
47
- for i, p_w in enumerate(ph_words):
48
- if p_w.endswith("#"):
49
- # add a blank after the p_w with '#', to separate words
50
- tmp_words.insert(num_added_space + i + 1, " ")
51
- num_added_space += 1
52
- if p_w in [',', ',']:
53
- # add one blank before and after ', ', respectively
54
- tmp_words.insert(num_added_space + i + 1, " ") # insert behind ',' first
55
- tmp_words.insert(num_added_space + i, " ") # insert before
56
- num_added_space += 2
57
- clean_text = ''.join(tmp_words).strip()
58
- parser_out = self.stanza_parser(clean_text)
59
-
60
- idx_to_word = {i + 1: w for i, w in enumerate(words)}
61
-
62
- vocab_nodes = {}
63
- vocab_idx_offset = 0
64
- for sentence in parser_out.sentences:
65
- num_nodes_in_current_sentence = 0
66
- for vocab_node in sentence.words:
67
- num_nodes_in_current_sentence += 1
68
- vocab_idx = vocab_node.id + vocab_idx_offset
69
- vocab_text = vocab_node.text.replace(" ", "") # delete blank in vocab
70
- vocab_nodes[vocab_idx] = vocab_text
71
- vocab_idx_offset += num_nodes_in_current_sentence
72
-
73
- # start vocab-to-word alignment
74
- vocab_to_word = {}
75
- current_word_idx = 1
76
- for vocab_i in vocab_nodes.keys():
77
- vocab_to_word[vocab_i] = []
78
- for w_in_vocab_i in vocab_nodes[vocab_i]:
79
- if w_in_vocab_i != idx_to_word[current_word_idx]:
80
- raise ValueError("Word Mismatch!")
81
- vocab_to_word[vocab_i].append(current_word_idx) # add a path (vocab_node_idx, word_global_idx)
82
- current_word_idx += 1
83
-
84
- # then we compute the vocab-level edges
85
- if len(parser_out.sentences) > 5:
86
- print("Detect more than 5 input sentence! pls check whether the sentence is too long!")
87
- vocab_level_source_id, vocab_level_dest_id = [], []
88
- vocab_level_edge_types = []
89
- sentences_heads = []
90
- vocab_id_offset = 0
91
- # get forward edges
92
- for s in parser_out.sentences:
93
- for w in s.words:
94
- w_idx = w.id + vocab_id_offset # it starts from 1, just same as binarizer
95
- w_dest_idx = w.head + vocab_id_offset
96
- if w.head == 0:
97
- sentences_heads.append(w_idx)
98
- continue
99
- vocab_level_source_id.append(w_idx)
100
- vocab_level_dest_id.append(w_dest_idx)
101
- vocab_id_offset += len(s.words)
102
- vocab_level_edge_types += [0] * len(vocab_level_source_id)
103
- num_vocab = vocab_id_offset
104
-
105
- # optional: get backward edges
106
- if enable_backward_edge:
107
- back_source, back_dest = deepcopy(vocab_level_dest_id), deepcopy(vocab_level_source_id)
108
- vocab_level_source_id += back_source
109
- vocab_level_dest_id += back_dest
110
- vocab_level_edge_types += [1] * len(back_source)
111
-
112
- # optional: get inter-sentence edges if num_sentences > 1
113
- inter_sentence_source, inter_sentence_dest = [], []
114
- if enable_inter_sentence_edge and len(sentences_heads) > 1:
115
- def get_full_graph_edges(nodes):
116
- tmp_edges = []
117
- for i, node_i in enumerate(nodes):
118
- for j, node_j in enumerate(nodes):
119
- if i == j:
120
- continue
121
- tmp_edges.append((node_i, node_j))
122
- return tmp_edges
123
-
124
- tmp_edges = get_full_graph_edges(sentences_heads)
125
- for (source, dest) in tmp_edges:
126
- inter_sentence_source.append(source)
127
- inter_sentence_dest.append(dest)
128
- vocab_level_source_id += inter_sentence_source
129
- vocab_level_dest_id += inter_sentence_dest
130
- vocab_level_edge_types += [3] * len(inter_sentence_source)
131
-
132
- if sequential_edge:
133
- seq_source, seq_dest = list(range(1, num_vocab)) + list(range(num_vocab, 0, -1)), \
134
- list(range(2, num_vocab + 1)) + list(range(num_vocab - 1, -1, -1))
135
- vocab_level_source_id += seq_source
136
- vocab_level_dest_id += seq_dest
137
- vocab_level_edge_types += [4] * (num_vocab - 1) + [5] * (num_vocab - 1)
138
-
139
- # Then, we use the vocab-level edges and the vocab-to-word path, to construct the word-level graph
140
- num_word = len(words)
141
- source_id, dest_id, edge_types = [], [], []
142
- for (vocab_start, vocab_end, vocab_edge_type) in zip(vocab_level_source_id, vocab_level_dest_id,
143
- vocab_level_edge_types):
144
- # connect the first word in the vocab
145
- word_start = min(vocab_to_word[vocab_start])
146
- word_end = min(vocab_to_word[vocab_end])
147
- source_id.append(word_start)
148
- dest_id.append(word_end)
149
- edge_types.append(vocab_edge_type)
150
-
151
- # sequential connection in words
152
- for word_indices_in_v in vocab_to_word.values():
153
- for i, word_idx in enumerate(word_indices_in_v):
154
- if i + 1 < len(word_indices_in_v):
155
- source_id.append(word_idx)
156
- dest_id.append(word_idx + 1)
157
- edge_types.append(4)
158
- if i - 1 >= 0:
159
- source_id.append(word_idx)
160
- dest_id.append(word_idx - 1)
161
- edge_types.append(5)
162
-
163
- # optional: get recurrent edges
164
- if enable_recur_edge:
165
- recur_source, recur_dest = list(range(1, num_word + 1)), list(range(1, num_word + 1))
166
- source_id += recur_source
167
- dest_id += recur_dest
168
- edge_types += [2] * len(recur_source)
169
-
170
- # add <BOS> and <EOS>
171
- source_id += [0, num_word + 1, 1, num_word]
172
- dest_id += [1, num_word, 0, num_word + 1]
173
- edge_types += [4, 4, 5, 5] # 4 represents sequentially forward, 5 is sequential backward
174
-
175
- edges = (torch.LongTensor(source_id), torch.LongTensor(dest_id))
176
- dgl_graph = dgl.graph(edges)
177
- assert dgl_graph.num_edges() == len(edge_types)
178
- return dgl_graph, torch.LongTensor(edge_types)
179
-
180
- def _parse_en(self, clean_sentence, enable_backward_edge=True, enable_recur_edge=True,
181
- enable_inter_sentence_edge=True, sequential_edge=False, consider_bos_for_index=True):
182
- """
183
- clean_sentence: <str>, each word or punctuation should be separated by one blank.
184
- """
185
- edge_types = [] # required for gated graph neural network
186
- clean_sentence = clean_sentence.strip()
187
- if clean_sentence.endswith((" .", " ,", " ;", " :", " ?", " !")):
188
- clean_sentence = clean_sentence[:-2]
189
- if clean_sentence.startswith(". "):
190
- clean_sentence = clean_sentence[2:]
191
- parser_out = self.stanza_parser(clean_sentence)
192
- if len(parser_out.sentences) > 5:
193
- print("Detected more than 5 input sentences! Please check whether the sentence is too long.")
194
- print(clean_sentence)
195
- source_id, dest_id = [], []
196
- sentences_heads = []
197
- word_id_offset = 0
198
- # get forward edges
199
- for s in parser_out.sentences:
200
- for w in s.words:
201
- w_idx = w.id + word_id_offset # it starts from 1, just same as binarizer
202
- w_dest_idx = w.head + word_id_offset
203
- if w.head == 0:
204
- sentences_heads.append(w_idx)
205
- continue
206
- source_id.append(w_idx)
207
- dest_id.append(w_dest_idx)
208
- word_id_offset += len(s.words)
209
- num_word = word_id_offset
210
- edge_types += [0] * len(source_id)
211
-
212
- # optional: get backward edges
213
- if enable_backward_edge:
214
- back_source, back_dest = deepcopy(dest_id), deepcopy(source_id)
215
- source_id += back_source
216
- dest_id += back_dest
217
- edge_types += [1] * len(back_source)
218
-
219
- # optional: get recurrent edges
220
- if enable_recur_edge:
221
- recur_source, recur_dest = list(range(1, num_word + 1)), list(range(1, num_word + 1))
222
- source_id += recur_source
223
- dest_id += recur_dest
224
- edge_types += [2] * len(recur_source)
225
-
226
- # optional: get inter-sentence edges if num_sentences > 1
227
- inter_sentence_source, inter_sentence_dest = [], []
228
- if enable_inter_sentence_edge and len(sentences_heads) > 1:
229
- def get_full_graph_edges(nodes):
230
- tmp_edges = []
231
- for i, node_i in enumerate(nodes):
232
- for j, node_j in enumerate(nodes):
233
- if i == j:
234
- continue
235
- tmp_edges.append((node_i, node_j))
236
- return tmp_edges
237
-
238
- tmp_edges = get_full_graph_edges(sentences_heads)
239
- for (source, dest) in tmp_edges:
240
- inter_sentence_source.append(source)
241
- inter_sentence_dest.append(dest)
242
- source_id += inter_sentence_source
243
- dest_id += inter_sentence_dest
244
- edge_types += [3] * len(inter_sentence_source)
245
-
246
- # add <BOS> and <EOS>
247
- source_id += [0, num_word + 1, 1, num_word]
248
- dest_id += [1, num_word, 0, num_word + 1]
249
- edge_types += [4, 4, 5, 5]  # type 4 is a sequential forward edge, type 5 a sequential backward edge
250
-
251
- # optional: sequential edge
252
- if sequential_edge:
253
- seq_source, seq_dest = list(range(1, num_word)) + list(range(num_word, 1, -1)), \
254
- list(range(2, num_word + 1)) + list(range(num_word - 1, 0, -1))  # lengths now match the 2*(num_word-1) edge-type labels added below
255
- source_id += seq_source
256
- dest_id += seq_dest
257
- edge_types += [4] * (num_word - 1) + [5] * (num_word - 1)
258
- if consider_bos_for_index:
259
- edges = (torch.LongTensor(source_id), torch.LongTensor(dest_id))
260
- else:
261
- edges = (torch.LongTensor(source_id) - 1, torch.LongTensor(dest_id) - 1)
262
- dgl_graph = dgl.graph(edges)
263
- assert dgl_graph.num_edges() == len(edge_types)
264
- return dgl_graph, torch.LongTensor(edge_types)
265
-
266
-
267
- def plot_dgl_sentence_graph(dgl_graph, labels):
268
- """
269
- labels = {idx: word for idx,word in enumerate(sentence.split(" ")) }
270
- """
271
- import matplotlib.pyplot as plt
272
- nx_graph = dgl_graph.to_networkx()
273
- pos = nx.random_layout(nx_graph)
274
- nx.draw(nx_graph, pos, with_labels=False)
275
- nx.draw_networkx_labels(nx_graph, pos, labels)
276
- plt.show()
277
-
278
- if __name__ == '__main__':
279
-
280
- # Unit Test for Chinese Graph Builder
281
- parser = Sentence2GraphParser("zh")
282
- text1 = '宝马配挂跛骡鞍,貂蝉怨枕董翁榻.'
283
- words = ['<BOS>', '宝', '马', '配', '挂', '跛', '骡', '鞍', ',', '貂', '蝉', '怨', '枕', '董', '翁', '榻', '<EOS>']
284
- ph_words = ['<BOS>', 'b_ao3_|', 'm_a3_#', 'p_ei4_|', 'g_ua4_#', 'b_o3_#', 'l_uo2_|', 'an1', ',', 'd_iao1_|',
285
- 'ch_an2_#', 'van4_#', 'zh_en3_#', 'd_ong3_|', 'ueng1_#', 't_a4', '<EOS>']
286
- graph1, etypes1 = parser.parse(text1, words, ph_words)
287
- plot_dgl_sentence_graph(graph1, {i: w for i, w in enumerate(ph_words)})
288
-
289
- # Unit Test for English Graph Builder
290
- parser = Sentence2GraphParser("en")
291
- text2 = "I love you . You love me . Mixue ice-scream and tea ."
292
- graph2, etypes2 = parser.parse(text2)
293
- plot_dgl_sentence_graph(graph2, {i: w for i, w in enumerate(("<BOS> " + text2 + " <EOS>").split(" "))})
294
-
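For reference, the edge-type scheme above can be reproduced by hand for a three-word sentence (an illustrative sketch, not taken from the file): type 0 is a forward dependency edge, 1 its backward copy, 2 a self-loop, 3 an inter-sentence link, and 4/5 the sequential forward/backward edges, with node 0 as <BOS> and the last node as <EOS>.

```python
import torch
import dgl

src, dst, etypes = [], [], []
# dependency edges (type 0): words 2 and 3 point to their head, word 1
src += [2, 3]; dst += [1, 1]; etypes += [0, 0]
# backward copies of the dependencies (type 1)
src += [1, 1]; dst += [2, 3]; etypes += [1, 1]
# self-loops on the word nodes (type 2)
src += [1, 2, 3]; dst += [1, 2, 3]; etypes += [2, 2, 2]
# <BOS>/<EOS> hookup: sequential forward (type 4) and backward (type 5)
src += [0, 4, 1, 3]; dst += [1, 3, 0, 4]; etypes += [4, 4, 5, 5]

g = dgl.graph((torch.LongTensor(src), torch.LongTensor(dst)))
assert g.num_edges() == len(etypes)  # the same invariant the parser asserts
```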
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/portaspeech/portaspeech.py DELETED
@@ -1,233 +0,0 @@
1
- import math
2
- import torch
3
- import torch.nn.functional as F
4
- from torch import nn
5
- from torch.nn import Linear
6
-
7
- from text_to_speech.modules.commons.conv import ConvBlocks, ConditionalConvBlocks
8
- from text_to_speech.modules.commons.layers import Embedding
9
- from text_to_speech.modules.commons.rel_transformer import RelTransformerEncoder
10
- from text_to_speech.modules.commons.transformer import MultiheadAttention, FFTBlocks
11
- from text_to_speech.modules.tts.commons.align_ops import clip_mel2token_to_multiple, build_word_mask, expand_states, mel2ph_to_mel2word
12
- from text_to_speech.modules.tts.fs import FS_DECODERS, FastSpeech
13
- from text_to_speech.modules.tts.portaspeech.fvae import FVAE
14
- from text_to_speech.utils.commons.meters import Timer
15
- from text_to_speech.utils.nn.seq_utils import group_hidden_by_segs
16
-
17
-
18
- class SinusoidalPosEmb(nn.Module):
19
- def __init__(self, dim):
20
- super().__init__()
21
- self.dim = dim
22
-
23
- def forward(self, x):
24
- """
25
-
26
- :param x: [B, T]
27
- :return: [B, T, H]
28
- """
29
- device = x.device
30
- half_dim = self.dim // 2
31
- emb = math.log(10000) / (half_dim - 1)
32
- emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
33
- emb = x[:, :, None] * emb[None, :]
34
- emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
35
- return emb
36
-
37
-
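A small usage sketch for the class above (the hidden size 192 is illustrative, not taken from any config): it maps a [B, T] tensor of positions to [B, T, H] sinusoidal features, which is how get_pos_embed() below consumes it.

```python
import torch

pos_emb = SinusoidalPosEmb(dim=192)
positions = torch.linspace(0, 1, steps=50)[None, :]  # [B=1, T=50] fractional positions
features = pos_emb(positions)
print(features.shape)  # torch.Size([1, 50, 192])
```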
38
- class PortaSpeech(FastSpeech):
39
- def __init__(self, ph_dict_size, word_dict_size, hparams, out_dims=None):
40
- self.hparams = hparams
41
- super().__init__(ph_dict_size, hparams, out_dims)
42
- # build linguistic encoder
43
- if hparams['use_word_encoder']:
44
- # default False, use independent word embedding instead of phoneme encoding to represent word
45
- self.word_encoder = RelTransformerEncoder(
46
- word_dict_size, self.hidden_size, self.hidden_size, self.hidden_size, 2,
47
- hparams['word_enc_layers'], hparams['enc_ffn_kernel_size'])
48
- if hparams['dur_level'] == 'word':
49
- if hparams['word_encoder_type'] == 'rel_fft':
50
- self.ph2word_encoder = RelTransformerEncoder(
51
- 0, self.hidden_size, self.hidden_size, self.hidden_size, 2,
52
- hparams['word_enc_layers'], hparams['enc_ffn_kernel_size'])
53
- if hparams['word_encoder_type'] == 'fft':
54
- self.ph2word_encoder = FFTBlocks(
55
- self.hidden_size, hparams['word_enc_layers'], 1, num_heads=hparams['num_heads'])
56
- self.sin_pos = SinusoidalPosEmb(self.hidden_size)
57
- self.enc_pos_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
58
- self.dec_query_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
59
- self.dec_res_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
60
- self.attn = MultiheadAttention(self.hidden_size, 1, encoder_decoder_attention=True, bias=False)
61
- self.attn.enable_torch_version = False
62
- if hparams['text_encoder_postnet']:
63
- self.text_encoder_postnet = ConvBlocks(
64
- self.hidden_size, self.hidden_size, [1] * 3, 5, layers_in_block=2)
65
- else:
66
- self.sin_pos = SinusoidalPosEmb(self.hidden_size)
67
- # build VAE decoder
68
- if hparams['use_fvae']:
69
- del self.decoder
70
- del self.mel_out
71
- self.fvae = FVAE(
72
- c_in_out=self.out_dims,
73
- hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'],
74
- kernel_size=hparams['fvae_kernel_size'],
75
- enc_n_layers=hparams['fvae_enc_n_layers'],
76
- dec_n_layers=hparams['fvae_dec_n_layers'],
77
- c_cond=self.hidden_size,
78
- use_prior_flow=hparams['use_prior_flow'],
79
- flow_hidden=hparams['prior_flow_hidden'],
80
- flow_kernel_size=hparams['prior_flow_kernel_size'],
81
- flow_n_steps=hparams['prior_flow_n_blocks'],
82
- strides=[hparams['fvae_strides']],
83
- encoder_type=hparams['fvae_encoder_type'],
84
- decoder_type=hparams['fvae_decoder_type'],
85
- )
86
- else:
87
- self.decoder = FS_DECODERS[hparams['decoder_type']](hparams)
88
- self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True)
89
- if hparams['use_pitch_embed']:
90
- self.pitch_embed = Embedding(300, self.hidden_size, 0)
91
- if self.hparams['add_word_pos']:
92
- self.word_pos_proj = Linear(self.hidden_size, self.hidden_size)
93
-
94
- def build_embedding(self, dictionary, embed_dim):
95
- num_embeddings = len(dictionary)
96
- emb = Embedding(num_embeddings, embed_dim, self.padding_idx)
97
- return emb
98
-
99
- def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None,
100
- spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None,
101
- global_step=None, *args, **kwargs):
102
- ret = {}
103
- style_embed = self.forward_style_embed(spk_embed, spk_id)
104
- x, tgt_nonpadding = self.run_text_encoder(
105
- txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, **kwargs)
106
- x = x * tgt_nonpadding
107
- ret['nonpadding'] = tgt_nonpadding
108
- if self.hparams['use_pitch_embed']:
109
- x = x + self.pitch_embed(pitch)
110
- ret['decoder_inp'] = x
111
- ret['mel_out_fvae'] = ret['mel_out'] = self.run_decoder(x, tgt_nonpadding, ret, infer, tgt_mels, global_step)
112
- return ret
113
-
114
- def run_text_encoder(self, txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, **kwargs):
115
- word2word = torch.arange(word_len)[None, :].to(ph2word.device) + 1  # [B, T_word], 1-based word indices
116
- src_nonpadding = (txt_tokens > 0).float()[:, :, None]
117
- use_bert = self.hparams.get("use_bert") is True
118
- if use_bert:
119
- ph_encoder_out = self.ph_encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=ph2word,
120
- graph_lst=kwargs['graph_lst'], etypes_lst=kwargs['etypes_lst'],
121
- cl_feats=kwargs['cl_feats'], ret=ret) * src_nonpadding + style_embed
122
- else:
123
- ph_encoder_out = self.ph_encoder(txt_tokens) * src_nonpadding + style_embed
124
- if self.hparams['use_word_encoder']:
125
- word_encoder_out = self.word_encoder(word_tokens) + style_embed
126
- ph_encoder_out = ph_encoder_out + expand_states(word_encoder_out, ph2word)
127
- if self.hparams['dur_level'] == 'word':
128
- word_encoder_out = 0
129
- h_ph_gb_word = group_hidden_by_segs(ph_encoder_out, ph2word, word_len)[0]
130
- word_encoder_out = word_encoder_out + self.ph2word_encoder(h_ph_gb_word)
131
- if self.hparams['use_word_encoder']:
132
- word_encoder_out = word_encoder_out + self.word_encoder(word_tokens)
133
- mel2word = self.forward_dur(ph_encoder_out, mel2word, ret, ph2word=ph2word, word_len=word_len)
134
- mel2word = clip_mel2token_to_multiple(mel2word, self.hparams['frames_multiple'])
135
- tgt_nonpadding = (mel2word > 0).float()[:, :, None]
136
- enc_pos = self.get_pos_embed(word2word, ph2word) # [B, T_ph, H]
137
- dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H]
138
- dec_word_mask = build_word_mask(mel2word, ph2word) # [B, T_mel, T_ph]
139
- x, weight = self.attention(ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask)
140
- if self.hparams['add_word_pos']:
141
- x = x + self.word_pos_proj(dec_pos)
142
- ret['attn'] = weight
143
- else:
144
- mel2ph = self.forward_dur(ph_encoder_out, mel2ph, ret)
145
- mel2ph = clip_mel2token_to_multiple(mel2ph, self.hparams['frames_multiple'])
146
- mel2word = mel2ph_to_mel2word(mel2ph, ph2word)
147
- x = expand_states(ph_encoder_out, mel2ph)
148
- if self.hparams['add_word_pos']:
149
- dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H]
150
- x = x + self.word_pos_proj(dec_pos)
151
- tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
152
- if self.hparams['use_word_encoder']:
153
- x = x + expand_states(word_encoder_out, mel2word)
154
- return x, tgt_nonpadding
155
-
156
- def attention(self, ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask):
157
- ph_kv = self.enc_pos_proj(torch.cat([ph_encoder_out, enc_pos], -1))
158
- word_enc_out_expend = expand_states(word_encoder_out, mel2word)
159
- word_enc_out_expend = torch.cat([word_enc_out_expend, dec_pos], -1)
160
- if self.hparams['text_encoder_postnet']:
161
- word_enc_out_expend = self.dec_res_proj(word_enc_out_expend)
162
- word_enc_out_expend = self.text_encoder_postnet(word_enc_out_expend)
163
- dec_q = x_res = word_enc_out_expend
164
- else:
165
- dec_q = self.dec_query_proj(word_enc_out_expend)
166
- x_res = self.dec_res_proj(word_enc_out_expend)
167
- ph_kv, dec_q = ph_kv.transpose(0, 1), dec_q.transpose(0, 1)
168
- x, (weight, _) = self.attn(dec_q, ph_kv, ph_kv, attn_mask=(1 - dec_word_mask) * -1e9)
169
- x = x.transpose(0, 1)
170
- x = x + x_res
171
- return x, weight
172
-
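The `attn_mask=(1 - dec_word_mask) * -1e9` expression above is plain additive masking: disallowed (mel, phone) pairs receive a large negative bias before the softmax. A toy illustration with a hypothetical 2x3 mask:

```python
import torch

mask = torch.tensor([[1., 1., 0.],   # mel step 0 may attend to phones 0 and 1
                     [0., 1., 1.]])  # mel step 1 may attend to phones 1 and 2
scores = torch.zeros(2, 3)           # stand-in attention logits
probs = torch.softmax(scores + (1 - mask) * -1e9, dim=-1)
print(probs)  # masked positions collapse to ~0 probability
```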
173
- def run_decoder(self, x, tgt_nonpadding, ret, infer, tgt_mels=None, global_step=0):
174
- if not self.hparams['use_fvae']:
175
- x = self.decoder(x)
176
- x = self.mel_out(x)
177
- ret['kl'] = 0
178
- return x * tgt_nonpadding
179
- else:
180
- decoder_inp = x
181
- x = x.transpose(1, 2) # [B, H, T]
182
- tgt_nonpadding_BHT = tgt_nonpadding.transpose(1, 2) # [B, H, T]
183
- if infer:
184
- z = self.fvae(cond=x, infer=True)
185
- else:
186
- tgt_mels = tgt_mels.transpose(1, 2) # [B, 80, T]
187
- z, ret['kl'], ret['z_p'], ret['m_q'], ret['logs_q'] = self.fvae(
188
- tgt_mels, tgt_nonpadding_BHT, cond=x)
189
- if global_step < self.hparams['posterior_start_steps']:
190
- z = torch.randn_like(z)
191
- x_recon = self.fvae.decoder(z, nonpadding=tgt_nonpadding_BHT, cond=x).transpose(1, 2)
192
- ret['pre_mel_out'] = x_recon
193
- return x_recon
194
-
195
- def forward_dur(self, dur_input, mel2word, ret, **kwargs):
196
- """
197
-
198
- :param dur_input: [B, T_txt, H]
199
- :param mel2word: [B, T_mel]
200
- :param kwargs: may carry 'ph2word' and 'word_len' for word-level durations
201
- :param ret:
202
- :return:
203
- """
204
- src_padding = dur_input.data.abs().sum(-1) == 0
205
- dur_input = dur_input.detach() + self.hparams['predictor_grad'] * (dur_input - dur_input.detach())
206
- dur = self.dur_predictor(dur_input, src_padding)
207
- if self.hparams['dur_level'] == 'word':
208
- word_len = kwargs['word_len']
209
- ph2word = kwargs['ph2word']
210
- B, T_ph = ph2word.shape
211
- dur = torch.zeros([B, word_len.max() + 1]).to(ph2word.device).scatter_add(1, ph2word, dur)
212
- dur = dur[:, 1:]
213
- ret['dur'] = dur
214
- if mel2word is None:
215
- mel2word = self.length_regulator(dur).detach()
216
- return mel2word
217
-
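The `scatter_add` call above pools per-phone durations into per-word durations: `ph2word` maps each phone to its 1-based word index, and the dummy column 0 is sliced off afterwards. A worked example with toy values:

```python
import torch

dur = torch.tensor([[0.5, 0.3, 0.2, 0.4]])  # [B=1, T_ph=4] predicted phone durations
ph2word = torch.tensor([[1, 1, 2, 2]])      # phones 0-1 belong to word 1, phones 2-3 to word 2
word_dur = torch.zeros(1, 3).scatter_add(1, ph2word, dur)[:, 1:]
print(word_dur)  # tensor([[0.8000, 0.6000]])
```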
218
- def get_pos_embed(self, word2word, x2word):
219
- x_pos = build_word_mask(word2word, x2word).float() # [B, T_word, T_ph]
220
- x_pos = (x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos).sum(1)
221
- x_pos = self.sin_pos(x_pos.float()) # [B, T_ph, H]
222
- return x_pos
223
-
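The cumsum/sum expression in get_pos_embed() computes, for every frame, its fractional position inside the word it belongs to, before the sinusoidal embedding is applied. Reduced to a single word spanning four frames (one row of the word mask; the real call also sums over the word axis):

```python
import torch

x_pos = torch.tensor([[1., 1., 1., 1.]])  # one word covering 4 frames
frac = x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos
print(frac)  # tensor([[0.2500, 0.5000, 0.7500, 1.0000]])
```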
224
- def store_inverse_all(self):
225
- def remove_weight_norm(m):
226
- try:
227
- if hasattr(m, 'store_inverse'):
228
- m.store_inverse()
229
- nn.utils.remove_weight_norm(m)
230
- except ValueError: # this module didn't have weight norm
231
- return
232
-
233
- self.apply(remove_weight_norm)
spaces/Abdullahw72/bark-voice-cloning/hubert/__init__.py DELETED
File without changes
spaces/AfrodreamsAI/afrodreams/Home.py DELETED
@@ -1,164 +0,0 @@
1
- import neural_style
2
-
3
- import streamlit as st
4
- import os
5
- import random
6
- import numpy as np
7
- from PIL import Image, ImageEnhance
8
- from io import BytesIO
9
- import matplotlib.pyplot as plt
10
- import streamlit_ext as ste #for download button not to rerun
11
- from huggingface_hub import upload_file
12
-
13
- HF_TOKEN = os.environ.get("HF_TOKEN")
14
-
15
- st.set_page_config(layout="wide")
16
-
17
- st.markdown('<p class="font">Afrodreams.AI</p>', unsafe_allow_html=True)
18
- st.subheader("This app takes in your image and styles it with a unique african art.")
19
-
20
- #Create two columns with different width
21
- col1, col2 = st.columns( [0.8, 0.2])
22
- import time
23
-
24
-
25
-
26
- with col1: # To display the header text using css style
27
- st.markdown(""" <style> .font {
28
- font-size:35px ; font-family: 'Cooper Black'; color: #FF9633;}
29
- </style> """, unsafe_allow_html=True)
30
- st.markdown('<p class="font">Upload your photo here...</p>', unsafe_allow_html=True)
31
-
32
-
33
-
34
-
35
-
36
- #Add file uploader to allow users to upload photos
37
- uploaded_file = st.file_uploader("", type=['jpg','png','jpeg'])
38
-
39
- # add slider to side bar
40
- style_weight = st.slider("Select Style Weight", min_value=10, max_value=100, value=12)
41
- img_size_slider = st.select_slider(label='Select Output Quality Level',
42
- options = ['Very Low', 'Low', 'Normal', 'High', 'Very High'],
43
- value='Normal')
44
- img_size_mapping = {'Very Low':128, 'Low':300, 'Normal':400, 'High':500, 'Very High':600}
45
-
46
-
47
- def get_random_subset(list_, num_imgs):
48
- return random.sample(list_, num_imgs)
49
-
50
-
51
- def display_random_images(five_rand_imgs, display_type, size= (15, 6)):
52
- fig = plt.figure(figsize=size)
53
- fig.subplots_adjust(wspace=0.2)
54
- for i in range(1, len(five_rand_imgs)+1):
55
- ith_image = Image.open(five_rand_imgs[i-1])
56
-
57
- ax = fig.add_subplot(1, 5, i)
58
- ax.imshow(ith_image)
59
- ax.set_title(f'{display_type} {i}')
60
- plt.axis('off')
61
-
62
- st.pyplot(fig)
63
-
64
-
65
-
66
- path = 'stylesv2'
67
-
68
-
69
- #expander for style selection
70
- with st.expander("Expand to select style type"):
71
- img_names = [os.path.join(path, img) for img in os.listdir(path)]
72
- five_rand_imgs0 = get_random_subset(img_names, 5)
73
- if 'selected_image' not in st.session_state:
74
- st.session_state.selected_image = five_rand_imgs0
75
- five_rand_imgs = st.session_state.selected_image
76
- display_random_images(five_rand_imgs, 'Style')
77
- chosen_style = st.selectbox(
78
- 'Select the style you want to use',
79
- options = five_rand_imgs, format_func = lambda x: "Style " + str(five_rand_imgs.index(x) + 1),
80
- key= 'expander1'
81
- )
82
-
83
-
84
-
85
- #put notificaation
86
- #with st.empty():
87
- #for seconds in range(5):
88
- #st.info('Please note that by using this app, you agree that your image be will be showcased on this app.')
89
- #time.sleep(1)
90
- #st.empty()
91
-
92
- #Add 'before' and 'after' columns
93
- if uploaded_file is not None:
94
- image = Image.open(uploaded_file)
95
-
96
- col1, col2 = st.columns( [0.5, 0.5])
97
- with col1:
98
- st.markdown('<p style="text-align: center;">Before</p>',unsafe_allow_html=True)
99
- st.image(image,width=300)
100
-
101
- with col2:
102
- st.markdown('<p style="text-align: center;">After</p>',unsafe_allow_html=True)
103
-
104
- # add a button
105
- run = st.button('Generate Art')
106
- my_bar = st.progress(0)
107
- params = neural_style.TransferParams()
108
- params.gpu = "c" #0
109
- params.backend = "mkl"
110
-
111
-
112
- params.image_size = img_size_mapping[img_size_slider]
113
-
114
- params.content_image = uploaded_file
115
- params.style_weight = style_weight
116
-
117
-
118
-
119
- keep_style = False
120
- if run==True:
121
- # run image selection if keep style is false
122
- if keep_style==False:
123
-
124
- styles = os.listdir(path)
125
- #params.style_image = path + '/' + random.choice(styles)
126
- params.style_image = chosen_style
127
-
128
- st.session_state.submitted = True
129
- with st.spinner('Wait for it...'):
130
- neural_style.transfer(params)
131
-
132
- #display image when done.
133
- with col2:
134
- if 'submitted' in st.session_state:
135
- result = Image.open('out.png')
136
- st.image(result, width=300)
137
- buf = BytesIO()
138
- result.save(buf, format="png")
139
-
140
- img_file_name = f"generated_samples/{str(len(os.listdir('generated_samples')))}.png"
141
-
142
- _ = upload_file(path_or_fileobj = 'out.png',
143
- path_in_repo = img_file_name,
144
- repo_id='AfrodreamsAI/afrodreams',
145
- repo_type='space',
146
- token=HF_TOKEN
147
- )
148
-
149
- byte_im = buf.getvalue()
150
- run = ste.download_button("Download Image", data=byte_im, file_name="afrodreams.png")
151
-
152
-
153
- #if run==True:
154
- # selecting random images to be displayed
155
- img_names = [os.path.join('generated_samples', img) for img in os.listdir('generated_samples')]
156
- five_rand_imgs1 = get_random_subset(img_names, 5)
157
- st.subheader('Examples of some Generated Images')
158
- display_random_images(five_rand_imgs1, 'Generated image', size=(20, 15))
159
-
160
-
161
-
162
-
163
-
164
-
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/dialog-quest/QuestMethods.js DELETED
@@ -1,18 +0,0 @@
1
- export default {
2
- start(key) {
3
- this.questionManager
4
- .restartQuest()
5
- .getNextQuestion(key);
6
- return this;
7
- },
8
-
9
- next(key) {
10
- this.questionManager
11
- .getNextQuestion(key);
12
- return this;
13
- },
14
-
15
- isLast() {
16
- return this.questionManager.isLastQuestion();
17
- },
18
- };
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/checkbox/Factory.d.ts DELETED
@@ -1,19 +0,0 @@
1
- import Checkbox from './Checkbox';
2
-
3
- export default function (
4
- x: number, y: number,
5
- width: number, height: number,
6
- color?: number,
7
- config?: Checkbox.IConfig
8
- ): Checkbox;
9
-
10
- export default function (
11
- x: number, y: number,
12
- width: number, height: number,
13
- config?: Checkbox.IConfig
14
- ): Checkbox;
15
-
16
-
17
- export default function (
18
- config?: Checkbox.IConfig
19
- ): Checkbox;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/skew/Skew.js DELETED
@@ -1,2 +0,0 @@
1
- import { ContainerSkew } from '../../../plugins/quadimage.js';
2
- export default ContainerSkew;
spaces/Akmyradov/TurkmenSpeechRecogntion/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: TurkmenSpeechRecognition
3
- emoji: ⚡
4
- colorFrom: gray
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.33.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AlekseyKorshuk/thin-plate-spline-motion-model/run.py DELETED
@@ -1,89 +0,0 @@
1
- import matplotlib
2
- matplotlib.use('Agg')
3
-
4
- import os, sys
5
- import yaml
6
- from argparse import ArgumentParser
7
- from time import gmtime, strftime
8
- from shutil import copy
9
- from frames_dataset import FramesDataset
10
-
11
- from modules.inpainting_network import InpaintingNetwork
12
- from modules.keypoint_detector import KPDetector
13
- from modules.bg_motion_predictor import BGMotionPredictor
14
- from modules.dense_motion import DenseMotionNetwork
15
- from modules.avd_network import AVDNetwork
16
- import torch
17
- from train import train
18
- from train_avd import train_avd
19
- from reconstruction import reconstruction
20
-
21
-
22
-
23
- if __name__ == "__main__":
24
-
25
- if sys.version_info[0] < 3:
26
- raise Exception("You must use Python 3 or higher. Recommended version is Python 3.9")
27
-
28
- parser = ArgumentParser()
29
- parser.add_argument("--config", default="config/vox-256.yaml", help="path to config")
30
- parser.add_argument("--mode", default="train", choices=["train", "reconstruction", "train_avd"])
31
- parser.add_argument("--log_dir", default='log', help="path to log into")
32
- parser.add_argument("--checkpoint", default=None, help="path to checkpoint to restore")
33
- parser.add_argument("--device_ids", default="0,1", type=lambda x: list(map(int, x.split(','))),
34
- help="Names of the devices comma separated.")
35
-
36
- opt = parser.parse_args()
37
- with open(opt.config) as f:
38
- config = yaml.load(f, Loader=yaml.FullLoader)
39
-
40
- if opt.checkpoint is not None:
41
- log_dir = os.path.join(*os.path.split(opt.checkpoint)[:-1])
42
- else:
43
- log_dir = os.path.join(opt.log_dir, os.path.basename(opt.config).split('.')[0])
44
- log_dir += ' ' + strftime("%d_%m_%y_%H.%M.%S", gmtime())
45
-
46
- inpainting = InpaintingNetwork(**config['model_params']['generator_params'],
47
- **config['model_params']['common_params'])
48
-
49
- if torch.cuda.is_available():
50
- cuda_device = torch.device('cuda:'+str(opt.device_ids[0]))
51
- inpainting.to(cuda_device)
52
-
53
- kp_detector = KPDetector(**config['model_params']['common_params'])
54
- dense_motion_network = DenseMotionNetwork(**config['model_params']['common_params'],
55
- **config['model_params']['dense_motion_params'])
56
-
57
- if torch.cuda.is_available():
58
- kp_detector.to(opt.device_ids[0])
59
- dense_motion_network.to(opt.device_ids[0])
60
-
61
- bg_predictor = None
62
- if (config['model_params']['common_params']['bg']):
63
- bg_predictor = BGMotionPredictor()
64
- if torch.cuda.is_available():
65
- bg_predictor.to(opt.device_ids[0])
66
-
67
- avd_network = None
68
- if opt.mode == "train_avd":
69
- avd_network = AVDNetwork(num_tps=config['model_params']['common_params']['num_tps'],
70
- **config['model_params']['avd_network_params'])
71
- if torch.cuda.is_available():
72
- avd_network.to(opt.device_ids[0])
73
-
74
- dataset = FramesDataset(is_train=(opt.mode.startswith('train')), **config['dataset_params'])
75
-
76
- if not os.path.exists(log_dir):
77
- os.makedirs(log_dir)
78
- if not os.path.exists(os.path.join(log_dir, os.path.basename(opt.config))):
79
- copy(opt.config, log_dir)
80
-
81
- if opt.mode == 'train':
82
- print("Training...")
83
- train(config, inpainting, kp_detector, bg_predictor, dense_motion_network, opt.checkpoint, log_dir, dataset)
84
- elif opt.mode == 'train_avd':
85
- print("Training Animation via Disentaglement...")
86
- train_avd(config, inpainting, kp_detector, bg_predictor, dense_motion_network, avd_network, opt.checkpoint, log_dir, dataset)
87
- elif opt.mode == 'reconstruction':
88
- print("Reconstruction...")
89
- reconstruction(config, inpainting, kp_detector, bg_predictor, dense_motion_network, opt.checkpoint, log_dir, dataset)
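The `type` callable on `--device_ids` turns the comma-separated string into a list of ints, so an invocation like `python run.py --config config/vox-256.yaml --mode train --device_ids 0,1` yields `opt.device_ids == [0, 1]`. The same lambda in isolation:

```python
parse_ids = lambda x: list(map(int, x.split(',')))
print(parse_ids("0,1"))  # [0, 1]
```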
spaces/Aloento/9Nine-PITS/data_utils.py DELETED
@@ -1,358 +0,0 @@
1
- # modified from https://github.com/jaywalnut310/vits
2
- import os
3
- import random
4
-
5
- import torch
6
- import torch.utils.data
7
-
8
- import commons
9
- from analysis import Pitch
10
- from mel_processing import spectrogram_torch
11
- from text import cleaned_text_to_sequence
12
- from utils import load_wav_to_torch, load_filepaths_and_text
13
-
14
- """ Modified from Multi speaker version of VITS"""
15
-
16
-
17
- class TextAudioSpeakerLoader(torch.utils.data.Dataset):
18
- """
19
- 1) loads audio, speaker_id, text pairs
20
- 2) normalizes text and converts them to sequences of integers
21
- 3) computes spectrograms from audio files.
22
- """
23
-
24
- def __init__(self, audiopaths_sid_text, hparams, pt_run=False):
25
- self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
26
- self.sampling_rate = hparams.sampling_rate
27
- self.filter_length = hparams.filter_length
28
- self.hop_length = hparams.hop_length
29
- self.win_length = hparams.win_length
30
-
31
- self.add_blank = hparams.add_blank
32
- self.min_text_len = 1
33
- self.max_text_len = 190
34
-
35
- self.speaker_dict = {
36
- speaker: idx
37
- for idx, speaker in enumerate(hparams.speakers)
38
- }
39
- self.data_path = hparams.data_path
40
-
41
- self.pitch = Pitch(sr=hparams.sampling_rate,
42
- W=hparams.tau_max,
43
- tau_max=hparams.tau_max,
44
- midi_start=hparams.midi_start,
45
- midi_end=hparams.midi_end,
46
- octave_range=hparams.octave_range)
47
-
48
- random.seed(1234)
49
- random.shuffle(self.audiopaths_sid_text)
50
- self._filter()
51
- if pt_run:
52
- for _audiopaths_sid_text in self.audiopaths_sid_text:
53
- _ = self.get_audio_text_speaker_pair(_audiopaths_sid_text,
54
- True)
55
-
56
- def _filter(self):
57
- """
58
- Filter text & store spec lengths
59
- """
60
- # Store spectrogram lengths for Bucketing
61
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
62
- # spec_length = wav_length // hop_length
63
-
64
- audiopaths_sid_text_new = []
65
- lengths = []
66
- for audiopath, spk, text, lang in self.audiopaths_sid_text:
67
- if self.min_text_len <= len(text) and len(
68
- text) <= self.max_text_len:
69
- audiopath = os.path.join(self.data_path, audiopath)
70
- if not os.path.exists(audiopath):
71
- print(audiopath, "not exist!")
72
- continue
73
- try:
74
- audio, sampling_rate = load_wav_to_torch(audiopath)
75
- except Exception:
76
- print(audiopath, "load error!")
77
- continue
78
- audiopaths_sid_text_new.append([audiopath, spk, text, lang])
79
- lengths.append(
80
- os.path.getsize(audiopath) // (2 * self.hop_length))
81
- self.audiopaths_sid_text = audiopaths_sid_text_new
82
- self.lengths = lengths
83
-
84
- def get_audio_text_speaker_pair(self, audiopath_sid_text, pt_run=False):
85
- # separate filename, speaker_id and text
86
- audiopath, spk, text, lang = audiopath_sid_text
87
- text, lang = self.get_text(text, lang)
88
- spec, ying, wav = self.get_audio(audiopath, pt_run)
89
- sid = self.get_sid(self.speaker_dict[spk])
90
- return (text, spec, ying, wav, sid, lang)
91
-
92
- def get_audio(self, filename, pt_run=False):
93
- audio, sampling_rate = load_wav_to_torch(filename)
94
- if sampling_rate != self.sampling_rate:
95
- raise ValueError("{} {} SR doesn't match target {} SR".format(
96
- sampling_rate, self.sampling_rate))
97
- audio_norm = audio.unsqueeze(0)
98
- spec_filename = filename.replace(".wav", ".spec.pt")
99
- ying_filename = filename.replace(".wav", ".ying.pt")
100
- if os.path.exists(spec_filename) and not pt_run:
101
- spec = torch.load(spec_filename, map_location='cpu')
102
- else:
103
- spec = spectrogram_torch(audio_norm,
104
- self.filter_length,
105
- self.sampling_rate,
106
- self.hop_length,
107
- self.win_length,
108
- center=False)
109
- spec = torch.squeeze(spec, 0)
110
- torch.save(spec, spec_filename)
111
- if os.path.exists(ying_filename) and not pt_run:
112
- ying = torch.load(ying_filename, map_location='cpu')
113
- else:
114
- wav = torch.nn.functional.pad(
115
- audio_norm.unsqueeze(0),
116
- (self.filter_length - self.hop_length,
117
- self.filter_length - self.hop_length +
118
- (-audio_norm.shape[1]) % self.hop_length + self.hop_length * (audio_norm.shape[1] % self.hop_length == 0)),
119
- mode='constant').squeeze(0)
120
- ying = self.pitch.yingram(wav)[0]
121
- torch.save(ying, ying_filename)
122
- return spec, ying, audio_norm
123
-
124
- def get_text(self, text, lang):
125
- text_norm = cleaned_text_to_sequence(text)
126
- lang = [int(i) for i in lang.split(" ")]
127
- if self.add_blank:
128
- text_norm, lang = commons.intersperse_with_language_id(text_norm, lang, 0)
129
- text_norm = torch.LongTensor(text_norm)
130
- lang = torch.LongTensor(lang)
131
- return text_norm, lang
132
-
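`intersperse_with_language_id` is defined elsewhere; assuming it follows the usual VITS `add_blank` pattern, it inserts a blank token (id 0) around every symbol while keeping the language ids aligned. The classic single-sequence version, as a sketch rather than the actual implementation:

```python
def intersperse(seq, item=0):
    # [5, 7, 9] -> [0, 5, 0, 7, 0, 9, 0]
    out = [item] * (2 * len(seq) + 1)
    out[1::2] = seq
    return out

print(intersperse([5, 7, 9]))  # [0, 5, 0, 7, 0, 9, 0]
```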
133
- def get_sid(self, sid):
134
- sid = torch.LongTensor([int(sid)])
135
- return sid
136
-
137
- def __getitem__(self, index):
138
- return self.get_audio_text_speaker_pair(
139
- self.audiopaths_sid_text[index])
140
-
141
- def __len__(self):
142
- return len(self.audiopaths_sid_text)
143
-
144
-
145
- class TextAudioSpeakerCollate:
146
- """ Zero-pads model inputs and targets"""
147
-
148
- def __init__(self, return_ids=False):
149
- self.return_ids = return_ids
150
-
151
- def __call__(self, batch):
152
- """Collate's training batch from normalized text, audio and speaker identities
153
- PARAMS
154
- ------
155
- batch: list of (text, spec, ying, wav, sid, lang) tuples
156
- """
157
- # Right zero-pad all one-hot text sequences to max input length
158
- _, ids_sorted_decreasing = torch.sort(torch.LongTensor(
159
- [x[1].size(1) for x in batch]),
160
- dim=0,
161
- descending=True)
162
-
163
- max_text_len = max([len(x[0]) for x in batch])
164
- max_spec_len = max([x[1].size(1) for x in batch])
165
- max_ying_len = max([x[2].size(1) for x in batch])
166
- max_wav_len = max([x[3].size(1) for x in batch])
167
-
168
- text_lengths = torch.LongTensor(len(batch))
169
- spec_lengths = torch.LongTensor(len(batch))
170
- ying_lengths = torch.LongTensor(len(batch))
171
- wav_lengths = torch.LongTensor(len(batch))
172
- sid = torch.LongTensor(len(batch))
173
-
174
- text_padded = torch.LongTensor(len(batch), max_text_len)
175
- tone_padded = torch.LongTensor(len(batch), max_text_len)
176
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0),
177
- max_spec_len)
178
- ying_padded = torch.FloatTensor(len(batch), batch[0][2].size(0),
179
- max_ying_len)
180
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
181
- text_padded.zero_()
182
- tone_padded.zero_()
183
- spec_padded.zero_()
184
- ying_padded.zero_()
185
- wav_padded.zero_()
186
- for i in range(len(ids_sorted_decreasing)):
187
- row = batch[ids_sorted_decreasing[i]]
188
-
189
- text = row[0]
190
- text_padded[i, :text.size(0)] = text
191
- text_lengths[i] = text.size(0)
192
-
193
- spec = row[1]
194
- spec_padded[i, :, :spec.size(1)] = spec
195
- spec_lengths[i] = spec.size(1)
196
-
197
- ying = row[2]
198
- ying_padded[i, :, :ying.size(1)] = ying
199
- ying_lengths[i] = ying.size(1)
200
-
201
- wav = row[3]
202
- wav_padded[i, :, :wav.size(1)] = wav
203
- wav_lengths[i] = wav.size(1)
204
-
205
- tone = row[5]
206
- tone_padded[i, :text.size(0)] = tone
207
-
208
- sid[i] = row[4]
209
-
210
- if self.return_ids:
211
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
212
- return text_padded, text_lengths, spec_padded, spec_lengths, ying_padded, ying_lengths, wav_padded, wav_lengths, sid, tone_padded
213
-
214
-
215
- class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler
216
- ):
217
- """
218
- Maintain similar input lengths in a batch.
219
- Length groups are specified by boundaries.
220
- Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
221
-
222
- It removes samples which are not included in the boundaries.
223
- Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
224
- """
225
-
226
- def __init__(self,
227
- dataset,
228
- batch_size,
229
- boundaries,
230
- num_replicas=None,
231
- rank=None,
232
- shuffle=True):
233
- super().__init__(dataset,
234
- num_replicas=num_replicas,
235
- rank=rank,
236
- shuffle=shuffle)
237
- self.lengths = dataset.lengths
238
- self.batch_size = batch_size
239
- self.boundaries = boundaries
240
-
241
- self.buckets, self.num_samples_per_bucket = self._create_buckets()
242
- self.total_size = sum(self.num_samples_per_bucket)
243
- self.num_samples = self.total_size // self.num_replicas
244
-
245
- def _create_buckets(self):
246
- buckets = [[] for _ in range(len(self.boundaries) - 1)]
247
- for i in range(len(self.lengths)):
248
- length = self.lengths[i]
249
- idx_bucket = self._bisect(length)
250
- if idx_bucket != -1:
251
- buckets[idx_bucket].append(i)
252
-
253
- for i in range(len(buckets) - 1, -1, -1):
254
- if len(buckets[i]) == 0:
255
- buckets.pop(i)
256
- self.boundaries.pop(i + 1)
257
-
258
- num_samples_per_bucket = []
259
- for i in range(len(buckets)):
260
- len_bucket = len(buckets[i])
261
- total_batch_size = self.num_replicas * self.batch_size
262
- rem = (total_batch_size -
263
- (len_bucket % total_batch_size)) % total_batch_size
264
- num_samples_per_bucket.append(len_bucket + rem)
265
- return buckets, num_samples_per_bucket
266
-
267
- def __iter__(self):
268
- # deterministically shuffle based on epoch
269
- g = torch.Generator()
270
- g.manual_seed(self.epoch)
271
-
272
- indices = []
273
- if self.shuffle:
274
- for bucket in self.buckets:
275
- indices.append(
276
- torch.randperm(len(bucket), generator=g).tolist())
277
- else:
278
- for bucket in self.buckets:
279
- indices.append(list(range(len(bucket))))
280
-
281
- batches = []
282
- for i in range(len(self.buckets)):
283
- bucket = self.buckets[i]
284
- len_bucket = len(bucket)
285
- ids_bucket = indices[i]
286
- num_samples_bucket = self.num_samples_per_bucket[i]
287
-
288
- # add extra samples to make it evenly divisible
289
- rem = num_samples_bucket - len_bucket
290
- ids_bucket = ids_bucket + ids_bucket * \
291
- (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
292
-
293
- # subsample
294
- ids_bucket = ids_bucket[self.rank::self.num_replicas]
295
-
296
- # batching
297
- for j in range(len(ids_bucket) // self.batch_size):
298
- batch = [
299
- bucket[idx]
300
- for idx in ids_bucket[j * self.batch_size:(j + 1) *
301
- self.batch_size]
302
- ]
303
- batches.append(batch)
304
-
305
- if self.shuffle:
306
- batch_ids = torch.randperm(len(batches), generator=g).tolist()
307
- batches = [batches[i] for i in batch_ids]
308
- self.batches = batches
309
-
310
- assert len(self.batches) * self.batch_size == self.num_samples
311
- return iter(self.batches)
312
-
313
- def _bisect(self, x, lo=0, hi=None):
314
- if hi is None:
315
- hi = len(self.boundaries) - 1
316
-
317
- if hi > lo:
318
- mid = (hi + lo) // 2
319
- if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
320
- return mid
321
- elif x <= self.boundaries[mid]:
322
- return self._bisect(x, lo, mid)
323
- else:
324
- return self._bisect(x, mid + 1, hi)
325
- else:
326
- return -1
327
-
328
- def __len__(self):
329
- return self.num_samples // self.batch_size
330
-
331
-
332
- def create_spec(audiopaths_sid_text, hparams):
333
- audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
334
- for audiopath, _, _, _ in audiopaths_sid_text:
335
- audiopath = os.path.join(hparams.data_path, audiopath)
336
- if not os.path.exists(audiopath):
337
- print(audiopath, "not exist!")
338
- continue
339
- try:
340
- audio, sampling_rate = load_wav_to_torch(audiopath)
341
- except Exception:
342
- print(audiopath, "load error!")
343
- continue
344
- if sampling_rate != hparams.sampling_rate:
345
- raise ValueError("{} {} SR doesn't match target {} SR".format(
346
- sampling_rate, hparams.sampling_rate))
347
- audio_norm = audio.unsqueeze(0)
348
- specpath = audiopath.replace(".wav", ".spec.pt")
349
-
350
- if not os.path.exists(specpath):
351
- spec = spectrogram_torch(audio_norm,
352
- hparams.filter_length,
353
- hparams.sampling_rate,
354
- hparams.hop_length,
355
- hparams.win_length,
356
- center=False)
357
- spec = torch.squeeze(spec, 0)
358
- torch.save(spec, specpath)
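To make the bucketing rule in DistributedBucketSampler concrete: _bisect() places a sample of length L into bucket i exactly when boundaries[i] < L <= boundaries[i + 1], and samples outside the boundary range are discarded. A standalone illustration with toy boundaries:

```python
boundaries = [32, 300, 500, 700]
for L in [20, 100, 400, 650, 900]:
    bucket = next((i for i in range(len(boundaries) - 1)
                   if boundaries[i] < L <= boundaries[i + 1]), -1)
    print(L, '-> bucket', bucket)  # 20 -> -1, 100 -> 0, 400 -> 1, 650 -> 2, 900 -> -1
```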
spaces/Amrrs/hubble-jwst-compare/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Hubble Jwst Compare
3
- emoji: 😻
4
- colorFrom: pink
5
- colorTo: gray
6
- sdk: streamlit
7
- sdk_version: 1.10.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/conceptual/philosophy.md DELETED
@@ -1,110 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Philosophy
14
-
15
- 🧨 Diffusers provides **state-of-the-art** pretrained diffusion models across multiple modalities.
16
- Its purpose is to serve as a **modular toolbox** for both inference and training.
17
-
18
- We aim to build a library that stands the test of time and therefore take API design very seriously.
19
-
20
- In a nutshell, Diffusers is built to be a natural extension of PyTorch. Therefore, most of our design choices are based on [PyTorch's Design Principles](https://pytorch.org/docs/stable/community/design.html#pytorch-design-philosophy). Let's go over the most important ones:
21
-
22
- ## Usability over Performance
23
-
24
- - While Diffusers has many built-in performance-enhancing features (see [Memory and Speed](https://huggingface.co/docs/diffusers/optimization/fp16)), models are always loaded with the highest precision and lowest optimization. Therefore, by default diffusion pipelines are always instantiated on CPU with float32 precision if not otherwise defined by the user. This ensures usability across different platforms and accelerators and means that no complex installations are required to run the library.
25
- - Diffusers aims to be a **lightweight** package and therefore has very few required dependencies, but many soft dependencies that can improve performance (such as `accelerate`, `safetensors`, `onnx`, etc.). We strive to keep the library as lightweight as possible so that it can be added without much concern as a dependency on other packages.
26
- - Diffusers prefers simple, self-explanatory code over condensed, magic code. This means that shorthand code syntax such as lambda functions and advanced PyTorch operators is often not desired.
27
-
28
- ## Simple over easy
29
-
30
- As PyTorch states, **explicit is better than implicit** and **simple is better than complex**. This design philosophy is reflected in multiple parts of the library:
31
- - We follow PyTorch's API with methods like [`DiffusionPipeline.to`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.to) to let the user handle device management.
32
- - Raising concise error messages is preferred to silently correct erroneous input. Diffusers aims at teaching the user, rather than making the library as easy to use as possible.
33
- - Complex model vs. scheduler logic is exposed instead of magically handled inside. Schedulers/Samplers are separated from diffusion models with minimal dependencies on each other. This forces the user to write the unrolled denoising loop. However, the separation allows for easier debugging and gives the user more control over adapting the denoising process or switching out diffusion models or schedulers.
34
- - Separately trained components of the diffusion pipeline, *e.g.* the text encoder, the unet, and the variational autoencoder, each have their own model class. This forces the user to handle the interaction between the different model components, and the serialization format separates the model components into different files. However, this allows for easier debugging and customization. Dreambooth or textual inversion training
35
- is very simple thanks to diffusers' ability to separate single components of the diffusion pipeline.
36
-
37
- ## Tweakable, contributor-friendly over abstraction
38
-
39
- For large parts of the library, Diffusers adopts an important design principle of the [Transformers library](https://github.com/huggingface/transformers), which is to prefer copy-pasted code over hasty abstractions. This design principle is very opinionated and stands in stark contrast to popular design principles such as [Don't repeat yourself (DRY)](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself).
40
- In short, just like Transformers does for modeling files, diffusers prefers to keep an extremely low level of abstraction and very self-contained code for pipelines and schedulers.
41
- Functions, long code blocks, and even classes can be copied across multiple files which at first can look like a bad, sloppy design choice that makes the library unmaintainable.
42
- **However**, this design has proven to be extremely successful for Transformers and makes a lot of sense for community-driven, open-source machine learning libraries because:
43
- - Machine Learning is an extremely fast-moving field in which paradigms, model architectures, and algorithms are changing rapidly, which therefore makes it very difficult to define long-lasting code abstractions.
44
- - Machine Learning practitioners like to be able to quickly tweak existing code for ideation and research and therefore prefer self-contained code over one that contains many abstractions.
45
- - Open-source libraries rely on community contributions and therefore must build a library that is easy to contribute to. The more abstract the code, the more dependencies, the harder to read, and the harder to contribute to. Contributors simply stop contributing to very abstract libraries out of fear of breaking vital functionality. If contributing to a library cannot break other fundamental code, not only is it more inviting for potential new contributors, but it is also easier to review and contribute to multiple parts in parallel.
46
-
47
- At Hugging Face, we call this design the **single-file policy** which means that almost all of the code of a certain class should be written in a single, self-contained file. To read more about the philosophy, you can have a look
48
- at [this blog post](https://huggingface.co/blog/transformers-design-philosophy).
49
-
50
- In diffusers, we follow this philosophy for both pipelines and schedulers, but only partly for diffusion models. The reason we don't follow this design fully for diffusion models is because almost all diffusion pipelines, such
51
- as [DDPM](https://huggingface.co/docs/diffusers/v0.12.0/en/api/pipelines/ddpm), [Stable Diffusion](https://huggingface.co/docs/diffusers/v0.12.0/en/api/pipelines/stable_diffusion/overview#stable-diffusion-pipelines), [UnCLIP (Dalle-2)](https://huggingface.co/docs/diffusers/v0.12.0/en/api/pipelines/unclip#overview) and [Imagen](https://imagen.research.google/) all rely on the same diffusion model, the [UNet](https://huggingface.co/docs/diffusers/api/models#diffusers.UNet2DConditionModel).
52
-
53
- Great, now you should have generally understood why 🧨 Diffusers is designed the way it is 🤗.
54
- We try to apply these design principles consistently across the library. Nevertheless, there are some minor exceptions to the philosophy or some unlucky design choices. If you have feedback regarding the design, we would ❤️ to hear it [directly on GitHub](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=).
55
-
56
- ## Design Philosophy in Detail
57
-
58
- Now, let's look a bit into the nitty-gritty details of the design philosophy. Diffusers essentially consists of three major classes: [pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines), [models](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models), and [schedulers](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
59
- Let's walk through the design decisions for each class in more detail.
60
-
61
- ### Pipelines
62
-
63
- Pipelines are designed to be easy to use (therefore do not follow [*Simple over easy*](#simple-over-easy) 100%), are not feature complete, and should loosely be seen as examples of how to use [models](#models) and [schedulers](#schedulers) for inference.
64
-
65
- The following design principles are followed:
66
- - Pipelines follow the single-file policy. All pipelines can be found in individual directories under src/diffusers/pipelines. One pipeline folder corresponds to one diffusion paper/project/release. Multiple pipeline files can be gathered in one pipeline folder, as it’s done for [`src/diffusers/pipelines/stable-diffusion`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion). If pipelines share similar functionality, one can make use of the [#Copied from mechanism](https://github.com/huggingface/diffusers/blob/125d783076e5bd9785beb05367a2d2566843a271/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L251).
67
- - Pipelines all inherit from [`DiffusionPipeline`].
68
- - Every pipeline consists of different model and scheduler components, that are documented in the [`model_index.json` file](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json), are accessible under the same name as attributes of the pipeline and can be shared between pipelines with [`DiffusionPipeline.components`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.components) function.
69
- - Every pipeline should be loadable via the [`DiffusionPipeline.from_pretrained`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained) function.
70
- - Pipelines should be used **only** for inference.
71
- - Pipelines should be very readable, self-explanatory, and easy to tweak.
72
- - Pipelines should be designed to build on top of each other and be easy to integrate into higher-level APIs.
73
- - Pipelines are **not** intended to be feature-complete user interfaces. For future complete user interfaces one should rather have a look at [InvokeAI](https://github.com/invoke-ai/InvokeAI), [Diffuzers](https://github.com/abhishekkrthakur/diffuzers), and [lama-cleaner](https://github.com/Sanster/lama-cleaner).
74
- - Every pipeline should have one and only one way to run it via a `__call__` method. The naming of the `__call__` arguments should be shared across all pipelines.
75
- - Pipelines should be named after the task they are intended to solve.
76
- - In almost all cases, novel diffusion pipelines shall be implemented in a new pipeline folder/file.
77
-
78
- ### Models
79
-
80
- Models are designed as configurable toolboxes that are natural extensions of [PyTorch's Module class](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). They only partly follow the **single-file policy**.
81
-
82
- The following design principles are followed:
83
- - Models correspond to **a type of model architecture**. *E.g.* the [`UNet2DConditionModel`] class is used for all UNet variations that expect 2D image inputs and are conditioned on some context.
84
- - All models can be found in [`src/diffusers/models`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models) and every model architecture shall be defined in its file, e.g. [`unet_2d_condition.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py), [`transformer_2d.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformer_2d.py), etc...
85
- - Models **do not** follow the single-file policy and should make use of smaller model building blocks, such as [`attention.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py), [`resnet.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py), [`embeddings.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py), etc... **Note**: This is in stark contrast to Transformers' modeling files and shows that models do not really follow the single-file policy.
86
- - Models intend to expose complexity, just like PyTorch's module does, and give clear error messages.
87
- - Models all inherit from `ModelMixin` and `ConfigMixin`.
88
- - Models can be optimized for performance when it doesn’t demand major code changes, keeps backward compatibility, and gives significant memory or compute gain.
89
- - Models should by default have the highest precision and lowest performance setting.
90
- - To integrate new model checkpoints whose general architecture can be classified as an architecture that already exists in Diffusers, the existing model architecture shall be adapted to make it work with the new checkpoint. One should only create a new file if the model architecture is fundamentally different.
91
- - Models should be designed to be easily extendable to future changes. This can be achieved by limiting public function arguments, configuration arguments, and "foreseeing" future changes, *e.g.* it is usually better to add `string` "...type" arguments that can easily be extended to new future types instead of boolean `is_..._type` arguments. Only the minimum amount of changes shall be made to existing architectures to make a new model checkpoint work.
92
- - The model design is a difficult trade-off between keeping code readable and concise and supporting many model checkpoints. For most parts of the modeling code, classes shall be adapted for new model checkpoints, while there are some exceptions where it is preferred to add new classes to make sure the code is kept concise and
93
- readable long-term, such as [UNet blocks](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py) and [Attention processors](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
94
-
95
- ### Schedulers
96
-
97
- Schedulers are responsible for guiding the denoising process at inference time as well as for defining the noise schedule used in training. They are designed as individual classes with loadable configuration files and strongly follow the **single-file policy**.
98
-
99
- The following design principles are followed:
100
- - All schedulers are found in [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
101
- - Schedulers are **not** allowed to import from large utils files and shall be kept very self-contained.
102
- - One scheduler python file corresponds to one scheduler algorithm (as might be defined in a paper).
103
- - If schedulers share similar functionalities, we can make use of the `#Copied from` mechanism.
104
- - Schedulers all inherit from `SchedulerMixin` and `ConfigMixin`.
105
- - Schedulers can be easily swapped out with the [`ConfigMixin.from_config`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config) method as explained in detail [here](./using-diffusers/schedulers.md).
106
- - Every scheduler has to have a `set_num_inference_steps`, and a `step` function. `set_num_inference_steps(...)` has to be called before every denoising process, *i.e.* before `step(...)` is called.
107
- - Every scheduler exposes the timesteps to be "looped over" via a `timesteps` attribute, which is an array of timesteps the model will be called upon.
108
- - The `step(...)` function takes a predicted model output and the "current" sample (x_t) and returns the "previous", slightly more denoised sample (x_t-1).
109
- - Given the complexity of diffusion schedulers, the `step` function does not expose all the complexity and can be a bit of a "black box".
110
- - In almost all cases, novel schedulers shall be implemented in a new scheduling file.
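To make the scheduler contract concrete, the unrolled denoising loop it is designed around looks as follows (a sketch: `unet` and the initial `sample` are assumed to exist, and in the released library the setup method is spelled `set_timesteps`):

```python
scheduler.set_timesteps(50)                  # define the inference schedule
for t in scheduler.timesteps:                # loop over the exposed timesteps
    noise_pred = unet(sample, t).sample      # predicted model output at step t
    sample = scheduler.step(noise_pred, t, sample).prev_sample  # x_t -> x_(t-1)
```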
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_pndm.py DELETED
@@ -1,242 +0,0 @@
- import tempfile
-
- import torch
-
- from diffusers import PNDMScheduler
-
- from .test_schedulers import SchedulerCommonTest
-
-
- class PNDMSchedulerTest(SchedulerCommonTest):
-     scheduler_classes = (PNDMScheduler,)
-     forward_default_kwargs = (("num_inference_steps", 50),)
-
-     def get_scheduler_config(self, **kwargs):
-         config = {
-             "num_train_timesteps": 1000,
-             "beta_start": 0.0001,
-             "beta_end": 0.02,
-             "beta_schedule": "linear",
-         }
-
-         config.update(**kwargs)
-         return config
-
-     def check_over_configs(self, time_step=0, **config):
-         kwargs = dict(self.forward_default_kwargs)
-         num_inference_steps = kwargs.pop("num_inference_steps", None)
-         sample = self.dummy_sample
-         residual = 0.1 * sample
-         dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
-
-         for scheduler_class in self.scheduler_classes:
-             scheduler_config = self.get_scheduler_config(**config)
-             scheduler = scheduler_class(**scheduler_config)
-             scheduler.set_timesteps(num_inference_steps)
-             # copy over dummy past residuals
-             scheduler.ets = dummy_past_residuals[:]
-
-             with tempfile.TemporaryDirectory() as tmpdirname:
-                 scheduler.save_config(tmpdirname)
-                 new_scheduler = scheduler_class.from_pretrained(tmpdirname)
-                 new_scheduler.set_timesteps(num_inference_steps)
-                 # copy over dummy past residuals
-                 new_scheduler.ets = dummy_past_residuals[:]
-
-             output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
-             new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
-
-             assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-             output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
-             new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
-
-             assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-     def test_from_save_pretrained(self):
-         pass
-
-     def check_over_forward(self, time_step=0, **forward_kwargs):
-         kwargs = dict(self.forward_default_kwargs)
-         num_inference_steps = kwargs.pop("num_inference_steps", None)
-         sample = self.dummy_sample
-         residual = 0.1 * sample
-         dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
-
-         for scheduler_class in self.scheduler_classes:
-             scheduler_config = self.get_scheduler_config()
-             scheduler = scheduler_class(**scheduler_config)
-             scheduler.set_timesteps(num_inference_steps)
-
-             # copy over dummy past residuals (must be after setting timesteps)
-             scheduler.ets = dummy_past_residuals[:]
-
-             with tempfile.TemporaryDirectory() as tmpdirname:
-                 scheduler.save_config(tmpdirname)
-                 new_scheduler = scheduler_class.from_pretrained(tmpdirname)
-                 # copy over dummy past residuals
-                 new_scheduler.set_timesteps(num_inference_steps)
-
-                 # copy over dummy past residual (must be after setting timesteps)
-                 new_scheduler.ets = dummy_past_residuals[:]
-
-             output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
-             new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
-
-             assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-             output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
-             new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
-
-             assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
-     def full_loop(self, **config):
-         scheduler_class = self.scheduler_classes[0]
-         scheduler_config = self.get_scheduler_config(**config)
-         scheduler = scheduler_class(**scheduler_config)
-
-         num_inference_steps = 10
-         model = self.dummy_model()
-         sample = self.dummy_sample_deter
-         scheduler.set_timesteps(num_inference_steps)
-
-         for i, t in enumerate(scheduler.prk_timesteps):
-             residual = model(sample, t)
-             sample = scheduler.step_prk(residual, t, sample).prev_sample
-
-         for i, t in enumerate(scheduler.plms_timesteps):
-             residual = model(sample, t)
-             sample = scheduler.step_plms(residual, t, sample).prev_sample
-
-         return sample
-
-     def test_step_shape(self):
-         kwargs = dict(self.forward_default_kwargs)
-
-         num_inference_steps = kwargs.pop("num_inference_steps", None)
-
-         for scheduler_class in self.scheduler_classes:
-             scheduler_config = self.get_scheduler_config()
-             scheduler = scheduler_class(**scheduler_config)
-
-             sample = self.dummy_sample
-             residual = 0.1 * sample
-
-             if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
-                 scheduler.set_timesteps(num_inference_steps)
-             elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
-                 kwargs["num_inference_steps"] = num_inference_steps
-
-             # copy over dummy past residuals (must be done after set_timesteps)
-             dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
-             scheduler.ets = dummy_past_residuals[:]
-
-             output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
-             output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
-
-             self.assertEqual(output_0.shape, sample.shape)
-             self.assertEqual(output_0.shape, output_1.shape)
-
-             output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
-             output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
-
-             self.assertEqual(output_0.shape, sample.shape)
-             self.assertEqual(output_0.shape, output_1.shape)
-
-     def test_timesteps(self):
-         for timesteps in [100, 1000]:
-             self.check_over_configs(num_train_timesteps=timesteps)
-
-     def test_steps_offset(self):
-         for steps_offset in [0, 1]:
-             self.check_over_configs(steps_offset=steps_offset)
-
-         scheduler_class = self.scheduler_classes[0]
-         scheduler_config = self.get_scheduler_config(steps_offset=1)
-         scheduler = scheduler_class(**scheduler_config)
-         scheduler.set_timesteps(10)
-         assert torch.equal(
-             scheduler.timesteps,
-             torch.LongTensor(
-                 [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
-             ),
-         )
-
-     def test_betas(self):
-         for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
-             self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
-
-     def test_schedules(self):
-         for schedule in ["linear", "squaredcos_cap_v2"]:
-             self.check_over_configs(beta_schedule=schedule)
-
-     def test_prediction_type(self):
-         for prediction_type in ["epsilon", "v_prediction"]:
-             self.check_over_configs(prediction_type=prediction_type)
-
-     def test_time_indices(self):
-         for t in [1, 5, 10]:
-             self.check_over_forward(time_step=t)
-
-     def test_inference_steps(self):
-         for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
-             self.check_over_forward(num_inference_steps=num_inference_steps)
-
-     def test_pow_of_3_inference_steps(self):
-         # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
-         num_inference_steps = 27
-
-         for scheduler_class in self.scheduler_classes:
-             sample = self.dummy_sample
-             residual = 0.1 * sample
-
-             scheduler_config = self.get_scheduler_config()
-             scheduler = scheduler_class(**scheduler_config)
-
-             scheduler.set_timesteps(num_inference_steps)
-
-             # before power of 3 fix, would error on first step, so we only need to do two
-             for i, t in enumerate(scheduler.prk_timesteps[:2]):
-                 sample = scheduler.step_prk(residual, t, sample).prev_sample
-
-     def test_inference_plms_no_past_residuals(self):
-         with self.assertRaises(ValueError):
-             scheduler_class = self.scheduler_classes[0]
-             scheduler_config = self.get_scheduler_config()
-             scheduler = scheduler_class(**scheduler_config)
-
-             scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
-
-     def test_full_loop_no_noise(self):
-         sample = self.full_loop()
-         result_sum = torch.sum(torch.abs(sample))
-         result_mean = torch.mean(torch.abs(sample))
-
-         assert abs(result_sum.item() - 198.1318) < 1e-2
-         assert abs(result_mean.item() - 0.2580) < 1e-3
-
-     def test_full_loop_with_v_prediction(self):
-         sample = self.full_loop(prediction_type="v_prediction")
-         result_sum = torch.sum(torch.abs(sample))
-         result_mean = torch.mean(torch.abs(sample))
-
-         assert abs(result_sum.item() - 67.3986) < 1e-2
-         assert abs(result_mean.item() - 0.0878) < 1e-3
-
-     def test_full_loop_with_set_alpha_to_one(self):
-         # We specify different beta, so that the first alpha is 0.99
-         sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
-         result_sum = torch.sum(torch.abs(sample))
-         result_mean = torch.mean(torch.abs(sample))
-
-         assert abs(result_sum.item() - 230.0399) < 1e-2
-         assert abs(result_mean.item() - 0.2995) < 1e-3
-
-     def test_full_loop_with_no_set_alpha_to_one(self):
-         # We specify different beta, so that the first alpha is 0.99
-         sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
-         result_sum = torch.sum(torch.abs(sample))
-         result_mean = torch.mean(torch.abs(sample))
-
-         assert abs(result_sum.item() - 186.9482) < 1e-2
-         assert abs(result_mean.item() - 0.2434) < 1e-3
 
spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py DELETED
@@ -1,13 +0,0 @@
- _base_ = './retinanet_r50_fpn_1x_coco.py'
- model = dict(
-     pretrained='open-mmlab://resnext101_64x4d',
-     backbone=dict(
-         type='ResNeXt',
-         depth=101,
-         groups=64,
-         base_width=4,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=True),
-         style='pytorch'))
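
As a hedged aside, configs like this are consumed through mmcv's `Config`, which resolves `_base_` inheritance before applying the overrides above (the path below assumes an MMDetection checkout):

```python
from mmcv import Config

cfg = Config.fromfile("configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py")
# The base RetinaNet settings come from retinanet_r50_fpn_1x_coco.py; only the
# backbone dict above is overridden, swapping ResNet-50 for ResNeXt-101-64x4d.
print(cfg.model.backbone.type)  # 'ResNeXt'
```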
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/evaluation/mean_ap.py DELETED
@@ -1,469 +0,0 @@
- from multiprocessing import Pool
-
- import mmcv
- import numpy as np
- from mmcv.utils import print_log
- from terminaltables import AsciiTable
-
- from .bbox_overlaps import bbox_overlaps
- from .class_names import get_classes
-
-
- def average_precision(recalls, precisions, mode='area'):
-     """Calculate average precision (for single or multiple scales).
-
-     Args:
-         recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
-         precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
-         mode (str): 'area' or '11points', 'area' means calculating the area
-             under precision-recall curve, '11points' means calculating
-             the average precision of recalls at [0, 0.1, ..., 1]
-
-     Returns:
-         float or ndarray: calculated average precision
-     """
-     no_scale = False
-     if recalls.ndim == 1:
-         no_scale = True
-         recalls = recalls[np.newaxis, :]
-         precisions = precisions[np.newaxis, :]
-     assert recalls.shape == precisions.shape and recalls.ndim == 2
-     num_scales = recalls.shape[0]
-     ap = np.zeros(num_scales, dtype=np.float32)
-     if mode == 'area':
-         zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
-         ones = np.ones((num_scales, 1), dtype=recalls.dtype)
-         mrec = np.hstack((zeros, recalls, ones))
-         mpre = np.hstack((zeros, precisions, zeros))
-         for i in range(mpre.shape[1] - 1, 0, -1):
-             mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
-         for i in range(num_scales):
-             ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
-             ap[i] = np.sum(
-                 (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
-     elif mode == '11points':
-         for i in range(num_scales):
-             for thr in np.arange(0, 1 + 1e-3, 0.1):
-                 precs = precisions[i, recalls[i, :] >= thr]
-                 prec = precs.max() if precs.size > 0 else 0
-                 ap[i] += prec
-         ap /= 11
-     else:
-         raise ValueError(
-             'Unrecognized mode, only "area" and "11points" are supported')
-     if no_scale:
-         ap = ap[0]
-     return ap
-
-
- def tpfp_imagenet(det_bboxes,
-                   gt_bboxes,
-                   gt_bboxes_ignore=None,
-                   default_iou_thr=0.5,
-                   area_ranges=None):
-     """Check if detected bboxes are true positive or false positive.
-
-     Args:
-         det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
-         gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
-         gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
-             of shape (k, 4). Default: None
-         default_iou_thr (float): IoU threshold to be considered as matched for
-             medium and large bboxes (small ones have special rules).
-             Default: 0.5.
-         area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
-             in the format [(min1, max1), (min2, max2), ...]. Default: None.
-
-     Returns:
-         tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
-             each array is (num_scales, m).
-     """
-     # an indicator of ignored gts
-     gt_ignore_inds = np.concatenate(
-         (np.zeros(gt_bboxes.shape[0], dtype=np.bool),
-          np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))
-     # stack gt_bboxes and gt_bboxes_ignore for convenience
-     gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
-
-     num_dets = det_bboxes.shape[0]
-     num_gts = gt_bboxes.shape[0]
-     if area_ranges is None:
-         area_ranges = [(None, None)]
-     num_scales = len(area_ranges)
-     # tp and fp are of shape (num_scales, num_gts), each row is tp or fp
-     # of a certain scale.
-     tp = np.zeros((num_scales, num_dets), dtype=np.float32)
-     fp = np.zeros((num_scales, num_dets), dtype=np.float32)
-     if gt_bboxes.shape[0] == 0:
-         if area_ranges == [(None, None)]:
-             fp[...] = 1
-         else:
-             det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
-                 det_bboxes[:, 3] - det_bboxes[:, 1])
-             for i, (min_area, max_area) in enumerate(area_ranges):
-                 fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
-         return tp, fp
-     ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)
-     gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
-     gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
-     iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
-                           default_iou_thr)
-     # sort all detections by scores in descending order
-     sort_inds = np.argsort(-det_bboxes[:, -1])
-     for k, (min_area, max_area) in enumerate(area_ranges):
-         gt_covered = np.zeros(num_gts, dtype=bool)
-         # if no area range is specified, gt_area_ignore is all False
-         if min_area is None:
-             gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
-         else:
-             gt_areas = gt_w * gt_h
-             gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
-         for i in sort_inds:
-             max_iou = -1
-             matched_gt = -1
-             # find best overlapped available gt
-             for j in range(num_gts):
-                 # different from PASCAL VOC: allow finding other gts if the
-                 # best overlapped ones are already matched by other det bboxes
-                 if gt_covered[j]:
-                     continue
-                 elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
-                     max_iou = ious[i, j]
-                     matched_gt = j
-             # there are 4 cases for a det bbox:
-             # 1. it matches a gt, tp = 1, fp = 0
-             # 2. it matches an ignored gt, tp = 0, fp = 0
-             # 3. it matches no gt and within area range, tp = 0, fp = 1
-             # 4. it matches no gt but is beyond area range, tp = 0, fp = 0
-             if matched_gt >= 0:
-                 gt_covered[matched_gt] = 1
-                 if not (gt_ignore_inds[matched_gt]
-                         or gt_area_ignore[matched_gt]):
-                     tp[k, i] = 1
-             elif min_area is None:
-                 fp[k, i] = 1
-             else:
-                 bbox = det_bboxes[i, :4]
-                 area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
-                 if area >= min_area and area < max_area:
-                     fp[k, i] = 1
-     return tp, fp
-
-
- def tpfp_default(det_bboxes,
-                  gt_bboxes,
-                  gt_bboxes_ignore=None,
-                  iou_thr=0.5,
-                  area_ranges=None):
-     """Check if detected bboxes are true positive or false positive.
-
-     Args:
-         det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
-         gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
-         gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
-             of shape (k, 4). Default: None
-         iou_thr (float): IoU threshold to be considered as matched.
-             Default: 0.5.
-         area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
-             in the format [(min1, max1), (min2, max2), ...]. Default: None.
-
-     Returns:
-         tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
-             each array is (num_scales, m).
-     """
-     # an indicator of ignored gts
-     gt_ignore_inds = np.concatenate(
-         (np.zeros(gt_bboxes.shape[0], dtype=np.bool),
-          np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))
-     # stack gt_bboxes and gt_bboxes_ignore for convenience
-     gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
-
-     num_dets = det_bboxes.shape[0]
-     num_gts = gt_bboxes.shape[0]
-     if area_ranges is None:
-         area_ranges = [(None, None)]
-     num_scales = len(area_ranges)
-     # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of
-     # a certain scale
-     tp = np.zeros((num_scales, num_dets), dtype=np.float32)
-     fp = np.zeros((num_scales, num_dets), dtype=np.float32)
-
-     # if there is no gt bboxes in this image, then all det bboxes
-     # within area range are false positives
-     if gt_bboxes.shape[0] == 0:
-         if area_ranges == [(None, None)]:
-             fp[...] = 1
-         else:
-             det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
-                 det_bboxes[:, 3] - det_bboxes[:, 1])
-             for i, (min_area, max_area) in enumerate(area_ranges):
-                 fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
-         return tp, fp
-
-     ious = bbox_overlaps(det_bboxes, gt_bboxes)
-     # for each det, the max iou with all gts
-     ious_max = ious.max(axis=1)
-     # for each det, which gt overlaps most with it
-     ious_argmax = ious.argmax(axis=1)
-     # sort all dets in descending order by scores
-     sort_inds = np.argsort(-det_bboxes[:, -1])
-     for k, (min_area, max_area) in enumerate(area_ranges):
-         gt_covered = np.zeros(num_gts, dtype=bool)
-         # if no area range is specified, gt_area_ignore is all False
-         if min_area is None:
-             gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
-         else:
-             gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
-                 gt_bboxes[:, 3] - gt_bboxes[:, 1])
-             gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
-         for i in sort_inds:
-             if ious_max[i] >= iou_thr:
-                 matched_gt = ious_argmax[i]
-                 if not (gt_ignore_inds[matched_gt]
-                         or gt_area_ignore[matched_gt]):
-                     if not gt_covered[matched_gt]:
-                         gt_covered[matched_gt] = True
-                         tp[k, i] = 1
-                     else:
-                         fp[k, i] = 1
-                 # otherwise ignore this detected bbox, tp = 0, fp = 0
-             elif min_area is None:
-                 fp[k, i] = 1
-             else:
-                 bbox = det_bboxes[i, :4]
-                 area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
-                 if area >= min_area and area < max_area:
-                     fp[k, i] = 1
-     return tp, fp
-
-
- def get_cls_results(det_results, annotations, class_id):
-     """Get det results and gt information of a certain class.
-
-     Args:
-         det_results (list[list]): Same as `eval_map()`.
-         annotations (list[dict]): Same as `eval_map()`.
-         class_id (int): ID of a specific class.
-
-     Returns:
-         tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
-     """
-     cls_dets = [img_res[class_id] for img_res in det_results]
-     cls_gts = []
-     cls_gts_ignore = []
-     for ann in annotations:
-         gt_inds = ann['labels'] == class_id
-         cls_gts.append(ann['bboxes'][gt_inds, :])
-
-         if ann.get('labels_ignore', None) is not None:
-             ignore_inds = ann['labels_ignore'] == class_id
-             cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
-         else:
-             cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))
-
-     return cls_dets, cls_gts, cls_gts_ignore
-
-
- def eval_map(det_results,
-              annotations,
-              scale_ranges=None,
-              iou_thr=0.5,
-              dataset=None,
-              logger=None,
-              tpfp_fn=None,
-              nproc=4):
-     """Evaluate mAP of a dataset.
-
-     Args:
-         det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
-             The outer list indicates images, and the inner list indicates
-             per-class detected bboxes.
-         annotations (list[dict]): Ground truth annotations where each item of
-             the list indicates an image. Keys of annotations are:
-
-             - `bboxes`: numpy array of shape (n, 4)
-             - `labels`: numpy array of shape (n, )
-             - `bboxes_ignore` (optional): numpy array of shape (k, 4)
-             - `labels_ignore` (optional): numpy array of shape (k, )
-         scale_ranges (list[tuple] | None): Range of scales to be evaluated,
-             in the format [(min1, max1), (min2, max2), ...]. A range of
-             (32, 64) means the area range between (32**2, 64**2).
-             Default: None.
-         iou_thr (float): IoU threshold to be considered as matched.
-             Default: 0.5.
-         dataset (list[str] | str | None): Dataset name or dataset classes,
-             there are minor differences in metrics for different datasets, e.g.
-             "voc07", "imagenet_det", etc. Default: None.
-         logger (logging.Logger | str | None): The way to print the mAP
-             summary. See `mmcv.utils.print_log()` for details. Default: None.
-         tpfp_fn (callable | None): The function used to determine true/
-             false positives. If None, :func:`tpfp_default` is used as default
-             unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this
-             case). If it is given as a function, then this function is used
-             to evaluate tp & fp. Default None.
-         nproc (int): Processes used for computing TP and FP.
-             Default: 4.
-
-     Returns:
-         tuple: (mAP, [dict, dict, ...])
-     """
-     assert len(det_results) == len(annotations)
-
-     num_imgs = len(det_results)
-     num_scales = len(scale_ranges) if scale_ranges is not None else 1
-     num_classes = len(det_results[0])  # positive class num
-     area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]
-                    if scale_ranges is not None else None)
-
-     pool = Pool(nproc)
-     eval_results = []
-     for i in range(num_classes):
-         # get gt and det bboxes of this class
-         cls_dets, cls_gts, cls_gts_ignore = get_cls_results(
-             det_results, annotations, i)
-         # choose proper function according to datasets to compute tp and fp
-         if tpfp_fn is None:
-             if dataset in ['det', 'vid']:
-                 tpfp_fn = tpfp_imagenet
-             else:
-                 tpfp_fn = tpfp_default
-         if not callable(tpfp_fn):
-             raise ValueError(
-                 f'tpfp_fn has to be a function or None, but got {tpfp_fn}')
-
-         # compute tp and fp for each image with multiple processes
-         tpfp = pool.starmap(
-             tpfp_fn,
-             zip(cls_dets, cls_gts, cls_gts_ignore,
-                 [iou_thr for _ in range(num_imgs)],
-                 [area_ranges for _ in range(num_imgs)]))
-         tp, fp = tuple(zip(*tpfp))
-         # calculate gt number of each scale
-         # ignored gts or gts beyond the specific scale are not counted
-         num_gts = np.zeros(num_scales, dtype=int)
-         for j, bbox in enumerate(cls_gts):
-             if area_ranges is None:
-                 num_gts[0] += bbox.shape[0]
-             else:
-                 gt_areas = (bbox[:, 2] - bbox[:, 0]) * (
-                     bbox[:, 3] - bbox[:, 1])
-                 for k, (min_area, max_area) in enumerate(area_ranges):
-                     num_gts[k] += np.sum((gt_areas >= min_area)
-                                          & (gt_areas < max_area))
-         # sort all det bboxes by score, also sort tp and fp
-         cls_dets = np.vstack(cls_dets)
-         num_dets = cls_dets.shape[0]
-         sort_inds = np.argsort(-cls_dets[:, -1])
-         tp = np.hstack(tp)[:, sort_inds]
-         fp = np.hstack(fp)[:, sort_inds]
-         # calculate recall and precision with tp and fp
-         tp = np.cumsum(tp, axis=1)
-         fp = np.cumsum(fp, axis=1)
-         eps = np.finfo(np.float32).eps
-         recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)
-         precisions = tp / np.maximum((tp + fp), eps)
-         # calculate AP
-         if scale_ranges is None:
-             recalls = recalls[0, :]
-             precisions = precisions[0, :]
-             num_gts = num_gts.item()
-         mode = 'area' if dataset != 'voc07' else '11points'
-         ap = average_precision(recalls, precisions, mode)
-         eval_results.append({
-             'num_gts': num_gts,
-             'num_dets': num_dets,
-             'recall': recalls,
-             'precision': precisions,
-             'ap': ap
-         })
-     pool.close()
-     if scale_ranges is not None:
-         # shape (num_classes, num_scales)
-         all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
-         all_num_gts = np.vstack(
-             [cls_result['num_gts'] for cls_result in eval_results])
-         mean_ap = []
-         for i in range(num_scales):
-             if np.any(all_num_gts[:, i] > 0):
-                 mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())
-             else:
-                 mean_ap.append(0.0)
-     else:
-         aps = []
-         for cls_result in eval_results:
-             if cls_result['num_gts'] > 0:
-                 aps.append(cls_result['ap'])
-         mean_ap = np.array(aps).mean().item() if aps else 0.0
-
-     print_map_summary(
-         mean_ap, eval_results, dataset, area_ranges, logger=logger)
-
-     return mean_ap, eval_results
-
-
- def print_map_summary(mean_ap,
-                       results,
-                       dataset=None,
-                       scale_ranges=None,
-                       logger=None):
-     """Print mAP and results of each class.
-
-     A table will be printed to show the gts/dets/recall/AP of each class and
-     the mAP.
-
-     Args:
-         mean_ap (float): Calculated from `eval_map()`.
-         results (list[dict]): Calculated from `eval_map()`.
-         dataset (list[str] | str | None): Dataset name or dataset classes.
-         scale_ranges (list[tuple] | None): Range of scales to be evaluated.
-         logger (logging.Logger | str | None): The way to print the mAP
-             summary. See `mmcv.utils.print_log()` for details. Default: None.
-     """
-
-     if logger == 'silent':
-         return
-
-     if isinstance(results[0]['ap'], np.ndarray):
-         num_scales = len(results[0]['ap'])
-     else:
-         num_scales = 1
-
-     if scale_ranges is not None:
-         assert len(scale_ranges) == num_scales
-
-     num_classes = len(results)
-
-     recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
-     aps = np.zeros((num_scales, num_classes), dtype=np.float32)
-     num_gts = np.zeros((num_scales, num_classes), dtype=int)
-     for i, cls_result in enumerate(results):
-         if cls_result['recall'].size > 0:
-             recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
-         aps[:, i] = cls_result['ap']
-         num_gts[:, i] = cls_result['num_gts']
-
-     if dataset is None:
-         label_names = [str(i) for i in range(num_classes)]
-     elif mmcv.is_str(dataset):
-         label_names = get_classes(dataset)
-     else:
-         label_names = dataset
-
-     if not isinstance(mean_ap, list):
-         mean_ap = [mean_ap]
-
-     header = ['class', 'gts', 'dets', 'recall', 'ap']
-     for i in range(num_scales):
-         if scale_ranges is not None:
-             print_log(f'Scale range {scale_ranges[i]}', logger=logger)
-         table_data = [header]
-         for j in range(num_classes):
-             row_data = [
-                 label_names[j], num_gts[i, j], results[j]['num_dets'],
-                 f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'
-             ]
-             table_data.append(row_data)
-         table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])
-         table = AsciiTable(table_data)
-         table.inner_footing_row_border = True
-         print_log('\n' + table.table, logger=logger)
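
For orientation, a minimal sketch of calling `eval_map` on made-up data (all arrays below are hypothetical; detection rows are `[x1, y1, x2, y2, score]`, one array per class per image, and this assumes the mmdet package context in which this module lives):

```python
import numpy as np
from mmdet.core.evaluation.mean_ap import eval_map

det_results = [  # one image with a single class
    [np.array([[10., 10., 50., 50., 0.9], [5., 5., 20., 20., 0.3]])]
]
annotations = [
    dict(bboxes=np.array([[12., 12., 48., 48.]]), labels=np.array([0]))
]
mean_ap, per_class = eval_map(det_results, annotations, iou_thr=0.5, nproc=1)
```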
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/_distutils_hack/__init__.py DELETED
@@ -1,222 +0,0 @@
- # don't import any costly modules
- import sys
- import os
-
-
- is_pypy = '__pypy__' in sys.builtin_module_names
-
-
- def warn_distutils_present():
-     if 'distutils' not in sys.modules:
-         return
-     if is_pypy and sys.version_info < (3, 7):
-         # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
-         # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
-         return
-     import warnings
-
-     warnings.warn(
-         "Distutils was imported before Setuptools, but importing Setuptools "
-         "also replaces the `distutils` module in `sys.modules`. This may lead "
-         "to undesirable behaviors or errors. To avoid these issues, avoid "
-         "using distutils directly, ensure that setuptools is installed in the "
-         "traditional way (e.g. not an editable install), and/or make sure "
-         "that setuptools is always imported before distutils."
-     )
-
-
- def clear_distutils():
-     if 'distutils' not in sys.modules:
-         return
-     import warnings
-
-     warnings.warn("Setuptools is replacing distutils.")
-     mods = [
-         name
-         for name in sys.modules
-         if name == "distutils" or name.startswith("distutils.")
-     ]
-     for name in mods:
-         del sys.modules[name]
-
-
- def enabled():
-     """
-     Allow selection of distutils by environment variable.
-     """
-     which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
-     return which == 'local'
-
-
- def ensure_local_distutils():
-     import importlib
-
-     clear_distutils()
-
-     # With the DistutilsMetaFinder in place,
-     # perform an import to cause distutils to be
-     # loaded from setuptools._distutils. Ref #2906.
-     with shim():
-         importlib.import_module('distutils')
-
-     # check that submodules load as expected
-     core = importlib.import_module('distutils.core')
-     assert '_distutils' in core.__file__, core.__file__
-     assert 'setuptools._distutils.log' not in sys.modules
-
-
- def do_override():
-     """
-     Ensure that the local copy of distutils is preferred over stdlib.
-
-     See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
-     for more motivation.
-     """
-     if enabled():
-         warn_distutils_present()
-         ensure_local_distutils()
-
-
- class _TrivialRe:
-     def __init__(self, *patterns):
-         self._patterns = patterns
-
-     def match(self, string):
-         return all(pat in string for pat in self._patterns)
-
-
- class DistutilsMetaFinder:
-     def find_spec(self, fullname, path, target=None):
-         # optimization: only consider top level modules and those
-         # found in the CPython test suite.
-         if path is not None and not fullname.startswith('test.'):
-             return
-
-         method_name = 'spec_for_{fullname}'.format(**locals())
-         method = getattr(self, method_name, lambda: None)
-         return method()
-
-     def spec_for_distutils(self):
-         if self.is_cpython():
-             return
-
-         import importlib
-         import importlib.abc
-         import importlib.util
-
-         try:
-             mod = importlib.import_module('setuptools._distutils')
-         except Exception:
-             # There are a couple of cases where setuptools._distutils
-             # may not be present:
-             # - An older Setuptools without a local distutils is
-             #   taking precedence. Ref #2957.
-             # - Path manipulation during sitecustomize removes
-             #   setuptools from the path but only after the hook
-             #   has been loaded. Ref #2980.
-             # In either case, fall back to stdlib behavior.
-             return
-
-         class DistutilsLoader(importlib.abc.Loader):
-             def create_module(self, spec):
-                 mod.__name__ = 'distutils'
-                 return mod
-
-             def exec_module(self, module):
-                 pass
-
-         return importlib.util.spec_from_loader(
-             'distutils', DistutilsLoader(), origin=mod.__file__
-         )
-
-     @staticmethod
-     def is_cpython():
-         """
-         Suppress supplying distutils for CPython (build and tests).
-         Ref #2965 and #3007.
-         """
-         return os.path.isfile('pybuilddir.txt')
-
-     def spec_for_pip(self):
-         """
-         Ensure stdlib distutils when running under pip.
-         See pypa/pip#8761 for rationale.
-         """
-         if self.pip_imported_during_build():
-             return
-         clear_distutils()
-         self.spec_for_distutils = lambda: None
-
-     @classmethod
-     def pip_imported_during_build(cls):
-         """
-         Detect if pip is being imported in a build script. Ref #2355.
-         """
-         import traceback
-
-         return any(
-             cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
-         )
-
-     @staticmethod
-     def frame_file_is_setup(frame):
-         """
-         Return True if the indicated frame suggests a setup.py file.
-         """
-         # some frames may not have __file__ (#2940)
-         return frame.f_globals.get('__file__', '').endswith('setup.py')
-
-     def spec_for_sensitive_tests(self):
-         """
-         Ensure stdlib distutils when running select tests under CPython.
-
-         python/cpython#91169
-         """
-         clear_distutils()
-         self.spec_for_distutils = lambda: None
-
-     sensitive_tests = (
-         [
-             'test.test_distutils',
-             'test.test_peg_generator',
-             'test.test_importlib',
-         ]
-         if sys.version_info < (3, 10)
-         else [
-             'test.test_distutils',
-         ]
-     )
-
-
- for name in DistutilsMetaFinder.sensitive_tests:
-     setattr(
-         DistutilsMetaFinder,
-         f'spec_for_{name}',
-         DistutilsMetaFinder.spec_for_sensitive_tests,
-     )
-
-
- DISTUTILS_FINDER = DistutilsMetaFinder()
-
-
- def add_shim():
-     DISTUTILS_FINDER in sys.meta_path or insert_shim()
-
-
- class shim:
-     def __enter__(self):
-         insert_shim()
-
-     def __exit__(self, exc, value, tb):
-         remove_shim()
-
-
- def insert_shim():
-     sys.meta_path.insert(0, DISTUTILS_FINDER)
-
-
- def remove_shim():
-     try:
-         sys.meta_path.remove(DISTUTILS_FINDER)
-     except ValueError:
-         pass
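
A short sketch of the intended wiring (in real installs setuptools installs this shim via a `distutils-precedence.pth` file at interpreter startup; the snippet below only illustrates the mechanism):

```python
import _distutils_hack

_distutils_hack.add_shim()  # put DISTUTILS_FINDER at the front of sys.meta_path

# With SETUPTOOLS_USE_DISTUTILS=local (the default), this now resolves to
# setuptools' bundled copy rather than the stdlib module.
import distutils.core
print(distutils.core.__file__)
```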
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/contrib/socks.py DELETED
@@ -1,216 +0,0 @@
- # -*- coding: utf-8 -*-
- """
- This module contains provisional support for SOCKS proxies from within
- urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
- SOCKS5. To enable its functionality, either install PySocks or install this
- module with the ``socks`` extra.
-
- The SOCKS implementation supports the full range of urllib3 features. It also
- supports the following SOCKS features:
-
- - SOCKS4A (``proxy_url='socks4a://...``)
- - SOCKS4 (``proxy_url='socks4://...``)
- - SOCKS5 with remote DNS (``proxy_url='socks5h://...``)
- - SOCKS5 with local DNS (``proxy_url='socks5://...``)
- - Usernames and passwords for the SOCKS proxy
-
- .. note::
-    It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
-    your ``proxy_url`` to ensure that DNS resolution is done from the remote
-    server instead of client-side when connecting to a domain name.
-
- SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5
- supports IPv4, IPv6, and domain names.
-
- When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
- will be sent as the ``userid`` section of the SOCKS request:
-
- .. code-block:: python
-
-     proxy_url="socks4a://<userid>@proxy-host"
-
- When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
- of the ``proxy_url`` will be sent as the username/password to authenticate
- with the proxy:
-
- .. code-block:: python
-
-     proxy_url="socks5h://<username>:<password>@proxy-host"
-
- """
- from __future__ import absolute_import
-
- try:
-     import socks
- except ImportError:
-     import warnings
-
-     from ..exceptions import DependencyWarning
-
-     warnings.warn(
-         (
-             "SOCKS support in urllib3 requires the installation of optional "
-             "dependencies: specifically, PySocks. For more information, see "
-             "https://urllib3.readthedocs.io/en/1.26.x/contrib.html#socks-proxies"
-         ),
-         DependencyWarning,
-     )
-     raise
-
- from socket import error as SocketError
- from socket import timeout as SocketTimeout
-
- from ..connection import HTTPConnection, HTTPSConnection
- from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool
- from ..exceptions import ConnectTimeoutError, NewConnectionError
- from ..poolmanager import PoolManager
- from ..util.url import parse_url
-
- try:
-     import ssl
- except ImportError:
-     ssl = None
-
-
- class SOCKSConnection(HTTPConnection):
-     """
-     A plain-text HTTP connection that connects via a SOCKS proxy.
-     """
-
-     def __init__(self, *args, **kwargs):
-         self._socks_options = kwargs.pop("_socks_options")
-         super(SOCKSConnection, self).__init__(*args, **kwargs)
-
-     def _new_conn(self):
-         """
-         Establish a new connection via the SOCKS proxy.
-         """
-         extra_kw = {}
-         if self.source_address:
-             extra_kw["source_address"] = self.source_address
-
-         if self.socket_options:
-             extra_kw["socket_options"] = self.socket_options
-
-         try:
-             conn = socks.create_connection(
-                 (self.host, self.port),
-                 proxy_type=self._socks_options["socks_version"],
-                 proxy_addr=self._socks_options["proxy_host"],
-                 proxy_port=self._socks_options["proxy_port"],
-                 proxy_username=self._socks_options["username"],
-                 proxy_password=self._socks_options["password"],
-                 proxy_rdns=self._socks_options["rdns"],
-                 timeout=self.timeout,
-                 **extra_kw
-             )
-
-         except SocketTimeout:
-             raise ConnectTimeoutError(
-                 self,
-                 "Connection to %s timed out. (connect timeout=%s)"
-                 % (self.host, self.timeout),
-             )
-
-         except socks.ProxyError as e:
-             # This is fragile as hell, but it seems to be the only way to raise
-             # useful errors here.
-             if e.socket_err:
-                 error = e.socket_err
-                 if isinstance(error, SocketTimeout):
-                     raise ConnectTimeoutError(
-                         self,
-                         "Connection to %s timed out. (connect timeout=%s)"
-                         % (self.host, self.timeout),
-                     )
-                 else:
-                     raise NewConnectionError(
-                         self, "Failed to establish a new connection: %s" % error
-                     )
-             else:
-                 raise NewConnectionError(
-                     self, "Failed to establish a new connection: %s" % e
-                 )
-
-         except SocketError as e:  # Defensive: PySocks should catch all these.
-             raise NewConnectionError(
-                 self, "Failed to establish a new connection: %s" % e
-             )
-
-         return conn
-
-
- # We don't need to duplicate the Verified/Unverified distinction from
- # urllib3/connection.py here because the HTTPSConnection will already have been
- # correctly set to either the Verified or Unverified form by that module. This
- # means the SOCKSHTTPSConnection will automatically be the correct type.
- class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
-     pass
-
-
- class SOCKSHTTPConnectionPool(HTTPConnectionPool):
-     ConnectionCls = SOCKSConnection
-
-
- class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
-     ConnectionCls = SOCKSHTTPSConnection
-
-
- class SOCKSProxyManager(PoolManager):
-     """
-     A version of the urllib3 ProxyManager that routes connections via the
-     defined SOCKS proxy.
-     """
-
-     pool_classes_by_scheme = {
-         "http": SOCKSHTTPConnectionPool,
-         "https": SOCKSHTTPSConnectionPool,
-     }
-
-     def __init__(
-         self,
-         proxy_url,
-         username=None,
-         password=None,
-         num_pools=10,
-         headers=None,
-         **connection_pool_kw
-     ):
-         parsed = parse_url(proxy_url)
-
-         if username is None and password is None and parsed.auth is not None:
-             split = parsed.auth.split(":")
-             if len(split) == 2:
-                 username, password = split
-         if parsed.scheme == "socks5":
-             socks_version = socks.PROXY_TYPE_SOCKS5
-             rdns = False
-         elif parsed.scheme == "socks5h":
-             socks_version = socks.PROXY_TYPE_SOCKS5
-             rdns = True
-         elif parsed.scheme == "socks4":
-             socks_version = socks.PROXY_TYPE_SOCKS4
-             rdns = False
-         elif parsed.scheme == "socks4a":
-             socks_version = socks.PROXY_TYPE_SOCKS4
-             rdns = True
-         else:
-             raise ValueError("Unable to determine SOCKS version from %s" % proxy_url)
-
-         self.proxy_url = proxy_url
-
-         socks_options = {
-             "socks_version": socks_version,
-             "proxy_host": parsed.host,
-             "proxy_port": parsed.port,
-             "username": username,
-             "password": password,
-             "rdns": rdns,
-         }
-         connection_pool_kw["_socks_options"] = socks_options
-
-         super(SOCKSProxyManager, self).__init__(
-             num_pools, headers, **connection_pool_kw
-         )
-
-         self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
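
A brief usage sketch (assumes PySocks is installed and a SOCKS5 proxy is listening on the illustrative address below; `socks5h://` keeps DNS resolution on the proxy side, as the module docstring recommends):

```python
from urllib3.contrib.socks import SOCKSProxyManager

proxy = SOCKSProxyManager("socks5h://localhost:1080")
resp = proxy.request("GET", "https://example.com/")
print(resp.status)
```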
 
spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/factory.py DELETED
@@ -1,277 +0,0 @@
- import json
- import logging
- import os
- import pathlib
- import re
- from copy import deepcopy
- from pathlib import Path
-
- import torch
-
- from .model import CLAP, convert_weights_to_fp16
- from .openai import load_openai_model
- from .pretrained import get_pretrained_url, download_pretrained
- from .transform import image_transform
-
- _MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
- _MODEL_CONFIGS = {}  # directory (model_name: config) of model architecture configs
-
-
- def _natural_key(string_):
-     return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
-
-
- def _rescan_model_configs():
-     global _MODEL_CONFIGS
-
-     config_ext = (".json",)
-     config_files = []
-     for config_path in _MODEL_CONFIG_PATHS:
-         if config_path.is_file() and config_path.suffix in config_ext:
-             config_files.append(config_path)
-         elif config_path.is_dir():
-             for ext in config_ext:
-                 config_files.extend(config_path.glob(f"*{ext}"))
-
-     for cf in config_files:
-         if os.path.basename(cf)[0] == ".":
-             continue  # Ignore hidden files
-
-         with open(cf, "r") as f:
-             model_cfg = json.load(f)
-             if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
-                 _MODEL_CONFIGS[cf.stem] = model_cfg
-
-     _MODEL_CONFIGS = {
-         k: v
-         for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
-     }
-
-
- _rescan_model_configs()  # initial populate of model config registry
-
-
- def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
-     checkpoint = torch.load(checkpoint_path, map_location=map_location)
-     if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
-         state_dict = checkpoint["state_dict"]
-     else:
-         state_dict = checkpoint
-     if skip_params:
-         if next(iter(state_dict.items()))[0].startswith("module"):
-             state_dict = {k[7:]: v for k, v in state_dict.items()}
-     # for k in state_dict:
-     #     if k.startswith('transformer'):
-     #         v = state_dict.pop(k)
-     #         state_dict['text_branch.' + k[12:]] = v
-     return state_dict
-
-
- def create_model(
-     amodel_name: str,
-     tmodel_name: str,
-     pretrained: str = "",
-     precision: str = "fp32",
-     device: torch.device = torch.device("cpu"),
-     jit: bool = False,
-     force_quick_gelu: bool = False,
-     openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"),
-     skip_params=True,
-     pretrained_audio: str = "",
-     pretrained_text: str = "",
-     enable_fusion: bool = False,
-     fusion_type: str = "None"
-     # pretrained_image: bool = False,
- ):
-     amodel_name = amodel_name.replace(
-         "/", "-"
-     )  # for callers using old naming with / in ViT names
-     pretrained_orig = pretrained
-     pretrained = pretrained.lower()
-     if pretrained == "openai":
-         if amodel_name in _MODEL_CONFIGS:
-             logging.info(f"Loading {amodel_name} model config.")
-             model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
-         else:
-             logging.error(
-                 f"Model config for {amodel_name} not found; available models {list_models()}."
-             )
-             raise RuntimeError(f"Model config for {amodel_name} not found.")
-
-         logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
-         # Hard Code in model name
-         model_cfg["text_cfg"]["model_type"] = tmodel_name
-         model = load_openai_model(
-             "ViT-B-16",
-             model_cfg,
-             device=device,
-             jit=jit,
-             cache_dir=openai_model_cache_dir,
-             enable_fusion=enable_fusion,
-             fusion_type=fusion_type,
-         )
-         # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
-         if precision == "amp" or precision == "fp32":
-             model = model.float()
-     else:
-         if amodel_name in _MODEL_CONFIGS:
-             logging.info(f"Loading {amodel_name} model config.")
-             model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
-         else:
-             logging.error(
-                 f"Model config for {amodel_name} not found; available models {list_models()}."
-             )
-             raise RuntimeError(f"Model config for {amodel_name} not found.")
-
-         if force_quick_gelu:
-             # override for use of QuickGELU on non-OpenAI transformer models
-             model_cfg["quick_gelu"] = True
-
-         # if pretrained_image:
-         #     if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
-         #         # pretrained weight loading for timm models set via vision_cfg
-         #         model_cfg['vision_cfg']['timm_model_pretrained'] = True
-         #     else:
-         #         assert False, 'pretrained image towers currently only supported for timm models'
-         model_cfg["text_cfg"]["model_type"] = tmodel_name
-         model_cfg["enable_fusion"] = enable_fusion
-         model_cfg["fusion_type"] = fusion_type
-         model = CLAP(**model_cfg)
-
-         if pretrained:
-             checkpoint_path = ""
-             url = get_pretrained_url(amodel_name, pretrained)
-             if url:
-                 checkpoint_path = download_pretrained(url, root=openai_model_cache_dir)
-             elif os.path.exists(pretrained_orig):
-                 checkpoint_path = pretrained_orig
-             if checkpoint_path:
-                 logging.info(
-                     f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained})."
-                 )
-                 ckpt = load_state_dict(checkpoint_path, skip_params=True)
-                 model.load_state_dict(ckpt)
-                 param_names = [n for n, p in model.named_parameters()]
-                 # for n in param_names:
-                 #     print(n, "\t", "Loaded" if n in ckpt else "Unloaded")
-             else:
-                 logging.warning(
-                     f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
-                 )
-                 raise RuntimeError(
-                     f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
-                 )
-
-         if pretrained_audio:
-             if amodel_name.startswith("PANN"):
-                 if "Cnn14_mAP" in pretrained_audio:  # official checkpoint
-                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
-                     audio_ckpt = audio_ckpt["model"]
-                     keys = list(audio_ckpt.keys())
-                     for key in keys:
-                         if (
-                             "spectrogram_extractor" not in key
-                             and "logmel_extractor" not in key
-                         ):
-                             v = audio_ckpt.pop(key)
-                             audio_ckpt["audio_branch." + key] = v
-                 elif os.path.basename(pretrained_audio).startswith(
-                     "PANN"
-                 ):  # checkpoint trained via HTSAT codebase
-                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
-                     audio_ckpt = audio_ckpt["state_dict"]
-                     keys = list(audio_ckpt.keys())
-                     for key in keys:
-                         if key.startswith("sed_model"):
-                             v = audio_ckpt.pop(key)
-                             audio_ckpt["audio_branch." + key[10:]] = v
-                 elif os.path.basename(pretrained_audio).startswith(
-                     "finetuned"
-                 ):  # checkpoint trained via linear probe codebase
-                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
-                 else:
-                     raise ValueError("Unknown audio checkpoint")
-             elif amodel_name.startswith("HTSAT"):
-                 if "HTSAT_AudioSet_Saved" in pretrained_audio:  # official checkpoint
-                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
-                     audio_ckpt = audio_ckpt["state_dict"]
-                     keys = list(audio_ckpt.keys())
-                     for key in keys:
-                         if key.startswith("sed_model") and (
-                             "spectrogram_extractor" not in key
-                             and "logmel_extractor" not in key
-                         ):
-                             v = audio_ckpt.pop(key)
-                             audio_ckpt["audio_branch." + key[10:]] = v
-                 elif os.path.basename(pretrained_audio).startswith(
-                     "HTSAT"
-                 ):  # checkpoint trained via HTSAT codebase
-                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
-                     audio_ckpt = audio_ckpt["state_dict"]
-                     keys = list(audio_ckpt.keys())
-                     for key in keys:
-                         if key.startswith("sed_model"):
-                             v = audio_ckpt.pop(key)
-                             audio_ckpt["audio_branch." + key[10:]] = v
-                 elif os.path.basename(pretrained_audio).startswith(
-                     "finetuned"
-                 ):  # checkpoint trained via linear probe codebase
-                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
-                 else:
-                     raise ValueError("Unknown audio checkpoint")
-             else:
-                 # raising a bare string is a TypeError in Python 3; raise a real exception
-                 raise ValueError("this audio encoder pretrained checkpoint is not supported")
-
-             model.load_state_dict(audio_ckpt, strict=False)
-             logging.info(
-                 f"Loading pretrained {amodel_name} weights ({pretrained_audio})."
-             )
-             param_names = [n for n, p in model.named_parameters()]
-             for n in param_names:
-                 print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
-
-         model.to(device=device)
-         if precision == "fp16":
-             assert device.type != "cpu"
-             convert_weights_to_fp16(model)
-
-         if jit:
-             model = torch.jit.script(model)
-
-     return model, model_cfg
-
-
- def create_model_and_transforms(
-     model_name: str,
-     pretrained: str = "",
-     precision: str = "fp32",
-     device: torch.device = torch.device("cpu"),
-     jit: bool = False,
-     force_quick_gelu: bool = False,
-     # pretrained_image: bool = False,
- ):
-     model, _ = create_model(  # create_model returns (model, model_cfg)
-         model_name,
-         pretrained,
-         precision,
-         device,
-         jit,
-         force_quick_gelu=force_quick_gelu,
-         # pretrained_image=pretrained_image
-     )
-     preprocess_train = image_transform(model.visual.image_size, is_train=True)
-     preprocess_val = image_transform(model.visual.image_size, is_train=False)
-     return model, preprocess_train, preprocess_val
-
-
- def list_models():
-     """enumerate available model architectures based on config files"""
-     return list(_MODEL_CONFIGS.keys())
-
-
- def add_model_config(path):
-     """add model config path or file and update registry"""
-     if not isinstance(path, Path):
-         path = Path(path)
-     _MODEL_CONFIG_PATHS.append(path)
-     _rescan_model_configs()
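
A hypothetical call for orientation (the architecture and text-tower names below are assumptions; any JSON file under `model_configs/` with matching `embed_dim`/`audio_cfg`/`text_cfg` keys would appear in `list_models()`):

```python
import torch

print(list_models())  # architectures discovered from model_configs/*.json

model, model_cfg = create_model(
    "HTSAT-tiny",   # hypothetical audio-branch config name
    "roberta",      # hypothetical text-branch model type
    pretrained="",
    precision="fp32",
    device=torch.device("cpu"),
)
```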
 
spaces/BOXNYC/shirley/README.md DELETED
@@ -1,17 +0,0 @@
- ---
- title: Shirley
- emoji: 🚀
- colorFrom: gray
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.35.2
- app_file: app.py
- pinned: false
- ---
-
- Developed using this tutorial (Thanks!) https://medium.com/@sohaibshaheen/train-chatgpt-with-custom-data-and-create-your-own-chat-bot-using-macos-fb78c2f9646d
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
- Notes:
- - Changed model from 'text-davinci-003' to 'gpt-4'. Another model is 'gpt-3.5-turbo'.
 
spaces/Banbri/zcvzcv/src/app/interface/bottom-bar/index.tsx DELETED
@@ -1,187 +0,0 @@
- import { useStore } from "@/app/store"
- import { HuggingClap } from "@/components/icons/hugging-clap"
- import { Button } from "@/components/ui/button"
- import { base64ToFile } from "@/lib/base64ToFile"
- import { uploadToHuggingFace } from "@/lib/uploadToHuggingFace"
- import { cn } from "@/lib/utils"
- import { startTransition, useState } from "react"
- import { upscaleImage } from "@/app/engine/render"
- import { sleep } from "@/lib/sleep"
-
- export function BottomBar() {
-   const download = useStore(state => state.download)
-   const isGeneratingStory = useStore(state => state.isGeneratingStory)
-   const prompt = useStore(state => state.prompt)
-   const panelGenerationStatus = useStore(state => state.panelGenerationStatus)
-   const page = useStore(state => state.page)
-   const preset = useStore(state => state.preset)
-   const pageToImage = useStore(state => state.pageToImage)
-
-   const allStatus = Object.values(panelGenerationStatus)
-   const remainingImages = allStatus.reduce((acc, s) => (acc + (s ? 1 : 0)), 0)
-
-   const upscaleQueue = useStore(state => state.upscaleQueue)
-   const renderedScenes = useStore(state => state.renderedScenes)
-   const removeFromUpscaleQueue = useStore(state => state.removeFromUpscaleQueue)
-   const setRendered = useStore(state => state.setRendered)
-   const [isUpscaling, setUpscaling] = useState(false)
-
-   const handleUpscale = () => {
-     setUpscaling(true)
-     startTransition(() => {
-       const fn = async () => {
-         for (let [panelId, renderedScene] of Object.entries(upscaleQueue)) {
-           try {
-             console.log(`upscaling panel ${panelId} (${renderedScene.renderId})`)
-             const result = await upscaleImage(renderedScene.assetUrl)
-             await sleep(1000)
-             if (result.assetUrl) {
-               console.log(`upscale successful, removing ${panelId} (${renderedScene.renderId}) from upscale queue`)
-               setRendered(panelId, {
-                 ...renderedScene,
-                 assetUrl: result.assetUrl
-               })
-               removeFromUpscaleQueue(panelId)
-             }
-
-           } catch (err) {
-             console.error(`failed to upscale: ${err}`)
-           }
-         }
-
-         setUpscaling(false)
-       }
-
-       fn()
-     })
-   }
-   const handleBuyMeACoffee = () => {
-     window.open("https://www.buymeacoffee.com/aicomicfactory", '_blank');
-   }
-   const handleShare = async () => {
-     const dataUrl = await pageToImage()
-     // console.log("dataUrl:", dataUrl)
-     const fileToUpload = base64ToFile(dataUrl, "comic.png")
-     let uploadUrl = ""
-     try {
-       uploadUrl = await uploadToHuggingFace(fileToUpload)
-       console.log("uploadUrl:", uploadUrl)
-     } catch (err) {
-       console.error("Failed to upload the image to Hugging Face")
-     }
-
-
-     const descriptionMd = `
- #### Prompt:
- \`\`\`${prompt}\`\`\`
-
- #### Preset:
- \`\`\`${preset.label}\`\`\`
-
- #### Comic:
- ${uploadUrl
-   ? (`![${prompt}](${uploadUrl})`)
-   : (`(please drag & drop your JPG image here)`)}
- `;
-
-     console.log("descriptionMd:", descriptionMd)
-
-     const params = new URLSearchParams({
-       title: `[Comic] ${prompt}`,
-       description: descriptionMd,
-     });
-     const paramsStr = params.toString();
-     window.open(`https://huggingface.co/spaces/jbilcke-hf/comic-factory/discussions/new?${paramsStr}`, '_blank');
-   }
-
-   const handlePrint = () => {
-     window.print()
-   }
-   return (
-     <div className={cn(
-       `print:hidden`,
-       `fixed bottom-2 md:bottom-4 left-2 right-0 md:left-3 md:right-1`,
-       `flex flex-row`,
-       `justify-between`,
-       `pointer-events-none`
-     )}>
-       <div className={cn(
-         `flex flex-row`,
-         `items-end`,
-         `pointer-events-auto`,
-         `animation-all duration-300 ease-in-out`,
-         isGeneratingStory ? `scale-0 opacity-0` : ``,
-         `space-x-3`,
-         `scale-[0.9]`
-       )}>
-         <Button variant="outline" onClick={handleBuyMeACoffee}>
-           <span className="hidden md:inline">Buy me a coffee</span>
-           <span className="inline md:hidden">Support</span>
-         </Button>
-       </div>
-       <div className={cn(
-         `flex flex-row`,
-         `pointer-events-auto`,
-         `animation-all duration-300 ease-in-out`,
-         isGeneratingStory ? `scale-0 opacity-0` : ``,
-         `space-x-3`,
-         `scale-[0.9]`
-       )}>
-         <div>
-           {
-             // there is an issue, this env check doesn't work..
-             // process.env.NEXT_PUBLIC_CAN_UPSCALE === "true" ?
-             <Button
-               onClick={handleUpscale}
-               disabled={!prompt?.length || remainingImages > 0 || isUpscaling || !Object.values(upscaleQueue).length}
-             >
-               {isUpscaling
-                 ? `${allStatus.length - Object.values(upscaleQueue).length}/${allStatus.length} ⌛`
-                 : "Upscale"}
-             </Button>
-             // : null
-           }
-         </div>
-         <div>
-           <Button
-             onClick={handlePrint}
-             disabled={!prompt?.length}
-           >
-             Print
-           </Button>
-         </div>
-         <div>
-           <Button
-             onClick={download}
-             disabled={!prompt?.length}
-           >
-             <span className="hidden md:inline">{
-               remainingImages ? `${allStatus.length - remainingImages}/${allStatus.length} panels ⌛` : `Save`
-             }</span>
-             <span className="inline md:hidden">{
-               remainingImages ? `${allStatus.length - remainingImages}/${allStatus.length} ⌛` : `Save`
-             }</span>
-           </Button>
-         </div>
-         <div>
-           {
-             // there is an issue, this env check doesn't work..
-             // process.env.NEXT_PUBLIC_ENABLE_COMMUNITY_SHARING === "true" ?
-             <Button
-               onClick={handleShare}
-               disabled={!prompt?.length}
-               className="space-x-2"
-             >
-               <div className="scale-105"><HuggingClap /></div>
-               <div>
-                 <span className="hidden md:inline">Share to community</span>
-                 <span className="inline md:hidden">Share</span>
-               </div>
-             </Button>
-             //: null
-           }
-         </div>
-       </div>
-     </div>
-   )
- }
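For reference, handleShare above pre-fills a Hugging Face discussion by URL-encoding a title and a markdown description into query parameters. A minimal standalone sketch of the same query-string construction in Python (the prompt and description values are hypothetical placeholders), mirroring what URLSearchParams does in the component:

    from urllib.parse import urlencode

    # Hypothetical stand-ins for the component's prompt and generated markdown.
    params = urlencode({
        "title": "[Comic] a cat on the moon",
        "description": "#### Prompt:\n```a cat on the moon```",
    })
    url = f"https://huggingface.co/spaces/jbilcke-hf/comic-factory/discussions/new?{params}"
    print(url)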
 
spaces/Bart92/RVC_HF/extract_locale.py DELETED
@@ -1,34 +0,0 @@
- import json
- import re
-
- # Define regular expression patterns
- pattern = r"""i18n\([\s\n\t]*(["'][^"']+["'])[\s\n\t]*\)"""
-
- # Initialize the dictionary to store key-value pairs
- data = {}
-
-
- def process(fn: str):
-     global data
-     with open(fn, "r", encoding="utf-8") as f:
-         contents = f.read()
-         matches = re.findall(pattern, contents)
-         for key in matches:
-             key = eval(key)
-             print("extract:", key)
-             data[key] = key
-
-
- print("processing infer-web.py")
- process("infer-web.py")
-
- print("processing gui_v0.py")
- process("gui_v0.py")
-
- print("processing gui_v1.py")
- process("gui_v1.py")
-
- # Save as a JSON file
- with open("./i18n/en_US.json", "w", encoding="utf-8") as f:
-     json.dump(data, f, ensure_ascii=False, indent=4)
-     f.write("\n")
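As a sanity check, here is a minimal standalone sketch (not part of the deleted script) of what the i18n pattern above captures; the sample source text is a hypothetical snippet:

    import re

    pattern = r"""i18n\([\s\n\t]*(["'][^"']+["'])[\s\n\t]*\)"""

    # Hypothetical snippet of UI source containing i18n(...) calls.
    sample = '''
    btn = gr.Button(i18n("Convert"))
    label = i18n('Input path')
    '''
    for key in re.findall(pattern, sample):
        # eval() strips the captured quotes, as process() does above.
        print(eval(key))
    # -> Convert
    # -> Input path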
 
spaces/Benson/text-generation/Examples/Camin Simulador ltimo Coche Descarga Apk.md DELETED
@@ -1,48 +0,0 @@
- <br />
- <h1>Hay Day Hack Unlimited Coins and Diamonds APK Download</h1>
- <p>Hay Day is one of the most popular farming simulation games on Android and iOS devices. In this game, you can build your own farm, grow crops, raise animals, trade with other players, and more. However, to enjoy all of the game's features and benefits you need coins and diamonds, the main currencies in Hay Day. Coins are used to buy items, upgrade buildings, expand your land, and more. Diamonds are used to speed up processes, unlock special items, and more. Earning coins and diamonds in the game can be slow and challenging, though, especially if you want to progress faster and have more fun. That is why many players are looking for a way to get unlimited coins and diamonds in Hay Day without spending real money.</p>
- <p>If you are one of them, you are in luck. In this article, we will show you how to download and install Hay Day Hack APK, a modified version of the original game that gives you unlimited coins and diamonds for free. We will also show you how to use it, what features and benefits it offers, and some tips and tricks for playing Hay Day with Hay Day Hack APK. So, without further ado, let's get started.</p>
- <h2>truck simulator ultimate car download apk</h2><br /><p><b><b>DOWNLOAD</b> ->->->-> <a href="https://bltlly.com/2v6JfM">https://bltlly.com/2v6JfM</a></b></p><br /><br />
- <h2>How to Download and Install Hay Day Hack APK</h2>
- <p>Downloading and installing Hay Day Hack APK is very easy and simple. Just follow these steps:</p>
- <ol>
- <li>Download the APK file from a trusted source. You can use the link below to download it directly from our website. The file size is about 150 MB, so make sure you have enough space on your device. </li>
- <li>Enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and turn it on. This will allow you to install apps from sources other than the Google Play Store.</li>
-
- </ol>
- <h2>How to Use Hay Day Hack APK</h2>
- <p>Using Hay Day Hack APK is just as easy and simple. Just follow these steps:</p>
- <ol>
- <li>Log in with your Facebook account or create a new one. When you launch the game for the first time, you will be asked to log in with your Facebook account or create a new one. We recommend using a fake or secondary Facebook account for this purpose, since using your real or main Facebook account may result in a ban from the game or from Facebook itself. </li>
- <li>Choose the amount of coins and diamonds you want to generate. Once you have logged in, you will see a menu in the top-right corner of the screen with two buttons: Coins and Diamonds. Tap either one and enter the amount of coins or diamonds you want to generate. You can choose any amount you like, from 1 to 9999999.</li>
- <li>Wait for the hack to finish and enjoy your resources. After entering the amount of coins or diamonds you want, tap the Generate button and wait a few seconds. The hack will process your request and add the resources to your account. You will see a confirmation message on screen when the hack is done. You can then close the menu and enjoy your unlimited coins and diamonds.</li>
- </ol>
- <h2>Features and Benefits of Hay Day Hack APK</h2>
- <p>Hay Day Hack APK is not just a simple mod that gives you unlimited coins and diamonds. It also offers many other features and benefits that make it one of the best hacks for Hay Day. Here are some of them:</p>
- <ul>
- <li><b>Unlimited coins and diamonds</b>: This is the main feature of Hay Day Hack APK. You can generate as many coins and diamonds as you want, whenever you want, without spending real money. You can use them to buy items, upgrade buildings, expand your land, speed up processes, unlock special items, and more. You can also use them to buy and resell items from other players, earning even more profit.</li>
-
- <li><b>Safe and secure</b>: Hay Day Hack APK is safe and secure to use. It does not contain any viruses, malware, spyware, or other harmful elements that could damage your device or your account. It also does not ask for personal information or access to your device's data. It only uses your Facebook account to log in to the game and generate the resources. It also has an anti-ban system that protects your account from being detected or banned by the game's servers.</li>
- <li><b>Compatible with all devices and versions</b>: Hay Day Hack APK is compatible with all devices and versions of the game. You can use it on any Android or iOS device, whether a smartphone, tablet, or emulator. You can also use it on any version of the game, old or new, official or modified. It works perfectly with all of them.</li>
- </ul>
- <h2>Tips and Tricks for Playing Hay Day with Hay Day Hack APK</h2>
- <p>Now that you have unlimited coins and diamonds in Hay Day, you may be wondering how to get the most out of them. Here are some tips and tricks for playing Hay Day with Hay Day Hack APK:</p>
- <ul>
- <li><b>Use your coins and diamonds wisely</b>: Even though you have unlimited coins and diamonds in Hay Day, you should still use them wisely. Do not waste them on unnecessary things or spend them all at once. Save some for later or for emergencies. You never know when you will need them.</li>
- <li><b>Plant slow-growing crops at night or during working hours</b>: One way to maximize your productivity and earnings in Hay Day is to plant slow-growing crops at night or during working hours. These crops take longer to grow but yield more products and money. For example, wheat takes 2 minutes to grow but gives only 1 product, while indigo takes 2 hours to grow but gives 2 products. By planting slow-growing crops when you are not playing, you can harvest them when you come back and earn more.</li>
-
- <li><b>Buy and resell items from other players</b>: A smart way to make more profit in Hay Day is to buy and resell items from other players. You can use your unlimited coins and diamonds to buy items from other players' stands at low prices and then resell them at higher prices at your own stand. You can also use this strategy to complete orders faster and earn more rewards.</li>
- </ul> <h2>Conclusion and FAQs</h2>
- <p>In conclusion, Hay Day Hack APK is a great way to enjoy Hay Day with unlimited coins and diamonds. You can download and install it easily and safely, and use it to buy items, upgrade buildings, expand your land, speed up processes, unlock special items, and more. You can also use it to earn more money by selling your products and by buying and reselling items from other players. Hay Day Hack APK is compatible with all devices and versions of the game and requires no root or jailbreak. It also has an anti-ban system that protects your account from being detected or banned by the game's servers.</p>
- <p>If you are a Hay Day fan and want more fun and freedom in the game, you should definitely try Hay Day Hack APK. It will make your farming experience more enjoyable and rewarding. However, you should also be careful and responsible when using it, as it can affect the balance and fairness of the game. You should also respect other players and not abuse or harass them with your unlimited resources. Remember, Hay Day is a game for entertainment and relaxation, not for cheating or bullying.</p>
- <p></p>
- <p>If you have any questions or doubts about Hay Day Hack APK, you can check the FAQs below:</p>
- <ol>
-
- <li><b> Is Hay Day Hack APK safe? </b>: Hay Day Hack APK is safe to use, as it does not contain any viruses, malware, spyware, or other harmful elements that could damage your device or your account. It also does not ask for personal information or access to your device's data. It only uses your Facebook account to log in to the game and generate the resources. It also has an anti-ban system that protects your account from being detected or banned by the game's servers.</li>
- <li><b> How often can I use Hay Day Hack APK? </b>: You can use Hay Day Hack APK as often as you like, since there is no limit to the amount of coins and diamonds you can generate. However, you should use it moderately and reasonably, as generating too many resources at once may raise suspicion or cause errors in the game. You should also avoid using it during events or competitions, as it can ruin the fun and challenge for you and others.</li>
- <li><b> Will I get banned for using Hay Day Hack APK? </b>: There is a low chance of being banned for using Hay Day Hack APK, since it has an anti-ban system that protects your account from being detected or banned by the game's servers. However, there is still a risk of being banned if you use it excessively or irresponsibly, such as generating too many resources at once, using them to harass or bully other players, or taking part in events or competitions with them. You should also avoid using your real or main Facebook account for this purpose, as using a fake or secondary Facebook account can reduce the risk of being banned.</li>
-
- </ol></p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py DELETED
@@ -1,277 +0,0 @@
- # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
- import re
- import sys
- import os
-
- from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL
- from .winterm import enable_vt_processing, WinTerm, WinColor, WinStyle
- from .win32 import windll, winapi_test
-
-
- winterm = None
- if windll is not None:
-     winterm = WinTerm()
-
-
- class StreamWrapper(object):
-     '''
-     Wraps a stream (such as stdout), acting as a transparent proxy for all
-     attribute access apart from method 'write()', which is delegated to our
-     Converter instance.
-     '''
-     def __init__(self, wrapped, converter):
-         # double-underscore everything to prevent clashes with names of
-         # attributes on the wrapped stream object.
-         self.__wrapped = wrapped
-         self.__convertor = converter
-
-     def __getattr__(self, name):
-         return getattr(self.__wrapped, name)
-
-     def __enter__(self, *args, **kwargs):
-         # special method lookup bypasses __getattr__/__getattribute__, see
-         # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
-         # thus, contextlib magic methods are not proxied via __getattr__
-         return self.__wrapped.__enter__(*args, **kwargs)
-
-     def __exit__(self, *args, **kwargs):
-         return self.__wrapped.__exit__(*args, **kwargs)
-
-     def __setstate__(self, state):
-         self.__dict__ = state
-
-     def __getstate__(self):
-         return self.__dict__
-
-     def write(self, text):
-         self.__convertor.write(text)
-
-     def isatty(self):
-         stream = self.__wrapped
-         if 'PYCHARM_HOSTED' in os.environ:
-             if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
-                 return True
-         try:
-             stream_isatty = stream.isatty
-         except AttributeError:
-             return False
-         else:
-             return stream_isatty()
-
-     @property
-     def closed(self):
-         stream = self.__wrapped
-         try:
-             return stream.closed
-         # AttributeError in the case that the stream doesn't support being closed
-         # ValueError for the case that the stream has already been detached when atexit runs
-         except (AttributeError, ValueError):
-             return True
-
-
- class AnsiToWin32(object):
-     '''
-     Implements a 'write()' method which, on Windows, will strip ANSI character
-     sequences from the text, and if outputting to a tty, will convert them into
-     win32 function calls.
-     '''
-     ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?')  # Control Sequence Introducer
-     ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?')  # Operating System Command
-
-     def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
-         # The wrapped stream (normally sys.stdout or sys.stderr)
-         self.wrapped = wrapped
-
-         # should we reset colors to defaults after every .write()
-         self.autoreset = autoreset
-
-         # create the proxy wrapping our output stream
-         self.stream = StreamWrapper(wrapped, self)
-
-         on_windows = os.name == 'nt'
-         # We test if the WinAPI works, because even if we are on Windows
-         # we may be using a terminal that doesn't support the WinAPI
-         # (e.g. Cygwin Terminal). In this case it's up to the terminal
-         # to support the ANSI codes.
-         conversion_supported = on_windows and winapi_test()
-         try:
-             fd = wrapped.fileno()
-         except Exception:
-             fd = -1
-         system_has_native_ansi = not on_windows or enable_vt_processing(fd)
-         have_tty = not self.stream.closed and self.stream.isatty()
-         need_conversion = conversion_supported and not system_has_native_ansi
-
-         # should we strip ANSI sequences from our output?
-         if strip is None:
-             strip = need_conversion or not have_tty
-         self.strip = strip
-
-         # should we convert ANSI sequences into win32 calls?
-         if convert is None:
-             convert = need_conversion and have_tty
-         self.convert = convert
-
-         # dict of ansi codes to win32 functions and parameters
-         self.win32_calls = self.get_win32_calls()
-
-         # are we wrapping stderr?
-         self.on_stderr = self.wrapped is sys.stderr
-
-     def should_wrap(self):
-         '''
-         True if this class is actually needed. If false, then the output
-         stream will not be affected, nor will win32 calls be issued, so
-         wrapping stdout is not actually required. This will generally be
-         False on non-Windows platforms, unless optional functionality like
-         autoreset has been requested using kwargs to init()
-         '''
-         return self.convert or self.strip or self.autoreset
-
-     def get_win32_calls(self):
-         if self.convert and winterm:
-             return {
-                 AnsiStyle.RESET_ALL: (winterm.reset_all, ),
-                 AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
-                 AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
-                 AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
-                 AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
-                 AnsiFore.RED: (winterm.fore, WinColor.RED),
-                 AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
-                 AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
-                 AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
-                 AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
-                 AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
-                 AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
-                 AnsiFore.RESET: (winterm.fore, ),
-                 AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
-                 AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
-                 AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
-                 AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
-                 AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
-                 AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
-                 AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
-                 AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
-                 AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
-                 AnsiBack.RED: (winterm.back, WinColor.RED),
-                 AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
-                 AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
-                 AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
-                 AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
-                 AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
-                 AnsiBack.WHITE: (winterm.back, WinColor.GREY),
-                 AnsiBack.RESET: (winterm.back, ),
-                 AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
-                 AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
-                 AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
-                 AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
-                 AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
-                 AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
-                 AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
-                 AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
-             }
-         return dict()
-
-     def write(self, text):
-         if self.strip or self.convert:
-             self.write_and_convert(text)
-         else:
-             self.wrapped.write(text)
-             self.wrapped.flush()
-         if self.autoreset:
-             self.reset_all()
-
-
-     def reset_all(self):
-         if self.convert:
-             self.call_win32('m', (0,))
-         elif not self.strip and not self.stream.closed:
-             self.wrapped.write(Style.RESET_ALL)
-
-
-     def write_and_convert(self, text):
-         '''
-         Write the given text to our wrapped stream, stripping any ANSI
-         sequences from the text, and optionally converting them into win32
-         calls.
-         '''
-         cursor = 0
-         text = self.convert_osc(text)
-         for match in self.ANSI_CSI_RE.finditer(text):
-             start, end = match.span()
-             self.write_plain_text(text, cursor, start)
-             self.convert_ansi(*match.groups())
-             cursor = end
-         self.write_plain_text(text, cursor, len(text))
-
-
-     def write_plain_text(self, text, start, end):
-         if start < end:
-             self.wrapped.write(text[start:end])
-             self.wrapped.flush()
-
-
-     def convert_ansi(self, paramstring, command):
-         if self.convert:
-             params = self.extract_params(command, paramstring)
-             self.call_win32(command, params)
-
-
-     def extract_params(self, command, paramstring):
-         if command in 'Hf':
-             params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
-             while len(params) < 2:
-                 # defaults:
-                 params = params + (1,)
-         else:
-             params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
-             if len(params) == 0:
-                 # defaults:
-                 if command in 'JKm':
-                     params = (0,)
-                 elif command in 'ABCD':
-                     params = (1,)
-
-         return params
-
-
-     def call_win32(self, command, params):
-         if command == 'm':
-             for param in params:
-                 if param in self.win32_calls:
-                     func_args = self.win32_calls[param]
-                     func = func_args[0]
-                     args = func_args[1:]
-                     kwargs = dict(on_stderr=self.on_stderr)
-                     func(*args, **kwargs)
-         elif command in 'J':
-             winterm.erase_screen(params[0], on_stderr=self.on_stderr)
-         elif command in 'K':
-             winterm.erase_line(params[0], on_stderr=self.on_stderr)
-         elif command in 'Hf':     # cursor position - absolute
-             winterm.set_cursor_position(params, on_stderr=self.on_stderr)
-         elif command in 'ABCD':   # cursor position - relative
-             n = params[0]
-             # A - up, B - down, C - forward, D - back
-             x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
-             winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
-
-
-     def convert_osc(self, text):
-         for match in self.ANSI_OSC_RE.finditer(text):
-             start, end = match.span()
-             text = text[:start] + text[end:]
-             paramstring, command = match.groups()
-             if command == BEL:
-                 if paramstring.count(";") == 1:
-                     params = paramstring.split(";")
-                     # 0 - change title and icon (we will only change title)
-                     # 1 - change icon (we don't support this)
-                     # 2 - change title
-                     if params[0] in '02':
-                         winterm.set_title(params[1])
-         return text
-
-
-     def flush(self):
-         self.wrapped.flush()
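As a standalone illustration (separate from the deleted module) of how ANSI_CSI_RE and the default branch of extract_params cooperate above, a small sketch run against a hypothetical colored string:

    import re

    # Same Control Sequence Introducer pattern as AnsiToWin32.ANSI_CSI_RE above.
    ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?')

    text = '\033[31;1mred and bright\033[0m'
    for match in ANSI_CSI_RE.finditer(text):
        paramstring, command = match.groups()
        # Mirrors extract_params() for the 'm' (set-style) command.
        params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
        print(command, params)
    # -> m (31, 1)
    # -> m (0,)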
 
spaces/Boadiwaa/Recipes/openai/api_resources/edit.py DELETED
@@ -1,32 +0,0 @@
- import time
-
- from openai import util
- from openai.api_resources.abstract.engine_api_resource import EngineAPIResource
- from openai.error import InvalidRequestError, TryAgain
-
-
- class Edit(EngineAPIResource):
-     engine_required = False
-     OBJECT_NAME = "edits"
-
-     @classmethod
-     def create(cls, *args, **kwargs):
-         """
-         Creates a new edit for the provided input, instruction, and parameters.
-         """
-         start = time.time()
-         timeout = kwargs.pop("timeout", None)
-         if kwargs.get("model", None) is None and kwargs.get("engine", None) is None:
-             raise InvalidRequestError(
-                 "Must provide an 'engine' or 'model' parameter to create an Edit.",
-                 param="engine",
-             )
-
-         while True:
-             try:
-                 return super().create(*args, **kwargs)
-             except TryAgain as e:
-                 if timeout is not None and time.time() > start + timeout:
-                     raise
-
-                 util.log_info("Waiting for model to warm up", error=e)
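The create() method above is a retry-until-timeout loop around the base-class call. A generic, hypothetical sketch of the same pattern, independent of the openai client (function name and defaults are illustrative only):

    import time

    def call_with_timeout(fn, timeout=None, retry_on=(TimeoutError,)):
        # Retry fn() on the given exceptions; once `timeout` seconds have
        # elapsed, let the exception propagate instead of retrying again.
        start = time.time()
        while True:
            try:
                return fn()
            except retry_on as e:
                if timeout is not None and time.time() > start + timeout:
                    raise
                print("retrying after:", e)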
 
spaces/BwayKC/prompthero-openjourney-v2/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Prompthero Openjourney V2
- emoji: 🐢
- colorFrom: blue
- colorTo: gray
- sdk: gradio
- sdk_version: 3.16.0
- app_file: app.py
- pinned: false
- license: openrail
- duplicated_from: lizhome/prompthero-openjourney-v2
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CVPR/LIVE/pybind11/include/pybind11/detail/common.h DELETED
@@ -1,837 +0,0 @@
- /*
-     pybind11/detail/common.h -- Basic macros
-
-     Copyright (c) 2016 Wenzel Jakob <[email protected]>
-
-     All rights reserved. Use of this source code is governed by a
-     BSD-style license that can be found in the LICENSE file.
- */
-
- #pragma once
-
- #define PYBIND11_VERSION_MAJOR 2
- #define PYBIND11_VERSION_MINOR 6
- #define PYBIND11_VERSION_PATCH dev0
-
- #define PYBIND11_NAMESPACE_BEGIN(name) namespace name {
- #define PYBIND11_NAMESPACE_END(name) }
-
- // Robust support for some features and loading modules compiled against different pybind versions
- // requires forcing hidden visibility on pybind code, so we enforce this by setting the attribute on
- // the main `pybind11` namespace.
- #if !defined(PYBIND11_NAMESPACE)
- #  ifdef __GNUG__
- #    define PYBIND11_NAMESPACE pybind11 __attribute__((visibility("hidden")))
- #  else
- #    define PYBIND11_NAMESPACE pybind11
- #  endif
- #endif
-
- #if !(defined(_MSC_VER) && __cplusplus == 199711L) && !defined(__INTEL_COMPILER)
- #  if __cplusplus >= 201402L
- #    define PYBIND11_CPP14
- #    if __cplusplus >= 201703L
- #      define PYBIND11_CPP17
- #    endif
- #  endif
- #elif defined(_MSC_VER) && __cplusplus == 199711L
- // MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully implemented)
- // Unless you use the /Zc:__cplusplus flag on Visual Studio 2017 15.7 Preview 3 or newer
- #  if _MSVC_LANG >= 201402L
- #    define PYBIND11_CPP14
- #    if _MSVC_LANG > 201402L && _MSC_VER >= 1910
- #      define PYBIND11_CPP17
- #    endif
- #  endif
- #endif
-
- // Compiler version assertions
- #if defined(__INTEL_COMPILER)
- #  if __INTEL_COMPILER < 1700
- #    error pybind11 requires Intel C++ compiler v17 or newer
- #  endif
- #elif defined(__clang__) && !defined(__apple_build_version__)
- #  if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 3)
- #    error pybind11 requires clang 3.3 or newer
- #  endif
- #elif defined(__clang__)
- // Apple changes clang version macros to its Xcode version; the first Xcode release based on
- // (upstream) clang 3.3 was Xcode 5:
- #  if __clang_major__ < 5
- #    error pybind11 requires Xcode/clang 5.0 or newer
- #  endif
- #elif defined(__GNUG__)
- #  if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
- #    error pybind11 requires gcc 4.8 or newer
- #  endif
- #elif defined(_MSC_VER)
- // Pybind hits various compiler bugs in 2015u2 and earlier, and also makes use of some stl features
- // (e.g. std::negation) added in 2015u3:
- #  if _MSC_FULL_VER < 190024210
- #    error pybind11 requires MSVC 2015 update 3 or newer
- #  endif
- #endif
-
- #if !defined(PYBIND11_EXPORT)
- #  if defined(WIN32) || defined(_WIN32)
- #    define PYBIND11_EXPORT __declspec(dllexport)
- #  else
- #    define PYBIND11_EXPORT __attribute__ ((visibility("default")))
- #  endif
- #endif
-
- #if defined(_MSC_VER)
- #  define PYBIND11_NOINLINE __declspec(noinline)
- #else
- #  define PYBIND11_NOINLINE __attribute__ ((noinline))
- #endif
-
- #if defined(PYBIND11_CPP14)
- #  define PYBIND11_DEPRECATED(reason) [[deprecated(reason)]]
- #else
- #  define PYBIND11_DEPRECATED(reason) __attribute__((deprecated(reason)))
- #endif
-
- #if defined(PYBIND11_CPP17)
- #  define PYBIND11_MAYBE_UNUSED [[maybe_unused]]
- #elif defined(_MSC_VER) && !defined(__clang__)
- #  define PYBIND11_MAYBE_UNUSED
- #else
- #  define PYBIND11_MAYBE_UNUSED __attribute__ ((__unused__))
- #endif
-
- /* Don't let Python.h #define (v)snprintf as macro because they are implemented
-    properly in Visual Studio since 2015. */
- #if defined(_MSC_VER) && _MSC_VER >= 1900
- #  define HAVE_SNPRINTF 1
- #endif
-
- /// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode
- #if defined(_MSC_VER)
- #  if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 4)
- #    define HAVE_ROUND 1
- #  endif
- #  pragma warning(push)
- #  pragma warning(disable: 4510 4610 4512 4005)
- #  if defined(_DEBUG) && !defined(Py_DEBUG)
- #    define PYBIND11_DEBUG_MARKER
- #    undef _DEBUG
- #  endif
- #endif
-
- #include <Python.h>
- #include <frameobject.h>
- #include <pythread.h>
-
- /* Python #defines overrides on all sorts of core functions, which
-    tends to wreak havoc in C++ codebases that expect these to work
-    like regular functions (potentially with several overloads) */
- #if defined(isalnum)
- #  undef isalnum
- #  undef isalpha
- #  undef islower
- #  undef isspace
- #  undef isupper
- #  undef tolower
- #  undef toupper
- #endif
-
- #if defined(copysign)
- #  undef copysign
- #endif
-
- #if defined(_MSC_VER)
- #  if defined(PYBIND11_DEBUG_MARKER)
- #    define _DEBUG
- #    undef PYBIND11_DEBUG_MARKER
- #  endif
- #  pragma warning(pop)
- #endif
-
- #include <cstddef>
- #include <cstring>
- #include <forward_list>
- #include <vector>
- #include <string>
- #include <stdexcept>
- #include <unordered_set>
- #include <unordered_map>
- #include <memory>
- #include <typeindex>
- #include <type_traits>
-
- #if PY_MAJOR_VERSION >= 3 /// Compatibility macros for various Python versions
- #define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr)
- #define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check
- #define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION
- #define PYBIND11_BYTES_CHECK PyBytes_Check
- #define PYBIND11_BYTES_FROM_STRING PyBytes_FromString
- #define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize
- #define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize
- #define PYBIND11_BYTES_AS_STRING PyBytes_AsString
- #define PYBIND11_BYTES_SIZE PyBytes_Size
- #define PYBIND11_LONG_CHECK(o) PyLong_Check(o)
- #define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o)
- #define PYBIND11_LONG_FROM_SIGNED(o) PyLong_FromSsize_t((ssize_t) o)
- #define PYBIND11_LONG_FROM_UNSIGNED(o) PyLong_FromSize_t((size_t) o)
- #define PYBIND11_BYTES_NAME "bytes"
- #define PYBIND11_STRING_NAME "str"
- #define PYBIND11_SLICE_OBJECT PyObject
- #define PYBIND11_FROM_STRING PyUnicode_FromString
- #define PYBIND11_STR_TYPE ::pybind11::str
- #define PYBIND11_BOOL_ATTR "__bool__"
- #define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool)
- // Providing a separate declaration to make Clang's -Wmissing-prototypes happy.
- // See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
- #define PYBIND11_PLUGIN_IMPL(name) \
-     extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT PyObject *PyInit_##name(); \
-     extern "C" PYBIND11_EXPORT PyObject *PyInit_##name()
-
- #else
- #define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyMethod_New(ptr, nullptr, class_)
- #define PYBIND11_INSTANCE_METHOD_CHECK PyMethod_Check
- #define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyMethod_GET_FUNCTION
- #define PYBIND11_BYTES_CHECK PyString_Check
- #define PYBIND11_BYTES_FROM_STRING PyString_FromString
- #define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyString_FromStringAndSize
- #define PYBIND11_BYTES_AS_STRING_AND_SIZE PyString_AsStringAndSize
- #define PYBIND11_BYTES_AS_STRING PyString_AsString
- #define PYBIND11_BYTES_SIZE PyString_Size
- #define PYBIND11_LONG_CHECK(o) (PyInt_Check(o) || PyLong_Check(o))
- #define PYBIND11_LONG_AS_LONGLONG(o) (PyInt_Check(o) ? (long long) PyLong_AsLong(o) : PyLong_AsLongLong(o))
- #define PYBIND11_LONG_FROM_SIGNED(o) PyInt_FromSsize_t((ssize_t) o) // Returns long if needed.
- #define PYBIND11_LONG_FROM_UNSIGNED(o) PyInt_FromSize_t((size_t) o) // Returns long if needed.
- #define PYBIND11_BYTES_NAME "str"
- #define PYBIND11_STRING_NAME "unicode"
- #define PYBIND11_SLICE_OBJECT PySliceObject
- #define PYBIND11_FROM_STRING PyString_FromString
- #define PYBIND11_STR_TYPE ::pybind11::bytes
- #define PYBIND11_BOOL_ATTR "__nonzero__"
- #define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_nonzero)
- // Providing a separate PyInit decl to make Clang's -Wmissing-prototypes happy.
- // See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
- #define PYBIND11_PLUGIN_IMPL(name) \
-     static PyObject *pybind11_init_wrapper(); \
-     extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT void init##name(); \
-     extern "C" PYBIND11_EXPORT void init##name() { \
-         (void)pybind11_init_wrapper(); \
-     } \
-     PyObject *pybind11_init_wrapper()
- #endif
-
- #if PY_VERSION_HEX >= 0x03050000 && PY_VERSION_HEX < 0x03050200
- extern "C" {
-     struct _Py_atomic_address { void *value; };
-     PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current;
- }
- #endif
-
- #define PYBIND11_TRY_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code
- #define PYBIND11_STRINGIFY(x) #x
- #define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x)
- #define PYBIND11_CONCAT(first, second) first##second
- #define PYBIND11_ENSURE_INTERNALS_READY \
-     pybind11::detail::get_internals();
-
- #define PYBIND11_CHECK_PYTHON_VERSION \
-     { \
-         const char *compiled_ver = PYBIND11_TOSTRING(PY_MAJOR_VERSION) \
-             "." PYBIND11_TOSTRING(PY_MINOR_VERSION); \
-         const char *runtime_ver = Py_GetVersion(); \
-         size_t len = std::strlen(compiled_ver); \
-         if (std::strncmp(runtime_ver, compiled_ver, len) != 0 \
-                 || (runtime_ver[len] >= '0' && runtime_ver[len] <= '9')) { \
-             PyErr_Format(PyExc_ImportError, \
-                 "Python version mismatch: module was compiled for Python %s, " \
-                 "but the interpreter version is incompatible: %s.", \
-                 compiled_ver, runtime_ver); \
-             return nullptr; \
-         } \
-     }
-
- #define PYBIND11_CATCH_INIT_EXCEPTIONS \
-     catch (pybind11::error_already_set &e) { \
-         PyErr_SetString(PyExc_ImportError, e.what()); \
-         return nullptr; \
-     } catch (const std::exception &e) { \
-         PyErr_SetString(PyExc_ImportError, e.what()); \
-         return nullptr; \
-     } \
-
- /** \rst
-     ***Deprecated in favor of PYBIND11_MODULE***
-
-     This macro creates the entry point that will be invoked when the Python interpreter
-     imports a plugin library. Please create a `module` in the function body and return
-     the pointer to its underlying Python object at the end.
-
-     .. code-block:: cpp
-
-         PYBIND11_PLUGIN(example) {
-             pybind11::module m("example", "pybind11 example plugin");
-             /// Set up bindings here
-             return m.ptr();
-         }
- \endrst */
- #define PYBIND11_PLUGIN(name) \
-     PYBIND11_DEPRECATED("PYBIND11_PLUGIN is deprecated, use PYBIND11_MODULE") \
-     static PyObject *pybind11_init(); \
-     PYBIND11_PLUGIN_IMPL(name) { \
-         PYBIND11_CHECK_PYTHON_VERSION \
-         PYBIND11_ENSURE_INTERNALS_READY \
-         try { \
-             return pybind11_init(); \
-         } PYBIND11_CATCH_INIT_EXCEPTIONS \
-     } \
-     PyObject *pybind11_init()
-
- /** \rst
-     This macro creates the entry point that will be invoked when the Python interpreter
-     imports an extension module. The module name is given as the first argument and it
-     should not be in quotes. The second macro argument defines a variable of type
-     `py::module` which can be used to initialize the module.
-
-     The entry point is marked as "maybe unused" to aid dead-code detection analysis:
-     since the entry point is typically only looked up at runtime and not referenced
-     during translation, it would otherwise appear as unused ("dead") code.
-
-     .. code-block:: cpp
-
-         PYBIND11_MODULE(example, m) {
-             m.doc() = "pybind11 example module";
-
-             // Add bindings here
-             m.def("foo", []() {
-                 return "Hello, World!";
-             });
-         }
- \endrst */
- #define PYBIND11_MODULE(name, variable) \
-     PYBIND11_MAYBE_UNUSED \
-     static void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &); \
-     PYBIND11_PLUGIN_IMPL(name) { \
-         PYBIND11_CHECK_PYTHON_VERSION \
-         PYBIND11_ENSURE_INTERNALS_READY \
-         auto m = pybind11::module(PYBIND11_TOSTRING(name)); \
-         try { \
-             PYBIND11_CONCAT(pybind11_init_, name)(m); \
-             return m.ptr(); \
-         } PYBIND11_CATCH_INIT_EXCEPTIONS \
-     } \
-     void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &variable)
-
-
- PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-
- using ssize_t = Py_ssize_t;
- using size_t = std::size_t;
-
- /// Approach used to cast a previously unknown C++ instance into a Python object
- enum class return_value_policy : uint8_t {
-     /** This is the default return value policy, which falls back to the policy
-         return_value_policy::take_ownership when the return value is a pointer.
-         Otherwise, it uses return_value::move or return_value::copy for rvalue
-         and lvalue references, respectively. See below for a description of what
-         all of these different policies do. */
-     automatic = 0,
-
-     /** As above, but use policy return_value_policy::reference when the return
-         value is a pointer. This is the default conversion policy for function
-         arguments when calling Python functions manually from C++ code (i.e. via
-         handle::operator()). You probably won't need to use this. */
-     automatic_reference,
-
-     /** Reference an existing object (i.e. do not create a new copy) and take
-         ownership. Python will call the destructor and delete operator when the
-         object’s reference count reaches zero. Undefined behavior ensues when
-         the C++ side does the same. */
-     take_ownership,
-
-     /** Create a new copy of the returned object, which will be owned by
-         Python. This policy is comparably safe because the lifetimes of the two
-         instances are decoupled. */
-     copy,
-
-     /** Use std::move to move the return value contents into a new instance
-         that will be owned by Python. This policy is comparably safe because the
-         lifetimes of the two instances (move source and destination) are
-         decoupled. */
-     move,
-
-     /** Reference an existing object, but do not take ownership. The C++ side
-         is responsible for managing the object’s lifetime and deallocating it
-         when it is no longer used. Warning: undefined behavior will ensue when
-         the C++ side deletes an object that is still referenced and used by
-         Python. */
-     reference,
-
-     /** This policy only applies to methods and properties. It references the
-         object without taking ownership similar to the above
-         return_value_policy::reference policy. In contrast to that policy, the
-         function or property’s implicit this argument (called the parent) is
-         considered to be the owner of the return value (the child).
-         pybind11 then couples the lifetime of the parent to the child via a
-         reference relationship that ensures that the parent cannot be garbage
-         collected while Python is still using the child. More advanced
-         variations of this scheme are also possible using combinations of
-         return_value_policy::reference and the keep_alive call policy */
-     reference_internal
- };
-
- PYBIND11_NAMESPACE_BEGIN(detail)
-
- inline static constexpr int log2(size_t n, int k = 0) { return (n <= 1) ? k : log2(n >> 1, k + 1); }
-
- // Returns the size as a multiple of sizeof(void *), rounded up.
- inline static constexpr size_t size_in_ptrs(size_t s) { return 1 + ((s - 1) >> log2(sizeof(void *))); }
-
- /**
-  * The space to allocate for simple layout instance holders (see below) in multiple of the size of
-  * a pointer (e.g. 2 means 16 bytes on 64-bit architectures). The default is the minimum required
-  * to hold either a std::unique_ptr or std::shared_ptr (which is almost always
-  * sizeof(std::shared_ptr<T>)).
-  */
- constexpr size_t instance_simple_holder_in_ptrs() {
-     static_assert(sizeof(std::shared_ptr<int>) >= sizeof(std::unique_ptr<int>),
-                   "pybind assumes std::shared_ptrs are at least as big as std::unique_ptrs");
-     return size_in_ptrs(sizeof(std::shared_ptr<int>));
- }
-
- // Forward declarations
- struct type_info;
- struct value_and_holder;
-
- struct nonsimple_values_and_holders {
-     void **values_and_holders;
-     uint8_t *status;
- };
-
- /// The 'instance' type which needs to be standard layout (need to be able to use 'offsetof')
- struct instance {
-     PyObject_HEAD
-     /// Storage for pointers and holder; see simple_layout, below, for a description
-     union {
-         void *simple_value_holder[1 + instance_simple_holder_in_ptrs()];
-         nonsimple_values_and_holders nonsimple;
-     };
-     /// Weak references
-     PyObject *weakrefs;
-     /// If true, the pointer is owned which means we're free to manage it with a holder.
-     bool owned : 1;
-     /**
-      * An instance has two possible value/holder layouts.
-      *
-      * Simple layout (when this flag is true), means the `simple_value_holder` is set with a pointer
-      * and the holder object governing that pointer, i.e. [val1*][holder]. This layout is applied
-      * whenever there is no python-side multiple inheritance of bound C++ types *and* the type's
-      * holder will fit in the default space (which is large enough to hold either a std::unique_ptr
-      * or std::shared_ptr).
-      *
-      * Non-simple layout applies when using custom holders that require more space than `shared_ptr`
-      * (which is typically the size of two pointers), or when multiple inheritance is used on the
-      * python side. Non-simple layout allocates the required amount of memory to have multiple
-      * bound C++ classes as parents. Under this layout, `nonsimple.values_and_holders` is set to a
-      * pointer to allocated space of the required space to hold a sequence of value pointers and
-      * holders followed `status`, a set of bit flags (1 byte each), i.e.
-      * [val1*][holder1][val2*][holder2]...[bb...] where each [block] is rounded up to a multiple of
-      * `sizeof(void *)`. `nonsimple.status` is, for convenience, a pointer to the
-      * beginning of the [bb...] block (but not independently allocated).
-      *
-      * Status bits indicate whether the associated holder is constructed (&
-      * status_holder_constructed) and whether the value pointer is registered (&
-      * status_instance_registered) in `registered_instances`.
-      */
-     bool simple_layout : 1;
-     /// For simple layout, tracks whether the holder has been constructed
-     bool simple_holder_constructed : 1;
-     /// For simple layout, tracks whether the instance is registered in `registered_instances`
-     bool simple_instance_registered : 1;
-     /// If true, get_internals().patients has an entry for this object
-     bool has_patients : 1;
-
-     /// Initializes all of the above type/values/holders data (but not the instance values themselves)
-     void allocate_layout();
-
-     /// Destroys/deallocates all of the above
-     void deallocate_layout();
-
-     /// Returns the value_and_holder wrapper for the given type (or the first, if `find_type`
-     /// omitted). Returns a default-constructed (with `.inst = nullptr`) object on failure if
-     /// `throw_if_missing` is false.
-     value_and_holder get_value_and_holder(const type_info *find_type = nullptr, bool throw_if_missing = true);
-
-     /// Bit values for the non-simple status flags
-     static constexpr uint8_t status_holder_constructed = 1;
-     static constexpr uint8_t status_instance_registered = 2;
- };
-
- static_assert(std::is_standard_layout<instance>::value, "Internal error: `pybind11::detail::instance` is not standard layout!");
-
- /// from __cpp_future__ import (convenient aliases from C++14/17)
- #if defined(PYBIND11_CPP14) && (!defined(_MSC_VER) || _MSC_VER >= 1910)
- using std::enable_if_t;
- using std::conditional_t;
- using std::remove_cv_t;
- using std::remove_reference_t;
- #else
- template <bool B, typename T = void> using enable_if_t = typename std::enable_if<B, T>::type;
- template <bool B, typename T, typename F> using conditional_t = typename std::conditional<B, T, F>::type;
- template <typename T> using remove_cv_t = typename std::remove_cv<T>::type;
- template <typename T> using remove_reference_t = typename std::remove_reference<T>::type;
- #endif
-
- /// Index sequences
- #if defined(PYBIND11_CPP14)
- using std::index_sequence;
- using std::make_index_sequence;
- #else
- template<size_t ...> struct index_sequence { };
- template<size_t N, size_t ...S> struct make_index_sequence_impl : make_index_sequence_impl <N - 1, N - 1, S...> { };
- template<size_t ...S> struct make_index_sequence_impl <0, S...> { typedef index_sequence<S...> type; };
- template<size_t N> using make_index_sequence = typename make_index_sequence_impl<N>::type;
- #endif
-
- /// Make an index sequence of the indices of true arguments
- template <typename ISeq, size_t, bool...> struct select_indices_impl { using type = ISeq; };
- template <size_t... IPrev, size_t I, bool B, bool... Bs> struct select_indices_impl<index_sequence<IPrev...>, I, B, Bs...>
-     : select_indices_impl<conditional_t<B, index_sequence<IPrev..., I>, index_sequence<IPrev...>>, I + 1, Bs...> {};
- template <bool... Bs> using select_indices = typename select_indices_impl<index_sequence<>, 0, Bs...>::type;
-
- /// Backports of std::bool_constant and std::negation to accommodate older compilers
- template <bool B> using bool_constant = std::integral_constant<bool, B>;
- template <typename T> struct negation : bool_constant<!T::value> { };
-
- template <typename...> struct void_t_impl { using type = void; };
- template <typename... Ts> using void_t = typename void_t_impl<Ts...>::type;
-
- /// Compile-time all/any/none of that check the boolean value of all template types
- #if defined(__cpp_fold_expressions) && !(defined(_MSC_VER) && (_MSC_VER < 1916))
- template <class... Ts> using all_of = bool_constant<(Ts::value && ...)>;
- template <class... Ts> using any_of = bool_constant<(Ts::value || ...)>;
- #elif !defined(_MSC_VER)
- template <bool...> struct bools {};
- template <class... Ts> using all_of = std::is_same<
-     bools<Ts::value..., true>,
-     bools<true, Ts::value...>>;
- template <class... Ts> using any_of = negation<all_of<negation<Ts>...>>;
- #else
- // MSVC has trouble with the above, but supports std::conjunction, which we can use instead (albeit
- // at a slight loss of compilation efficiency).
- template <class... Ts> using all_of = std::conjunction<Ts...>;
- template <class... Ts> using any_of = std::disjunction<Ts...>;
- #endif
- template <class... Ts> using none_of = negation<any_of<Ts...>>;
-
- template <class T, template<class> class... Predicates> using satisfies_all_of = all_of<Predicates<T>...>;
- template <class T, template<class> class... Predicates> using satisfies_any_of = any_of<Predicates<T>...>;
- template <class T, template<class> class... Predicates> using satisfies_none_of = none_of<Predicates<T>...>;
-
- /// Strip the class from a method type
- template <typename T> struct remove_class { };
- template <typename C, typename R, typename... A> struct remove_class<R (C::*)(A...)> { typedef R type(A...); };
- template <typename C, typename R, typename... A> struct remove_class<R (C::*)(A...) const> { typedef R type(A...); };
-
- /// Helper template to strip away type modifiers
- template <typename T> struct intrinsic_type { typedef T type; };
- template <typename T> struct intrinsic_type<const T> { typedef typename intrinsic_type<T>::type type; };
- template <typename T> struct intrinsic_type<T*> { typedef typename intrinsic_type<T>::type type; };
- template <typename T> struct intrinsic_type<T&> { typedef typename intrinsic_type<T>::type type; };
- template <typename T> struct intrinsic_type<T&&> { typedef typename intrinsic_type<T>::type type; };
- template <typename T, size_t N> struct intrinsic_type<const T[N]> { typedef typename intrinsic_type<T>::type type; };
- template <typename T, size_t N> struct intrinsic_type<T[N]> { typedef typename intrinsic_type<T>::type type; };
- template <typename T> using intrinsic_t = typename intrinsic_type<T>::type;
-
- /// Helper type to replace 'void' in some expressions
- struct void_type { };
-
- /// Helper template which holds a list of types
- template <typename...> struct type_list { };
-
- /// Compile-time integer sum
- #ifdef __cpp_fold_expressions
- template <typename... Ts> constexpr size_t constexpr_sum(Ts... ns) { return (0 + ... + size_t{ns}); }
- #else
- constexpr size_t constexpr_sum() { return 0; }
- template <typename T, typename... Ts>
- constexpr size_t constexpr_sum(T n, Ts... ns) { return size_t{n} + constexpr_sum(ns...); }
- #endif
-
- PYBIND11_NAMESPACE_BEGIN(constexpr_impl)
- /// Implementation details for constexpr functions
- constexpr int first(int i) { return i; }
- template <typename T, typename... Ts>
- constexpr int first(int i, T v, Ts... vs) { return v ? i : first(i + 1, vs...); }
-
- constexpr int last(int /*i*/, int result) { return result; }
- template <typename T, typename... Ts>
- constexpr int last(int i, int result, T v, Ts... vs) { return last(i + 1, v ? i : result, vs...); }
- PYBIND11_NAMESPACE_END(constexpr_impl)
-
- /// Return the index of the first type in Ts which satisfies Predicate<T>. Returns sizeof...(Ts) if
- /// none match.
- template <template<typename> class Predicate, typename... Ts>
- constexpr int constexpr_first() { return constexpr_impl::first(0, Predicate<Ts>::value...); }
-
- /// Return the index of the last type in Ts which satisfies Predicate<T>, or -1 if none match.
- template <template<typename> class Predicate, typename... Ts>
- constexpr int constexpr_last() { return constexpr_impl::last(0, -1, Predicate<Ts>::value...); }
-
- /// Return the Nth element from the parameter pack
- template <size_t N, typename T, typename... Ts>
- struct pack_element { using type = typename pack_element<N - 1, Ts...>::type; };
- template <typename T, typename... Ts>
- struct pack_element<0, T, Ts...> { using type = T; };
-
- /// Return the one and only type which matches the predicate, or Default if none match.
- /// If more than one type matches the predicate, fail at compile-time.
- template <template<typename> class Predicate, typename Default, typename... Ts>
- struct exactly_one {
-     static constexpr auto found = constexpr_sum(Predicate<Ts>::value...);
-     static_assert(found <= 1, "Found more than one type matching the predicate");
-
-     static constexpr auto index = found ? constexpr_first<Predicate, Ts...>() : 0;
-     using type = conditional_t<found, typename pack_element<index, Ts...>::type, Default>;
- };
- template <template<typename> class P, typename Default>
- struct exactly_one<P, Default> { using type = Default; };
-
- template <template<typename> class Predicate, typename Default, typename... Ts>
- using exactly_one_t = typename exactly_one<Predicate, Default, Ts...>::type;
-
- /// Defer the evaluation of type T until types Us are instantiated
- template <typename T, typename... /*Us*/> struct deferred_type { using type = T; };
- template <typename T, typename... Us> using deferred_t = typename deferred_type<T, Us...>::type;
-
- /// Like is_base_of, but requires a strict base (i.e. `is_strict_base_of<T, T>::value == false`,
- /// unlike `std::is_base_of`)
- template <typename Base, typename Derived> using is_strict_base_of = bool_constant<
-     std::is_base_of<Base, Derived>::value && !std::is_same<Base, Derived>::value>;
-
- /// Like is_base_of, but also requires that the base type is accessible (i.e. that a Derived pointer
- /// can be converted to a Base pointer)
- template <typename Base, typename Derived> using is_accessible_base_of = bool_constant<
-     std::is_base_of<Base, Derived>::value && std::is_convertible<Derived *, Base *>::value>;
-
- template <template<typename...> class Base>
- struct is_template_base_of_impl {
-     template <typename... Us> static std::true_type check(Base<Us...> *);
-     static std::false_type check(...);
- };
-
- /// Check if a template is the base of a type. For example:
- /// `is_template_base_of<Base, T>` is true if `struct T : Base<U> {}` where U can be anything
- template <template<typename...> class Base, typename T>
- #if !defined(_MSC_VER)
- using is_template_base_of = decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T>*)nullptr));
- #else // MSVC2015 has trouble with decltype in template aliases
- struct is_template_base_of : decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T>*)nullptr)) { };
- #endif
-
- /// Check if T is an instantiation of the template `Class`. For example:
- /// `is_instantiation<shared_ptr, T>` is true if `T == shared_ptr<U>` where U can be anything.
- template <template<typename...> class Class, typename T>
- struct is_instantiation : std::false_type { };
- template <template<typename...> class Class, typename... Us>
- struct is_instantiation<Class, Class<Us...>> : std::true_type { };
-
- /// Check if T is std::shared_ptr<U> where U can be anything
- template <typename T> using is_shared_ptr = is_instantiation<std::shared_ptr, T>;
-
- /// Check if T looks like an input iterator
- template <typename T, typename = void> struct is_input_iterator : std::false_type {};
- template <typename T>
- struct is_input_iterator<T, void_t<decltype(*std::declval<T &>()), decltype(++std::declval<T &>())>>
-     : std::true_type {};
-
- template <typename T> using is_function_pointer = bool_constant<
-     std::is_pointer<T>::value && std::is_function<typename std::remove_pointer<T>::type>::value>;
-
- template <typename F> struct strip_function_object {
-     using type = typename remove_class<decltype(&F::operator())>::type;
- };
-
- // Extracts the function signature from a function, function pointer or lambda.
- template <typename Function, typename F = remove_reference_t<Function>>
- using function_signature_t = conditional_t<
-     std::is_function<F>::value,
-     F,
-     typename conditional_t<
-         std::is_pointer<F>::value || std::is_member_pointer<F>::value,
-         std::remove_pointer<F>,
-         strip_function_object<F>
-     >::type
- >;
-
- /// Returns true if the type looks like a lambda: that is, isn't a function, pointer or member
- /// pointer. Note that this can catch all sorts of other things, too; this is intended to be used
- /// in a place where passing a lambda makes sense.
- template <typename T> using is_lambda = satisfies_none_of<remove_reference_t<T>,
-     std::is_function, std::is_pointer, std::is_member_pointer>;
-
- /// Ignore that a variable is unused in compiler warnings
- inline void ignore_unused(const int *) { }
-
- /// Apply a function over each element of a parameter pack
- #ifdef __cpp_fold_expressions
- #define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (((PATTERN), void()), ...)
- #else
- using expand_side_effects = bool[];
- #define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (void)pybind11::detail::expand_side_effects{ ((PATTERN), void(), false)..., false }
- #endif
-
- PYBIND11_NAMESPACE_END(detail)
-
- /// C++ bindings of builtin Python exceptions
- class builtin_exception : public std::runtime_error {
- public:
-     using std::runtime_error::runtime_error;
-     /// Set the error using the Python C API
-     virtual void set_error() const = 0;
- };
-
- #define PYBIND11_RUNTIME_EXCEPTION(name, type) \
-     class name : public builtin_exception { public: \
-         using builtin_exception::builtin_exception; \
-         name() : name("") { } \
-         void set_error() const override { PyErr_SetString(type, what()); } \
-     };
-
- PYBIND11_RUNTIME_EXCEPTION(stop_iteration, PyExc_StopIteration)
- PYBIND11_RUNTIME_EXCEPTION(index_error, PyExc_IndexError)
- PYBIND11_RUNTIME_EXCEPTION(key_error, PyExc_KeyError)
- PYBIND11_RUNTIME_EXCEPTION(value_error, PyExc_ValueError)
- PYBIND11_RUNTIME_EXCEPTION(type_error, PyExc_TypeError)
- PYBIND11_RUNTIME_EXCEPTION(buffer_error, PyExc_BufferError)
- PYBIND11_RUNTIME_EXCEPTION(import_error, PyExc_ImportError)
- PYBIND11_RUNTIME_EXCEPTION(cast_error, PyExc_RuntimeError) /// Thrown when pybind11::cast or handle::call fail due to a type casting error
- PYBIND11_RUNTIME_EXCEPTION(reference_cast_error, PyExc_RuntimeError) /// Used internally
708
-
709
- [[noreturn]] PYBIND11_NOINLINE inline void pybind11_fail(const char *reason) { throw std::runtime_error(reason); }
710
- [[noreturn]] PYBIND11_NOINLINE inline void pybind11_fail(const std::string &reason) { throw std::runtime_error(reason); }
711
-
712
- template <typename T, typename SFINAE = void> struct format_descriptor { };
713
-
714
- PYBIND11_NAMESPACE_BEGIN(detail)
715
- // Returns the index of the given type in the type char array below, and in the list in numpy.h
716
- // The order here is: bool; 8 ints ((signed,unsigned)x(8,16,32,64)bits); float,double,long double;
717
- // complex float,double,long double. Note that the long double types only participate when long
718
- // double is actually longer than double (it isn't under MSVC).
719
- // NB: not only the string below but also complex.h and numpy.h rely on this order.
720
- template <typename T, typename SFINAE = void> struct is_fmt_numeric { static constexpr bool value = false; };
721
- template <typename T> struct is_fmt_numeric<T, enable_if_t<std::is_arithmetic<T>::value>> {
722
- static constexpr bool value = true;
723
- static constexpr int index = std::is_same<T, bool>::value ? 0 : 1 + (
724
- std::is_integral<T>::value ? detail::log2(sizeof(T))*2 + std::is_unsigned<T>::value : 8 + (
725
- std::is_same<T, double>::value ? 1 : std::is_same<T, long double>::value ? 2 : 0));
726
- };
727
- PYBIND11_NAMESPACE_END(detail)
728
-
729
- template <typename T> struct format_descriptor<T, detail::enable_if_t<std::is_arithmetic<T>::value>> {
730
- static constexpr const char c = "?bBhHiIqQfdg"[detail::is_fmt_numeric<T>::index];
731
- static constexpr const char value[2] = { c, '\0' };
732
- static std::string format() { return std::string(1, c); }
733
- };
734
-
735
- #if !defined(PYBIND11_CPP17)
736
-
737
- template <typename T> constexpr const char format_descriptor<
738
- T, detail::enable_if_t<std::is_arithmetic<T>::value>>::value[2];
739
-
740
- #endif
741
-
742
- /// RAII wrapper that temporarily clears any Python error state
743
- struct error_scope {
744
- PyObject *type, *value, *trace;
745
- error_scope() { PyErr_Fetch(&type, &value, &trace); }
746
- ~error_scope() { PyErr_Restore(type, value, trace); }
747
- };
748
-
749
- /// Dummy destructor wrapper that can be used to expose classes with a private destructor
750
- struct nodelete { template <typename T> void operator()(T*) { } };
751
-
752
- PYBIND11_NAMESPACE_BEGIN(detail)
753
- template <typename... Args>
754
- struct overload_cast_impl {
755
- constexpr overload_cast_impl() {} // MSVC 2015 needs this
756
-
757
- template <typename Return>
758
- constexpr auto operator()(Return (*pf)(Args...)) const noexcept
759
- -> decltype(pf) { return pf; }
760
-
761
- template <typename Return, typename Class>
762
- constexpr auto operator()(Return (Class::*pmf)(Args...), std::false_type = {}) const noexcept
763
- -> decltype(pmf) { return pmf; }
764
-
765
- template <typename Return, typename Class>
766
- constexpr auto operator()(Return (Class::*pmf)(Args...) const, std::true_type) const noexcept
767
- -> decltype(pmf) { return pmf; }
768
- };
769
- PYBIND11_NAMESPACE_END(detail)
770
-
771
- // overload_cast requires variable templates: C++14
772
- #if defined(PYBIND11_CPP14)
773
- #define PYBIND11_OVERLOAD_CAST 1
774
- /// Syntax sugar for resolving overloaded function pointers:
775
- /// - regular: static_cast<Return (Class::*)(Arg0, Arg1, Arg2)>(&Class::func)
776
- /// - sweet: overload_cast<Arg0, Arg1, Arg2>(&Class::func)
777
- template <typename... Args>
778
- static constexpr detail::overload_cast_impl<Args...> overload_cast = {};
779
- // MSVC 2015 only accepts this particular initialization syntax for this variable template.
780
- #endif
781
-
782
- /// Const member function selector for overload_cast
783
- /// - regular: static_cast<Return (Class::*)(Arg) const>(&Class::func)
784
- /// - sweet: overload_cast<Arg>(&Class::func, const_)
785
- static constexpr auto const_ = std::true_type{};
786
-
787
- #if !defined(PYBIND11_CPP14) // no overload_cast: providing something that static_assert-fails:
788
- template <typename... Args> struct overload_cast {
789
- static_assert(detail::deferred_t<std::false_type, Args...>::value,
790
- "pybind11::overload_cast<...> requires compiling in C++14 mode");
791
- };
792
- #endif // overload_cast
793
-
794
- PYBIND11_NAMESPACE_BEGIN(detail)
795
-
796
- // Adaptor for converting arbitrary container arguments into a vector; implicitly convertible from
797
- // any standard container (or C-style array) supporting std::begin/std::end, any singleton
798
- // arithmetic type (if T is arithmetic), or explicitly constructible from an iterator pair.
799
- template <typename T>
800
- class any_container {
801
- std::vector<T> v;
802
- public:
803
- any_container() = default;
804
-
805
- // Can construct from a pair of iterators
806
- template <typename It, typename = enable_if_t<is_input_iterator<It>::value>>
807
- any_container(It first, It last) : v(first, last) { }
808
-
809
- // Implicit conversion constructor from any arbitrary container type with values convertible to T
810
- template <typename Container, typename = enable_if_t<std::is_convertible<decltype(*std::begin(std::declval<const Container &>())), T>::value>>
811
- any_container(const Container &c) : any_container(std::begin(c), std::end(c)) { }
812
-
813
- // initializer_list's aren't deducible, so don't get matched by the above template; we need this
814
- // to explicitly allow implicit conversion from one:
815
- template <typename TIn, typename = enable_if_t<std::is_convertible<TIn, T>::value>>
816
- any_container(const std::initializer_list<TIn> &c) : any_container(c.begin(), c.end()) { }
817
-
818
- // Avoid copying if given an rvalue vector of the correct type.
819
- any_container(std::vector<T> &&v) : v(std::move(v)) { }
820
-
821
- // Moves the vector out of an rvalue any_container
822
- operator std::vector<T> &&() && { return std::move(v); }
823
-
824
- // Dereferencing obtains a reference to the underlying vector
825
- std::vector<T> &operator*() { return v; }
826
- const std::vector<T> &operator*() const { return v; }
827
-
828
- // -> lets you call methods on the underlying vector
829
- std::vector<T> *operator->() { return &v; }
830
- const std::vector<T> *operator->() const { return &v; }
831
- };
832
-
833
- PYBIND11_NAMESPACE_END(detail)
834
-
835
-
836
-
837
- PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
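
The `is_fmt_numeric` index arithmetic above is what selects a character from `"?bBhHiIqQfdg"` in `format_descriptor`. As a sanity check, the same computation can be re-derived in Python (illustrative only; the function name and flags below are mine, not part of pybind11):

    import math

    TYPE_CHARS = "?bBhHiIqQfdg"  # bool, (u)int8/16/32/64, float, double, long double

    def fmt_char(kind, size_bytes=0, unsigned=False):
        # Mirrors is_fmt_numeric<T>::index: bool -> 0; integral types ->
        # 1 + log2(sizeof) * 2 + is_unsigned; floating types -> 9/10/11.
        if kind == "bool":
            index = 0
        elif kind == "int":
            index = 1 + int(math.log2(size_bytes)) * 2 + int(unsigned)
        else:
            index = 9 + {"float": 0, "double": 1, "long double": 2}[kind]
        return TYPE_CHARS[index]

    assert fmt_char("int", 2, unsigned=True) == "H"  # uint16
    assert fmt_char("double") == "d"
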
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/uninitialized_copy.h DELETED
@@ -1,23 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system inherits uninitialized_copy
- #include <thrust/system/cpp/detail/uninitialized_copy.h>
-
spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py DELETED
@@ -1,14 +0,0 @@
- from .mask_rcnn_R_50_FPN_100ep_LSJ import (
-     dataloader,
-     lr_multiplier,
-     model,
-     optimizer,
-     train,
- )
-
- train.max_iter //= 2  # 100ep -> 50ep
-
- lr_multiplier.scheduler.milestones = [
-     milestone // 2 for milestone in lr_multiplier.scheduler.milestones
- ]
- lr_multiplier.scheduler.num_updates = train.max_iter
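
Halving both `max_iter` and the milestones keeps the learning-rate drops at the same relative positions in training. A toy illustration of the same idea (numbers made up, not detectron2's real values):

    max_iter = 100_000
    milestones = [80_000, 95_000]  # LR drop points for the long schedule

    max_iter //= 2  # 100ep -> 50ep
    milestones = [m // 2 for m in milestones]
    assert milestones == [40_000, 47_500]  # drops stay at 80% and 95% of training
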
spaces/Cartinoe5930/LLMAgora/result/MMLU/README.md DELETED
File without changes
spaces/Catmeow/Face2Painting_From_Photo/face_detection.py DELETED
@@ -1,140 +0,0 @@
- # Copyright (c) 2021 Justin Pinkney
-
- import dlib
- import numpy as np
- import os
- from PIL import Image
- from PIL import ImageOps
- from scipy.ndimage import gaussian_filter
- import cv2
-
-
- MODEL_PATH = "shape_predictor_5_face_landmarks.dat"
- detector = dlib.get_frontal_face_detector()
-
-
- def align(image_in, face_index=0, output_size=256):
-     try:
-         image_in = ImageOps.exif_transpose(image_in)
-     except:
-         print("exif problem, not rotating")
-
-     landmarks = list(get_landmarks(image_in))
-     n_faces = len(landmarks)
-     face_index = min(n_faces-1, face_index)
-     if n_faces == 0:
-         aligned_image = image_in
-         quad = None
-     else:
-         aligned_image, quad = image_align(image_in, landmarks[face_index], output_size=output_size)
-
-     return aligned_image, n_faces, quad
-
-
- def composite_images(quad, img, output):
-     """Composite an image into an output canvas according to transformed co-ords"""
-     output = output.convert("RGBA")
-     img = img.convert("RGBA")
-     input_size = img.size
-     src = np.array(((0, 0), (0, input_size[1]), input_size, (input_size[0], 0)), dtype=np.float32)
-     dst = np.float32(quad)
-     mtx = cv2.getPerspectiveTransform(dst, src)
-     img = img.transform(output.size, Image.PERSPECTIVE, mtx.flatten(), Image.BILINEAR)
-     output.alpha_composite(img)
-
-     return output.convert("RGB")
-
-
- def get_landmarks(image):
-     """Get landmarks from PIL image"""
-     shape_predictor = dlib.shape_predictor(MODEL_PATH)
-
-     max_size = max(image.size)
-     reduction_scale = int(max_size/512)
-     if reduction_scale == 0:
-         reduction_scale = 1
-     downscaled = image.reduce(reduction_scale)
-     img = np.array(downscaled)
-     detections = detector(img, 0)
-
-     for detection in detections:
-         try:
-             face_landmarks = [(reduction_scale*item.x, reduction_scale*item.y) for item in shape_predictor(img, detection).parts()]
-             yield face_landmarks
-         except Exception as e:
-             print(e)
-
-
- def image_align(src_img, face_landmarks, output_size=512, transform_size=2048, enable_padding=True, x_scale=1, y_scale=1, em_scale=0.1, alpha=False):
-     # Align function modified from ffhq-dataset
-     # See https://github.com/NVlabs/ffhq-dataset for license
-
-     lm = np.array(face_landmarks)
-     lm_eye_left = lm[2:3]  # left-clockwise
-     lm_eye_right = lm[0:1]  # left-clockwise
-
-     # Calculate auxiliary vectors.
-     eye_left = np.mean(lm_eye_left, axis=0)
-     eye_right = np.mean(lm_eye_right, axis=0)
-     eye_avg = (eye_left + eye_right) * 0.5
-     eye_to_eye = 0.71*(eye_right - eye_left)
-     mouth_avg = lm[4]
-     eye_to_mouth = 1.35*(mouth_avg - eye_avg)
-
-     # Choose oriented crop rectangle.
-     x = eye_to_eye.copy()
-     x /= np.hypot(*x)
-     x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
-     x *= x_scale
-     y = np.flipud(x) * [-y_scale, y_scale]
-     c = eye_avg + eye_to_mouth * em_scale
-     quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
-     quad_orig = quad.copy()
-     qsize = np.hypot(*x) * 2
-
-     img = src_img.convert('RGBA').convert('RGB')
-
-     # Shrink.
-     shrink = int(np.floor(qsize / output_size * 0.5))
-     if shrink > 1:
-         rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
-         img = img.resize(rsize, Image.ANTIALIAS)
-         quad /= shrink
-         qsize /= shrink
-
-     # Crop.
-     border = max(int(np.rint(qsize * 0.1)), 3)
-     crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
-     crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
-     if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
-         img = img.crop(crop)
-         quad -= crop[0:2]
-
-     # Pad.
-     pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
-     pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
-     if enable_padding and max(pad) > border - 4:
-         pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
-         img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
-         h, w, _ = img.shape
-         y, x, _ = np.ogrid[:h, :w, :1]
-         mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
-         blur = qsize * 0.02
-         img += (gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
-         img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
-         img = np.uint8(np.clip(np.rint(img), 0, 255))
-         if alpha:
-             mask = 1-np.clip(3.0 * mask, 0.0, 1.0)
-             mask = np.uint8(np.clip(np.rint(mask*255), 0, 255))
-             img = np.concatenate((img, mask), axis=2)
-             img = Image.fromarray(img, 'RGBA')
-         else:
-             img = Image.fromarray(img, 'RGB')
-         quad += pad[:2]
-
-     # Transform.
-     img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(), Image.BILINEAR)
-     if output_size < transform_size:
-         img = img.resize((output_size, output_size), Image.ANTIALIAS)
-
-     return img, quad_orig
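
A plausible end-to-end use of the helpers above (file names and the stylization step are placeholders, not part of this module):

    from PIL import Image

    photo = Image.open("group_photo.jpg")  # hypothetical input file
    aligned, n_faces, quad = align(photo, face_index=0, output_size=256)
    print(f"found {n_faces} face(s)")
    stylized = aligned  # stand-in for a real painting model
    if quad is not None:
        # Paste the (stylized) aligned crop back into the original photo.
        result = composite_images(quad, stylized, photo)
        result.save("painted.jpg")
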
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/PpmImagePlugin.py DELETED
@@ -1,347 +0,0 @@
- #
- # The Python Imaging Library.
- # $Id$
- #
- # PPM support for PIL
- #
- # History:
- # 96-03-24 fl Created
- # 98-03-06 fl Write RGBA images (as RGB, that is)
- #
- # Copyright (c) Secret Labs AB 1997-98.
- # Copyright (c) Fredrik Lundh 1996.
- #
- # See the README file for information on usage and redistribution.
- #
-
-
- from . import Image, ImageFile
- from ._binary import i16be as i16
- from ._binary import o8
- from ._binary import o32le as o32
-
- #
- # --------------------------------------------------------------------
-
- b_whitespace = b"\x20\x09\x0a\x0b\x0c\x0d"
-
- MODES = {
-     # standard
-     b"P1": "1",
-     b"P2": "L",
-     b"P3": "RGB",
-     b"P4": "1",
-     b"P5": "L",
-     b"P6": "RGB",
-     # extensions
-     b"P0CMYK": "CMYK",
-     # PIL extensions (for test purposes only)
-     b"PyP": "P",
-     b"PyRGBA": "RGBA",
-     b"PyCMYK": "CMYK",
- }
-
-
- def _accept(prefix):
-     return prefix[0:1] == b"P" and prefix[1] in b"0123456y"
-
-
- ##
- # Image plugin for PBM, PGM, and PPM images.
-
-
- class PpmImageFile(ImageFile.ImageFile):
-     format = "PPM"
-     format_description = "Pbmplus image"
-
-     def _read_magic(self):
-         magic = b""
-         # read until whitespace or longest available magic number
-         for _ in range(6):
-             c = self.fp.read(1)
-             if not c or c in b_whitespace:
-                 break
-             magic += c
-         return magic
-
-     def _read_token(self):
-         token = b""
-         while len(token) <= 10:  # read until next whitespace or limit of 10 characters
-             c = self.fp.read(1)
-             if not c:
-                 break
-             elif c in b_whitespace:  # token ended
-                 if not token:
-                     # skip whitespace at start
-                     continue
-                 break
-             elif c == b"#":
-                 # ignores rest of the line; stops at CR, LF or EOF
-                 while self.fp.read(1) not in b"\r\n":
-                     pass
-                 continue
-             token += c
-         if not token:
-             # Token was not even 1 byte
-             msg = "Reached EOF while reading header"
-             raise ValueError(msg)
-         elif len(token) > 10:
-             msg = f"Token too long in file header: {token.decode()}"
-             raise ValueError(msg)
-         return token
-
-     def _open(self):
-         magic_number = self._read_magic()
-         try:
-             mode = MODES[magic_number]
-         except KeyError:
-             msg = "not a PPM file"
-             raise SyntaxError(msg)
-
-         if magic_number in (b"P1", b"P4"):
-             self.custom_mimetype = "image/x-portable-bitmap"
-         elif magic_number in (b"P2", b"P5"):
-             self.custom_mimetype = "image/x-portable-graymap"
-         elif magic_number in (b"P3", b"P6"):
-             self.custom_mimetype = "image/x-portable-pixmap"
-
-         maxval = None
-         decoder_name = "raw"
-         if magic_number in (b"P1", b"P2", b"P3"):
-             decoder_name = "ppm_plain"
-         for ix in range(3):
-             token = int(self._read_token())
-             if ix == 0:  # token is the x size
-                 xsize = token
-             elif ix == 1:  # token is the y size
-                 ysize = token
-                 if mode == "1":
-                     self.mode = "1"
-                     rawmode = "1;I"
-                     break
-                 else:
-                     self.mode = rawmode = mode
-             elif ix == 2:  # token is maxval
-                 maxval = token
-                 if not 0 < maxval < 65536:
-                     msg = "maxval must be greater than 0 and less than 65536"
-                     raise ValueError(msg)
-                 if maxval > 255 and mode == "L":
-                     self.mode = "I"
-
-                 if decoder_name != "ppm_plain":
-                     # If maxval matches a bit depth, use the raw decoder directly
-                     if maxval == 65535 and mode == "L":
-                         rawmode = "I;16B"
-                     elif maxval != 255:
-                         decoder_name = "ppm"
-
-         args = (rawmode, 0, 1) if decoder_name == "raw" else (rawmode, maxval)
-         self._size = xsize, ysize
-         self.tile = [(decoder_name, (0, 0, xsize, ysize), self.fp.tell(), args)]
-
-
- #
- # --------------------------------------------------------------------
-
-
- class PpmPlainDecoder(ImageFile.PyDecoder):
-     _pulls_fd = True
-
-     def _read_block(self):
-         return self.fd.read(ImageFile.SAFEBLOCK)
-
-     def _find_comment_end(self, block, start=0):
-         a = block.find(b"\n", start)
-         b = block.find(b"\r", start)
-         return min(a, b) if a * b > 0 else max(a, b)  # lowest nonnegative index (or -1)
-
-     def _ignore_comments(self, block):
-         if self._comment_spans:
-             # Finish current comment
-             while block:
-                 comment_end = self._find_comment_end(block)
-                 if comment_end != -1:
-                     # Comment ends in this block
-                     # Delete tail of comment
-                     block = block[comment_end + 1 :]
-                     break
-                 else:
-                     # Comment spans whole block
-                     # So read the next block, looking for the end
-                     block = self._read_block()
-
-         # Search for any further comments
-         self._comment_spans = False
-         while True:
-             comment_start = block.find(b"#")
-             if comment_start == -1:
-                 # No comment found
-                 break
-             comment_end = self._find_comment_end(block, comment_start)
-             if comment_end != -1:
-                 # Comment ends in this block
-                 # Delete comment
-                 block = block[:comment_start] + block[comment_end + 1 :]
-             else:
-                 # Comment continues to next block(s)
-                 block = block[:comment_start]
-                 self._comment_spans = True
-                 break
-         return block
-
-     def _decode_bitonal(self):
-         """
-         This is a separate method because in the plain PBM format, all data tokens are
-         exactly one byte, so the inter-token whitespace is optional.
-         """
-         data = bytearray()
-         total_bytes = self.state.xsize * self.state.ysize
-
-         while len(data) != total_bytes:
-             block = self._read_block()  # read next block
-             if not block:
-                 # eof
-                 break
-
-             block = self._ignore_comments(block)
-
-             tokens = b"".join(block.split())
-             for token in tokens:
-                 if token not in (48, 49):
-                     msg = b"Invalid token for this mode: %s" % bytes([token])
-                     raise ValueError(msg)
-             data = (data + tokens)[:total_bytes]
-         invert = bytes.maketrans(b"01", b"\xFF\x00")
-         return data.translate(invert)
-
-     def _decode_blocks(self, maxval):
-         data = bytearray()
-         max_len = 10
-         out_byte_count = 4 if self.mode == "I" else 1
-         out_max = 65535 if self.mode == "I" else 255
-         bands = Image.getmodebands(self.mode)
-         total_bytes = self.state.xsize * self.state.ysize * bands * out_byte_count
-
-         half_token = False
-         while len(data) != total_bytes:
-             block = self._read_block()  # read next block
-             if not block:
-                 if half_token:
-                     block = bytearray(b" ")  # flush half_token
-                 else:
-                     # eof
-                     break
-
-             block = self._ignore_comments(block)
-
-             if half_token:
-                 block = half_token + block  # stitch half_token to new block
-                 half_token = False
-
-             tokens = block.split()
-
-             if block and not block[-1:].isspace():  # block might split token
-                 half_token = tokens.pop()  # save half token for later
-                 if len(half_token) > max_len:  # prevent buildup of half_token
-                     msg = (
-                         b"Token too long found in data: %s" % half_token[: max_len + 1]
-                     )
-                     raise ValueError(msg)
-
-             for token in tokens:
-                 if len(token) > max_len:
-                     msg = b"Token too long found in data: %s" % token[: max_len + 1]
-                     raise ValueError(msg)
-                 value = int(token)
-                 if value > maxval:
-                     msg = f"Channel value too large for this mode: {value}"
-                     raise ValueError(msg)
-                 value = round(value / maxval * out_max)
-                 data += o32(value) if self.mode == "I" else o8(value)
-             if len(data) == total_bytes:  # finished!
-                 break
-         return data
-
-     def decode(self, buffer):
-         self._comment_spans = False
-         if self.mode == "1":
-             data = self._decode_bitonal()
-             rawmode = "1;8"
-         else:
-             maxval = self.args[-1]
-             data = self._decode_blocks(maxval)
-             rawmode = "I;32" if self.mode == "I" else self.mode
-         self.set_as_raw(bytes(data), rawmode)
-         return -1, 0
-
-
- class PpmDecoder(ImageFile.PyDecoder):
-     _pulls_fd = True
-
-     def decode(self, buffer):
-         data = bytearray()
-         maxval = self.args[-1]
-         in_byte_count = 1 if maxval < 256 else 2
-         out_byte_count = 4 if self.mode == "I" else 1
-         out_max = 65535 if self.mode == "I" else 255
-         bands = Image.getmodebands(self.mode)
-         while len(data) < self.state.xsize * self.state.ysize * bands * out_byte_count:
-             pixels = self.fd.read(in_byte_count * bands)
-             if len(pixels) < in_byte_count * bands:
-                 # eof
-                 break
-             for b in range(bands):
-                 value = (
-                     pixels[b] if in_byte_count == 1 else i16(pixels, b * in_byte_count)
-                 )
-                 value = min(out_max, round(value / maxval * out_max))
-                 data += o32(value) if self.mode == "I" else o8(value)
-         rawmode = "I;32" if self.mode == "I" else self.mode
-         self.set_as_raw(bytes(data), rawmode)
-         return -1, 0
-
-
- #
- # --------------------------------------------------------------------
-
-
- def _save(im, fp, filename):
-     if im.mode == "1":
-         rawmode, head = "1;I", b"P4"
-     elif im.mode == "L":
-         rawmode, head = "L", b"P5"
-     elif im.mode == "I":
-         rawmode, head = "I;16B", b"P5"
-     elif im.mode in ("RGB", "RGBA"):
-         rawmode, head = "RGB", b"P6"
-     else:
-         msg = f"cannot write mode {im.mode} as PPM"
-         raise OSError(msg)
-     fp.write(head + b"\n%d %d\n" % im.size)
-     if head == b"P6":
-         fp.write(b"255\n")
-     elif head == b"P5":
-         if rawmode == "L":
-             fp.write(b"255\n")
-         else:
-             fp.write(b"65535\n")
-     ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))])
-
-     # ALTERNATIVE: save via builtin debug function
-     # im._dump(filename)
-
-
- #
- # --------------------------------------------------------------------
-
-
- Image.register_open(PpmImageFile.format, PpmImageFile, _accept)
- Image.register_save(PpmImageFile.format, _save)
-
- Image.register_decoder("ppm", PpmDecoder)
- Image.register_decoder("ppm_plain", PpmPlainDecoder)
-
- Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm"])
-
- Image.register_mime(PpmImageFile.format, "image/x-portable-anymap")
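
A quick round-trip through Pillow's public API, which dispatches to the plugin above (the file name is arbitrary):

    from PIL import Image

    im = Image.new("RGB", (2, 2), (255, 0, 0))
    im.save("tiny.ppm")  # written as binary "P6" with maxval 255 by _save()
    back = Image.open("tiny.ppm")
    assert back.format == "PPM" and back.mode == "RGB" and back.size == (2, 2)
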
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/TiffTags.py DELETED
@@ -1,560 +0,0 @@
- #
- # The Python Imaging Library.
- # $Id$
- #
- # TIFF tags
- #
- # This module provides clear-text names for various well-known
- # TIFF tags. The TIFF codec works just fine without it.
- #
- # Copyright (c) Secret Labs AB 1999.
- #
- # See the README file for information on usage and redistribution.
- #
-
- ##
- # This module provides constants and clear-text names for various
- # well-known TIFF tags.
- ##
-
- from collections import namedtuple
-
-
- class TagInfo(namedtuple("_TagInfo", "value name type length enum")):
-     __slots__ = []
-
-     def __new__(cls, value=None, name="unknown", type=None, length=None, enum=None):
-         return super().__new__(cls, value, name, type, length, enum or {})
-
-     def cvt_enum(self, value):
-         # Using get will call hash(value), which can be expensive
-         # for some types (e.g. Fraction). Since self.enum is rarely
-         # used, it's usually better to test it first.
-         return self.enum.get(value, value) if self.enum else value
-
-
- def lookup(tag, group=None):
-     """
-     :param tag: Integer tag number
-     :param group: Which :py:data:`~PIL.TiffTags.TAGS_V2_GROUPS` to look in
-
-     .. versionadded:: 8.3.0
-
-     :returns: TagInfo namedtuple, from the ``TAGS_V2`` info if possible,
-         otherwise just populating the value and name from ``TAGS``.
-         If the tag is not recognized, "unknown" is returned for the name
-
-     """
-
-     if group is not None:
-         info = TAGS_V2_GROUPS[group].get(tag) if group in TAGS_V2_GROUPS else None
-     else:
-         info = TAGS_V2.get(tag)
-     return info or TagInfo(tag, TAGS.get(tag, "unknown"))
-
-
- ##
- # Map tag numbers to tag info.
- #
- # id: (Name, Type, Length, enum_values)
- #
- # The length here differs from the length in the tiff spec. For
- # numbers, the tiff spec is for the number of fields returned. We
- # agree here. For string-like types, the tiff spec uses the length of
- # field in bytes. In Pillow, we are using the number of expected
- # fields, in general 1 for string-like types.
-
-
- BYTE = 1
- ASCII = 2
- SHORT = 3
- LONG = 4
- RATIONAL = 5
- SIGNED_BYTE = 6
- UNDEFINED = 7
- SIGNED_SHORT = 8
- SIGNED_LONG = 9
- SIGNED_RATIONAL = 10
- FLOAT = 11
- DOUBLE = 12
- IFD = 13
- LONG8 = 16
-
- TAGS_V2 = {
-     254: ("NewSubfileType", LONG, 1),
-     255: ("SubfileType", SHORT, 1),
-     256: ("ImageWidth", LONG, 1),
-     257: ("ImageLength", LONG, 1),
-     258: ("BitsPerSample", SHORT, 0),
-     259: (
-         "Compression",
-         SHORT,
-         1,
-         {
-             "Uncompressed": 1,
-             "CCITT 1d": 2,
-             "Group 3 Fax": 3,
-             "Group 4 Fax": 4,
-             "LZW": 5,
-             "JPEG": 6,
-             "PackBits": 32773,
-         },
-     ),
-     262: (
-         "PhotometricInterpretation",
-         SHORT,
-         1,
-         {
-             "WhiteIsZero": 0,
-             "BlackIsZero": 1,
-             "RGB": 2,
-             "RGB Palette": 3,
-             "Transparency Mask": 4,
-             "CMYK": 5,
-             "YCbCr": 6,
-             "CieLAB": 8,
-             "CFA": 32803,  # TIFF/EP, Adobe DNG
-             "LinearRaw": 32892,  # Adobe DNG
-         },
-     ),
-     263: ("Threshholding", SHORT, 1),
-     264: ("CellWidth", SHORT, 1),
-     265: ("CellLength", SHORT, 1),
-     266: ("FillOrder", SHORT, 1),
-     269: ("DocumentName", ASCII, 1),
-     270: ("ImageDescription", ASCII, 1),
-     271: ("Make", ASCII, 1),
-     272: ("Model", ASCII, 1),
-     273: ("StripOffsets", LONG, 0),
-     274: ("Orientation", SHORT, 1),
-     277: ("SamplesPerPixel", SHORT, 1),
-     278: ("RowsPerStrip", LONG, 1),
-     279: ("StripByteCounts", LONG, 0),
-     280: ("MinSampleValue", SHORT, 0),
-     281: ("MaxSampleValue", SHORT, 0),
-     282: ("XResolution", RATIONAL, 1),
-     283: ("YResolution", RATIONAL, 1),
-     284: ("PlanarConfiguration", SHORT, 1, {"Contiguous": 1, "Separate": 2}),
-     285: ("PageName", ASCII, 1),
-     286: ("XPosition", RATIONAL, 1),
-     287: ("YPosition", RATIONAL, 1),
-     288: ("FreeOffsets", LONG, 1),
-     289: ("FreeByteCounts", LONG, 1),
-     290: ("GrayResponseUnit", SHORT, 1),
-     291: ("GrayResponseCurve", SHORT, 0),
-     292: ("T4Options", LONG, 1),
-     293: ("T6Options", LONG, 1),
-     296: ("ResolutionUnit", SHORT, 1, {"none": 1, "inch": 2, "cm": 3}),
-     297: ("PageNumber", SHORT, 2),
-     301: ("TransferFunction", SHORT, 0),
-     305: ("Software", ASCII, 1),
-     306: ("DateTime", ASCII, 1),
-     315: ("Artist", ASCII, 1),
-     316: ("HostComputer", ASCII, 1),
-     317: ("Predictor", SHORT, 1, {"none": 1, "Horizontal Differencing": 2}),
-     318: ("WhitePoint", RATIONAL, 2),
-     319: ("PrimaryChromaticities", RATIONAL, 6),
-     320: ("ColorMap", SHORT, 0),
-     321: ("HalftoneHints", SHORT, 2),
-     322: ("TileWidth", LONG, 1),
-     323: ("TileLength", LONG, 1),
-     324: ("TileOffsets", LONG, 0),
-     325: ("TileByteCounts", LONG, 0),
-     330: ("SubIFDs", LONG, 0),
-     332: ("InkSet", SHORT, 1),
-     333: ("InkNames", ASCII, 1),
-     334: ("NumberOfInks", SHORT, 1),
-     336: ("DotRange", SHORT, 0),
-     337: ("TargetPrinter", ASCII, 1),
-     338: ("ExtraSamples", SHORT, 0),
-     339: ("SampleFormat", SHORT, 0),
-     340: ("SMinSampleValue", DOUBLE, 0),
-     341: ("SMaxSampleValue", DOUBLE, 0),
-     342: ("TransferRange", SHORT, 6),
-     347: ("JPEGTables", UNDEFINED, 1),
-     # obsolete JPEG tags
-     512: ("JPEGProc", SHORT, 1),
-     513: ("JPEGInterchangeFormat", LONG, 1),
-     514: ("JPEGInterchangeFormatLength", LONG, 1),
-     515: ("JPEGRestartInterval", SHORT, 1),
-     517: ("JPEGLosslessPredictors", SHORT, 0),
-     518: ("JPEGPointTransforms", SHORT, 0),
-     519: ("JPEGQTables", LONG, 0),
-     520: ("JPEGDCTables", LONG, 0),
-     521: ("JPEGACTables", LONG, 0),
-     529: ("YCbCrCoefficients", RATIONAL, 3),
-     530: ("YCbCrSubSampling", SHORT, 2),
-     531: ("YCbCrPositioning", SHORT, 1),
-     532: ("ReferenceBlackWhite", RATIONAL, 6),
-     700: ("XMP", BYTE, 0),
-     33432: ("Copyright", ASCII, 1),
-     33723: ("IptcNaaInfo", UNDEFINED, 1),
-     34377: ("PhotoshopInfo", BYTE, 0),
-     # FIXME add more tags here
-     34665: ("ExifIFD", LONG, 1),
-     34675: ("ICCProfile", UNDEFINED, 1),
-     34853: ("GPSInfoIFD", LONG, 1),
-     36864: ("ExifVersion", UNDEFINED, 1),
-     37724: ("ImageSourceData", UNDEFINED, 1),
-     40965: ("InteroperabilityIFD", LONG, 1),
-     41730: ("CFAPattern", UNDEFINED, 1),
-     # MPInfo
-     45056: ("MPFVersion", UNDEFINED, 1),
-     45057: ("NumberOfImages", LONG, 1),
-     45058: ("MPEntry", UNDEFINED, 1),
-     45059: ("ImageUIDList", UNDEFINED, 0),  # UNDONE, check
-     45060: ("TotalFrames", LONG, 1),
-     45313: ("MPIndividualNum", LONG, 1),
-     45569: ("PanOrientation", LONG, 1),
-     45570: ("PanOverlap_H", RATIONAL, 1),
-     45571: ("PanOverlap_V", RATIONAL, 1),
-     45572: ("BaseViewpointNum", LONG, 1),
-     45573: ("ConvergenceAngle", SIGNED_RATIONAL, 1),
-     45574: ("BaselineLength", RATIONAL, 1),
-     45575: ("VerticalDivergence", SIGNED_RATIONAL, 1),
-     45576: ("AxisDistance_X", SIGNED_RATIONAL, 1),
-     45577: ("AxisDistance_Y", SIGNED_RATIONAL, 1),
-     45578: ("AxisDistance_Z", SIGNED_RATIONAL, 1),
-     45579: ("YawAngle", SIGNED_RATIONAL, 1),
-     45580: ("PitchAngle", SIGNED_RATIONAL, 1),
-     45581: ("RollAngle", SIGNED_RATIONAL, 1),
-     40960: ("FlashPixVersion", UNDEFINED, 1),
-     50741: ("MakerNoteSafety", SHORT, 1, {"Unsafe": 0, "Safe": 1}),
-     50780: ("BestQualityScale", RATIONAL, 1),
-     50838: ("ImageJMetaDataByteCounts", LONG, 0),  # Can be more than one
-     50839: ("ImageJMetaData", UNDEFINED, 1),  # see Issue #2006
- }
- TAGS_V2_GROUPS = {
-     # ExifIFD
-     34665: {
-         36864: ("ExifVersion", UNDEFINED, 1),
-         40960: ("FlashPixVersion", UNDEFINED, 1),
-         40965: ("InteroperabilityIFD", LONG, 1),
-         41730: ("CFAPattern", UNDEFINED, 1),
-     },
-     # GPSInfoIFD
-     34853: {
-         0: ("GPSVersionID", BYTE, 4),
-         1: ("GPSLatitudeRef", ASCII, 2),
-         2: ("GPSLatitude", RATIONAL, 3),
-         3: ("GPSLongitudeRef", ASCII, 2),
-         4: ("GPSLongitude", RATIONAL, 3),
-         5: ("GPSAltitudeRef", BYTE, 1),
-         6: ("GPSAltitude", RATIONAL, 1),
-         7: ("GPSTimeStamp", RATIONAL, 3),
-         8: ("GPSSatellites", ASCII, 0),
-         9: ("GPSStatus", ASCII, 2),
-         10: ("GPSMeasureMode", ASCII, 2),
-         11: ("GPSDOP", RATIONAL, 1),
-         12: ("GPSSpeedRef", ASCII, 2),
-         13: ("GPSSpeed", RATIONAL, 1),
-         14: ("GPSTrackRef", ASCII, 2),
-         15: ("GPSTrack", RATIONAL, 1),
-         16: ("GPSImgDirectionRef", ASCII, 2),
-         17: ("GPSImgDirection", RATIONAL, 1),
-         18: ("GPSMapDatum", ASCII, 0),
-         19: ("GPSDestLatitudeRef", ASCII, 2),
-         20: ("GPSDestLatitude", RATIONAL, 3),
-         21: ("GPSDestLongitudeRef", ASCII, 2),
-         22: ("GPSDestLongitude", RATIONAL, 3),
-         23: ("GPSDestBearingRef", ASCII, 2),
-         24: ("GPSDestBearing", RATIONAL, 1),
-         25: ("GPSDestDistanceRef", ASCII, 2),
-         26: ("GPSDestDistance", RATIONAL, 1),
-         27: ("GPSProcessingMethod", UNDEFINED, 0),
-         28: ("GPSAreaInformation", UNDEFINED, 0),
-         29: ("GPSDateStamp", ASCII, 11),
-         30: ("GPSDifferential", SHORT, 1),
-     },
-     # InteroperabilityIFD
-     40965: {1: ("InteropIndex", ASCII, 1), 2: ("InteropVersion", UNDEFINED, 1)},
- }
-
- # Legacy Tags structure
- # these tags aren't included above, but were in the previous versions
- TAGS = {
-     347: "JPEGTables",
-     700: "XMP",
-     # Additional Exif Info
-     32932: "Wang Annotation",
-     33434: "ExposureTime",
-     33437: "FNumber",
-     33445: "MD FileTag",
-     33446: "MD ScalePixel",
-     33447: "MD ColorTable",
-     33448: "MD LabName",
-     33449: "MD SampleInfo",
-     33450: "MD PrepDate",
-     33451: "MD PrepTime",
-     33452: "MD FileUnits",
-     33550: "ModelPixelScaleTag",
-     33723: "IptcNaaInfo",
-     33918: "INGR Packet Data Tag",
-     33919: "INGR Flag Registers",
-     33920: "IrasB Transformation Matrix",
-     33922: "ModelTiepointTag",
-     34264: "ModelTransformationTag",
-     34377: "PhotoshopInfo",
-     34735: "GeoKeyDirectoryTag",
-     34736: "GeoDoubleParamsTag",
-     34737: "GeoAsciiParamsTag",
-     34850: "ExposureProgram",
-     34852: "SpectralSensitivity",
-     34855: "ISOSpeedRatings",
-     34856: "OECF",
-     34864: "SensitivityType",
-     34865: "StandardOutputSensitivity",
-     34866: "RecommendedExposureIndex",
-     34867: "ISOSpeed",
-     34868: "ISOSpeedLatitudeyyy",
-     34869: "ISOSpeedLatitudezzz",
-     34908: "HylaFAX FaxRecvParams",
-     34909: "HylaFAX FaxSubAddress",
-     34910: "HylaFAX FaxRecvTime",
-     36864: "ExifVersion",
-     36867: "DateTimeOriginal",
-     36868: "DateTimeDigitized",
-     37121: "ComponentsConfiguration",
-     37122: "CompressedBitsPerPixel",
-     37724: "ImageSourceData",
-     37377: "ShutterSpeedValue",
-     37378: "ApertureValue",
-     37379: "BrightnessValue",
-     37380: "ExposureBiasValue",
-     37381: "MaxApertureValue",
-     37382: "SubjectDistance",
-     37383: "MeteringMode",
-     37384: "LightSource",
-     37385: "Flash",
-     37386: "FocalLength",
-     37396: "SubjectArea",
-     37500: "MakerNote",
-     37510: "UserComment",
-     37520: "SubSec",
-     37521: "SubSecTimeOriginal",
-     37522: "SubsecTimeDigitized",
-     40960: "FlashPixVersion",
-     40961: "ColorSpace",
-     40962: "PixelXDimension",
-     40963: "PixelYDimension",
-     40964: "RelatedSoundFile",
-     40965: "InteroperabilityIFD",
-     41483: "FlashEnergy",
-     41484: "SpatialFrequencyResponse",
-     41486: "FocalPlaneXResolution",
-     41487: "FocalPlaneYResolution",
-     41488: "FocalPlaneResolutionUnit",
-     41492: "SubjectLocation",
-     41493: "ExposureIndex",
-     41495: "SensingMethod",
-     41728: "FileSource",
-     41729: "SceneType",
-     41730: "CFAPattern",
-     41985: "CustomRendered",
-     41986: "ExposureMode",
-     41987: "WhiteBalance",
-     41988: "DigitalZoomRatio",
-     41989: "FocalLengthIn35mmFilm",
-     41990: "SceneCaptureType",
-     41991: "GainControl",
-     41992: "Contrast",
-     41993: "Saturation",
-     41994: "Sharpness",
-     41995: "DeviceSettingDescription",
-     41996: "SubjectDistanceRange",
-     42016: "ImageUniqueID",
-     42032: "CameraOwnerName",
-     42033: "BodySerialNumber",
-     42034: "LensSpecification",
-     42035: "LensMake",
-     42036: "LensModel",
-     42037: "LensSerialNumber",
-     42112: "GDAL_METADATA",
-     42113: "GDAL_NODATA",
-     42240: "Gamma",
-     50215: "Oce Scanjob Description",
-     50216: "Oce Application Selector",
-     50217: "Oce Identification Number",
-     50218: "Oce ImageLogic Characteristics",
-     # Adobe DNG
-     50706: "DNGVersion",
-     50707: "DNGBackwardVersion",
-     50708: "UniqueCameraModel",
-     50709: "LocalizedCameraModel",
-     50710: "CFAPlaneColor",
-     50711: "CFALayout",
-     50712: "LinearizationTable",
-     50713: "BlackLevelRepeatDim",
-     50714: "BlackLevel",
-     50715: "BlackLevelDeltaH",
-     50716: "BlackLevelDeltaV",
-     50717: "WhiteLevel",
-     50718: "DefaultScale",
-     50719: "DefaultCropOrigin",
-     50720: "DefaultCropSize",
-     50721: "ColorMatrix1",
-     50722: "ColorMatrix2",
-     50723: "CameraCalibration1",
-     50724: "CameraCalibration2",
-     50725: "ReductionMatrix1",
-     50726: "ReductionMatrix2",
-     50727: "AnalogBalance",
-     50728: "AsShotNeutral",
-     50729: "AsShotWhiteXY",
-     50730: "BaselineExposure",
-     50731: "BaselineNoise",
-     50732: "BaselineSharpness",
-     50733: "BayerGreenSplit",
-     50734: "LinearResponseLimit",
-     50735: "CameraSerialNumber",
-     50736: "LensInfo",
-     50737: "ChromaBlurRadius",
-     50738: "AntiAliasStrength",
-     50740: "DNGPrivateData",
-     50778: "CalibrationIlluminant1",
-     50779: "CalibrationIlluminant2",
-     50784: "Alias Layer Metadata",
- }
-
-
- def _populate():
-     for k, v in TAGS_V2.items():
-         # Populate legacy structure.
-         TAGS[k] = v[0]
-         if len(v) == 4:
-             for sk, sv in v[3].items():
-                 TAGS[(k, sv)] = sk
-
-         TAGS_V2[k] = TagInfo(k, *v)
-
-     for group, tags in TAGS_V2_GROUPS.items():
-         for k, v in tags.items():
-             tags[k] = TagInfo(k, *v)
-
-
- _populate()
- ##
- # Map type numbers to type names -- defined in ImageFileDirectory.
-
- TYPES = {}
-
- # was:
- # TYPES = {
- #     1: "byte",
- #     2: "ascii",
- #     3: "short",
- #     4: "long",
- #     5: "rational",
- #     6: "signed byte",
- #     7: "undefined",
- #     8: "signed short",
- #     9: "signed long",
- #     10: "signed rational",
- #     11: "float",
- #     12: "double",
- # }
-
- #
- # These tags are handled by default in libtiff, without
- # adding to the custom dictionary. From tif_dir.c, searching for
- # case TIFFTAG in the _TIFFVSetField function:
- # Line: item.
- # 148: case TIFFTAG_SUBFILETYPE:
- # 151: case TIFFTAG_IMAGEWIDTH:
- # 154: case TIFFTAG_IMAGELENGTH:
- # 157: case TIFFTAG_BITSPERSAMPLE:
- # 181: case TIFFTAG_COMPRESSION:
- # 202: case TIFFTAG_PHOTOMETRIC:
- # 205: case TIFFTAG_THRESHHOLDING:
- # 208: case TIFFTAG_FILLORDER:
- # 214: case TIFFTAG_ORIENTATION:
- # 221: case TIFFTAG_SAMPLESPERPIXEL:
- # 228: case TIFFTAG_ROWSPERSTRIP:
- # 238: case TIFFTAG_MINSAMPLEVALUE:
- # 241: case TIFFTAG_MAXSAMPLEVALUE:
- # 244: case TIFFTAG_SMINSAMPLEVALUE:
- # 247: case TIFFTAG_SMAXSAMPLEVALUE:
- # 250: case TIFFTAG_XRESOLUTION:
- # 256: case TIFFTAG_YRESOLUTION:
- # 262: case TIFFTAG_PLANARCONFIG:
- # 268: case TIFFTAG_XPOSITION:
- # 271: case TIFFTAG_YPOSITION:
- # 274: case TIFFTAG_RESOLUTIONUNIT:
- # 280: case TIFFTAG_PAGENUMBER:
- # 284: case TIFFTAG_HALFTONEHINTS:
- # 288: case TIFFTAG_COLORMAP:
- # 294: case TIFFTAG_EXTRASAMPLES:
- # 298: case TIFFTAG_MATTEING:
- # 305: case TIFFTAG_TILEWIDTH:
- # 316: case TIFFTAG_TILELENGTH:
- # 327: case TIFFTAG_TILEDEPTH:
- # 333: case TIFFTAG_DATATYPE:
- # 344: case TIFFTAG_SAMPLEFORMAT:
- # 361: case TIFFTAG_IMAGEDEPTH:
- # 364: case TIFFTAG_SUBIFD:
- # 376: case TIFFTAG_YCBCRPOSITIONING:
- # 379: case TIFFTAG_YCBCRSUBSAMPLING:
- # 383: case TIFFTAG_TRANSFERFUNCTION:
- # 389: case TIFFTAG_REFERENCEBLACKWHITE:
- # 393: case TIFFTAG_INKNAMES:
-
- # Following pseudo-tags are also handled by default in libtiff:
- # TIFFTAG_JPEGQUALITY 65537
-
- # some of these are not in our TAGS_V2 dict and were included from tiff.h
-
- # This list also exists in encode.c
- LIBTIFF_CORE = {
-     255,
-     256,
-     257,
-     258,
-     259,
-     262,
-     263,
-     266,
-     274,
-     277,
-     278,
-     280,
-     281,
-     340,
-     341,
-     282,
-     283,
-     284,
-     286,
-     287,
-     296,
-     297,
-     321,
-     320,
-     338,
-     32995,
-     322,
-     323,
-     32998,
-     32996,
-     339,
-     32997,
-     330,
-     531,
-     530,
-     301,
-     532,
-     333,
-     # as above
-     269,  # this has been in our tests forever, and works
-     65537,
- }
-
- LIBTIFF_CORE.remove(255)  # We don't have support for subfiletypes
- LIBTIFF_CORE.remove(322)  # We don't have support for writing tiled images with libtiff
- LIBTIFF_CORE.remove(323)  # Tiled images
- LIBTIFF_CORE.remove(333)  # Ink Names either
-
- # Note to advanced users: There may be combinations of these
- # parameters and values that when added properly, will work and
- # produce valid tiff images that may work in your application.
- # It is safe to add and remove tags from this set from Pillow's point
- # of view so long as you test against libtiff.
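
For reference, a minimal use of the `lookup()` helper defined above (assuming the module is importable as `PIL.TiffTags`):

    from PIL.TiffTags import lookup

    info = lookup(256)  # 256 is ImageWidth in TAGS_V2
    print(info.name, info.type)  # -> ImageWidth 4 (LONG)
    gps = lookup(2, group=34853)  # tag 2 inside the GPSInfoIFD group
    print(gps.name)  # -> GPSLatitude
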
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_server.py DELETED
@@ -1,62 +0,0 @@
- """Low level HTTP server."""
- import asyncio
- from typing import Any, Awaitable, Callable, Dict, List, Optional  # noqa
-
- from .abc import AbstractStreamWriter
- from .helpers import get_running_loop
- from .http_parser import RawRequestMessage
- from .streams import StreamReader
- from .web_protocol import RequestHandler, _RequestFactory, _RequestHandler
- from .web_request import BaseRequest
-
- __all__ = ("Server",)
-
-
- class Server:
-     def __init__(
-         self,
-         handler: _RequestHandler,
-         *,
-         request_factory: Optional[_RequestFactory] = None,
-         loop: Optional[asyncio.AbstractEventLoop] = None,
-         **kwargs: Any
-     ) -> None:
-         self._loop = get_running_loop(loop)
-         self._connections: Dict[RequestHandler, asyncio.Transport] = {}
-         self._kwargs = kwargs
-         self.requests_count = 0
-         self.request_handler = handler
-         self.request_factory = request_factory or self._make_request
-
-     @property
-     def connections(self) -> List[RequestHandler]:
-         return list(self._connections.keys())
-
-     def connection_made(
-         self, handler: RequestHandler, transport: asyncio.Transport
-     ) -> None:
-         self._connections[handler] = transport
-
-     def connection_lost(
-         self, handler: RequestHandler, exc: Optional[BaseException] = None
-     ) -> None:
-         if handler in self._connections:
-             del self._connections[handler]
-
-     def _make_request(
-         self,
-         message: RawRequestMessage,
-         payload: StreamReader,
-         protocol: RequestHandler,
-         writer: AbstractStreamWriter,
-         task: "asyncio.Task[None]",
-     ) -> BaseRequest:
-         return BaseRequest(message, payload, protocol, writer, task, self._loop)
-
-     async def shutdown(self, timeout: Optional[float] = None) -> None:
-         coros = [conn.shutdown(timeout) for conn in self._connections]
-         await asyncio.gather(*coros)
-         self._connections.clear()
-
-     def __call__(self) -> RequestHandler:
-         return RequestHandler(self, loop=self._loop, **self._kwargs)
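
This class backs aiohttp's documented low-level server entry point, exported as `aiohttp.web.Server`. A minimal sketch of that usage (host and port chosen arbitrarily):

    import asyncio
    from aiohttp import web

    async def handler(request):
        return web.Response(text="OK")

    async def main():
        # web.Server wraps the raw handler; ServerRunner/TCPSite do the socket setup.
        runner = web.ServerRunner(web.Server(handler))
        await runner.setup()
        site = web.TCPSite(runner, "localhost", 8080)
        await site.start()
        await asyncio.Event().wait()  # serve until cancelled

    asyncio.run(main())
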
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/roundTools.py DELETED
@@ -1,109 +0,0 @@
- """
- Various round-to-integer helpers.
- """
-
- import math
- import functools
- import logging
-
- log = logging.getLogger(__name__)
-
- __all__ = [
-     "noRound",
-     "otRound",
-     "maybeRound",
-     "roundFunc",
- ]
-
-
- def noRound(value):
-     return value
-
-
- def otRound(value):
-     """Round float value to nearest integer towards ``+Infinity``.
-
-     The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
-     defines the required method for converting floating point values to
-     fixed-point. In particular it specifies the following rounding strategy:
-
-         for fractional values of 0.5 and higher, take the next higher integer;
-         for other fractional values, truncate.
-
-     This function rounds the floating-point value according to this strategy
-     in preparation for conversion to fixed-point.
-
-     Args:
-         value (float): The input floating-point value.
-
-     Returns:
-         float: The rounded value.
-     """
-     # See this thread for how we ended up with this implementation:
-     # https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
-     return int(math.floor(value + 0.5))
-
-
- def maybeRound(v, tolerance, round=otRound):
-     rounded = round(v)
-     return rounded if abs(rounded - v) <= tolerance else v
-
-
- def roundFunc(tolerance, round=otRound):
-     if tolerance < 0:
-         raise ValueError("Rounding tolerance must be positive")
-
-     if tolerance == 0:
-         return noRound
-
-     if tolerance >= 0.5:
-         return round
-
-     return functools.partial(maybeRound, tolerance=tolerance, round=round)
-
-
- def nearestMultipleShortestRepr(value: float, factor: float) -> str:
-     """Round to nearest multiple of factor and return shortest decimal representation.
-
-     This chooses the float that is closer to a multiple of the given factor while
-     having the shortest decimal representation (the least number of fractional decimal
-     digits).
-
-     For example, given the following:
-
-     >>> nearestMultipleShortestRepr(-0.61883544921875, 1.0/(1<<14))
-     '-0.61884'
-
-     Useful when you need to serialize or print a fixed-point number (or multiples
-     thereof, such as F2Dot14 fractions of 180 degrees in COLRv1 PaintRotate) in
-     a human-readable form.
-
-     Args:
-         value (float): The value to be rounded and serialized.
-         factor (float): The value which the result is a close multiple of.
-
-     Returns:
-         str: A compact string representation of the value.
-     """
-     if not value:
-         return "0.0"
-
-     value = otRound(value / factor) * factor
-     eps = 0.5 * factor
-     lo = value - eps
-     hi = value + eps
-     # If the range of valid choices spans an integer, return the integer.
-     if int(lo) != int(hi):
-         return str(float(round(value)))
-
-     fmt = "%.8f"
-     lo = fmt % lo
-     hi = fmt % hi
-     assert len(lo) == len(hi) and lo != hi
-     for i in range(len(lo)):
-         if lo[i] != hi[i]:
-             break
-     period = lo.find(".")
-     assert period < i
-     fmt = "%%.%df" % (i - period)
-     return fmt % value
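
The difference between `otRound` and Python's built-in `round` (which ties to even) is easy to miss; a couple of spot checks of the helper above:

    import math

    def otRound(value):  # copy of the helper above
        return int(math.floor(value + 0.5))

    assert otRound(0.5) == 1 and otRound(1.5) == 2
    assert round(0.5) == 0 and round(1.5) == 2  # builtin round() ties to even
    assert otRound(-0.5) == 0  # 0.5 fractions round toward +Infinity
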
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/inference/_text_generation.py DELETED
@@ -1,479 +0,0 @@
- # coding=utf-8
- # Copyright 2023-present, the HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- #
- # Original implementation taken from the `text-generation` Python client (see https://pypi.org/project/text-generation/
- # and https://github.com/huggingface/text-generation-inference/tree/main/clients/python)
- #
- # Changes compared to original implementation:
- # - use pydantic.dataclasses instead of BaseModel
- # - default to Python's dataclasses if Pydantic is not installed (same implementation but no validation)
- # - added default values for all parameters (not needed with BaseModel but required for dataclasses)
- # - integrated in `huggingface_hub.InferenceClient`
- # - added `stream: bool` and `details: bool` in the `text_generation` method instead of having different methods for each use case
- # - NO asyncio support yet => TODO soon
-
- from dataclasses import field
- from enum import Enum
- from typing import List, NoReturn, Optional
-
- from requests import HTTPError
-
- from ..utils import is_pydantic_available
-
-
- if is_pydantic_available():
-     from pydantic import validator
-     from pydantic.dataclasses import dataclass
- else:
-     # No validation if Pydantic is not installed
-     from dataclasses import dataclass  # type: ignore
-
-     def validator(x):  # type: ignore
-         return lambda y: y
-
-
- @dataclass
- class TextGenerationParameters:
-     """
-     Parameters for text generation.
-
-     Args:
-         do_sample (`bool`, *optional*):
-             Activate logits sampling. Defaults to False.
-         max_new_tokens (`int`, *optional*):
-             Maximum number of generated tokens. Defaults to 20.
-         repetition_penalty (`Optional[float]`, *optional*):
-             The parameter for repetition penalty. A value of 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf)
-             for more details. Defaults to None.
-         return_full_text (`bool`, *optional*):
-             Whether to prepend the prompt to the generated text. Defaults to False.
-         stop (`List[str]`, *optional*):
-             Stop generating tokens if a member of `stop_sequences` is generated. Defaults to an empty list.
-         seed (`Optional[int]`, *optional*):
-             Random sampling seed. Defaults to None.
-         temperature (`Optional[float]`, *optional*):
-             The value used to modulate the logits distribution. Defaults to None.
-         top_k (`Optional[int]`, *optional*):
-             The number of highest probability vocabulary tokens to keep for top-k-filtering. Defaults to None.
-         top_p (`Optional[float]`, *optional*):
-             If set to a value less than 1, only the smallest set of most probable tokens with probabilities that add up
-             to `top_p` or higher are kept for generation. Defaults to None.
-         truncate (`Optional[int]`, *optional*):
-             Truncate input tokens to the given size. Defaults to None.
-         typical_p (`Optional[float]`, *optional*):
-             Typical Decoding mass. See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666)
-             for more information. Defaults to None.
-         best_of (`Optional[int]`, *optional*):
-             Generate `best_of` sequences and return the one with the highest token logprobs. Defaults to None.
-         watermark (`bool`, *optional*):
-             Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226). Defaults to False.
-         details (`bool`, *optional*):
-             Get generation details. Defaults to False.
-         decoder_input_details (`bool`, *optional*):
-             Get decoder input token logprobs and ids. Defaults to False.
-     """
-
-     # Activate logits sampling
-     do_sample: bool = False
-     # Maximum number of generated tokens
-     max_new_tokens: int = 20
-     # The parameter for repetition penalty. 1.0 means no penalty.
-     # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
-     repetition_penalty: Optional[float] = None
-     # Whether to prepend the prompt to the generated text
-     return_full_text: bool = False
-     # Stop generating tokens if a member of `stop_sequences` is generated
-     stop: List[str] = field(default_factory=lambda: [])
-     # Random sampling seed
-     seed: Optional[int] = None
-     # The value used to modulate the logits distribution.
-     temperature: Optional[float] = None
-     # The number of highest probability vocabulary tokens to keep for top-k-filtering.
-     top_k: Optional[int] = None
-     # If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
-     # higher are kept for generation.
-     top_p: Optional[float] = None
-     # Truncate input tokens to the given size
-     truncate: Optional[int] = None
-     # Typical Decoding mass
-     # See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
-     typical_p: Optional[float] = None
-     # Generate best_of sequences and return the one with the highest token logprobs
-     best_of: Optional[int] = None
-     # Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
-     watermark: bool = False
-     # Get generation details
-     details: bool = False
-     # Get decoder input token logprobs and ids
-     decoder_input_details: bool = False
-
-     @validator("best_of")
-     def valid_best_of(cls, field_value, values):
-         if field_value is not None:
-             if field_value <= 0:
-                 raise ValueError("`best_of` must be strictly positive")
-             if field_value > 1 and values["seed"] is not None:
-                 raise ValueError("`seed` must not be set when `best_of` is > 1")
-             sampling = (
-                 values["do_sample"]
-                 | (values["temperature"] is not None)
-                 | (values["top_k"] is not None)
-                 | (values["top_p"] is not None)
-                 | (values["typical_p"] is not None)
-             )
-             if field_value > 1 and not sampling:
-                 raise ValueError("you must use sampling when `best_of` is > 1")
-
-         return field_value
-
-     @validator("repetition_penalty")
-     def valid_repetition_penalty(cls, v):
-         if v is not None and v <= 0:
-             raise ValueError("`repetition_penalty` must be strictly positive")
-         return v
-
-     @validator("seed")
-     def valid_seed(cls, v):
-         if v is not None and v < 0:
-             raise ValueError("`seed` must be non-negative")
-         return v
-
-     @validator("temperature")
-     def valid_temp(cls, v):
-         if v is not None and v <= 0:
-             raise ValueError("`temperature` must be strictly positive")
-         return v
-
-     @validator("top_k")
-     def valid_top_k(cls, v):
-         if v is not None and v <= 0:
-             raise ValueError("`top_k` must be strictly positive")
-         return v
-
-     @validator("top_p")
-     def valid_top_p(cls, v):
-         if v is not None and (v <= 0 or v >= 1.0):
-             raise ValueError("`top_p` must be > 0.0 and < 1.0")
-         return v
-
-     @validator("truncate")
-     def valid_truncate(cls, v):
-         if v is not None and v <= 0:
-             raise ValueError("`truncate` must be strictly positive")
-         return v
-
-     @validator("typical_p")
-     def valid_typical_p(cls, v):
-         if v is not None and (v <= 0 or v >= 1.0):
-             raise ValueError("`typical_p` must be > 0.0 and < 1.0")
-         return v
-
-
- @dataclass
- class TextGenerationRequest:
-     """
-     Request object for text generation (only for internal use).
-
-     Args:
-         inputs (`str`):
-             The prompt for text generation.
-         parameters (`Optional[TextGenerationParameters]`, *optional*):
-             Generation parameters.
-         stream (`bool`, *optional*):
-             Whether to stream output tokens. Defaults to False.
-     """
-
-     # Prompt
-     inputs: str
-     # Generation parameters
-     parameters: Optional[TextGenerationParameters] = None
-     # Whether to stream output tokens
-     stream: bool = False
-
-     @validator("inputs")
-     def valid_input(cls, v):
-         if not v:
-             raise ValueError("`inputs` cannot be empty")
-         return v
-
-     @validator("stream")
-     def valid_best_of_stream(cls, field_value, values):
-         parameters = values["parameters"]
-         if parameters is not None and parameters.best_of is not None and parameters.best_of > 1 and field_value:
-             raise ValueError("`best_of` != 1 is not supported when `stream` == True")
-         return field_value
-
-
- # Decoder input tokens
- @dataclass
- class InputToken:
-     """
-     Represents an input token.
-
-     Args:
-         id (`int`):
-             Token ID from the model tokenizer.
-         text (`str`):
-             Token text.
-         logprob (`float` or `None`):
-             Log probability of the token. Optional since the logprob of the first token cannot be computed.
-     """
-
-     # Token ID from the model tokenizer
-     id: int
-     # Token text
-     text: str
-     # Logprob
-     # Optional since the logprob of the first token cannot be computed
-     logprob: Optional[float] = None
-
-
- # Generated tokens
- @dataclass
- class Token:
-     """
-     Represents a token.
-
-     Args:
-         id (`int`):
-             Token ID from the model tokenizer.
-         text (`str`):
-             Token text.
-         logprob (`float`):
-             Log probability of the token.
-         special (`bool`):
-             Indicates whether the token is a special token. It can be used to ignore
-             tokens when concatenating.
-     """
-
-     # Token ID from the model tokenizer
-     id: int
-     # Token text
-     text: str
-     # Logprob
-     logprob: float
-     # Is the token a special token
-     # Can be used to ignore tokens when concatenating
-     special: bool
-
-
- # Generation finish reason
- class FinishReason(str, Enum):
-     # number of generated tokens == `max_new_tokens`
-     Length = "length"
-     # the model generated its end of sequence token
-     EndOfSequenceToken = "eos_token"
-     # the model generated a text included in `stop_sequences`
-     StopSequence = "stop_sequence"
-
-
- # Additional sequences when using the `best_of` parameter
- @dataclass
- class BestOfSequence:
-     """
-     Represents a best-of sequence generated during text generation.
-
-     Args:
-         generated_text (`str`):
-             The generated text.
-         finish_reason (`FinishReason`):
-             The reason for the generation to finish, represented by a `FinishReason` value.
-         generated_tokens (`int`):
-             The number of generated tokens in the sequence.
-         seed (`Optional[int]`):
-             The sampling seed if sampling was activated.
-         prefill (`List[InputToken]`):
-             The decoder input tokens. Empty if `decoder_input_details` is False. Defaults to an empty list.
-         tokens (`List[Token]`):
-             The generated tokens. Defaults to an empty list.
-     """
-
-     # Generated text
-     generated_text: str
-     # Generation finish reason
-     finish_reason: FinishReason
-     # Number of generated tokens
-     generated_tokens: int
-     # Sampling seed if sampling was activated
-     seed: Optional[int] = None
-     # Decoder input tokens, empty if decoder_input_details is False
-     prefill: List[InputToken] = field(default_factory=lambda: [])
-     # Generated tokens
-     tokens: List[Token] = field(default_factory=lambda: [])
-
-
- # `generate` details
- @dataclass
- class Details:
-     """
-     Represents details of a text generation.
-
-     Args:
-         finish_reason (`FinishReason`):
-             The reason for the generation to finish, represented by a `FinishReason` value.
-         generated_tokens (`int`):
-             The number of generated tokens.
-         seed (`Optional[int]`):
-             The sampling seed if sampling was activated.
-         prefill (`List[InputToken]`, *optional*):
-             The decoder input tokens. Empty if `decoder_input_details` is False. Defaults to an empty list.
-         tokens (`List[Token]`):
-             The generated tokens. Defaults to an empty list.
-         best_of_sequences (`Optional[List[BestOfSequence]]`):
-             Additional sequences when using the `best_of` parameter.
-     """
-
-     # Generation finish reason
-     finish_reason: FinishReason
-     # Number of generated tokens
-     generated_tokens: int
-     # Sampling seed if sampling was activated
-     seed: Optional[int] = None
-     # Decoder input tokens, empty if decoder_input_details is False
-     prefill: List[InputToken] = field(default_factory=lambda: [])
-     # Generated tokens
-     tokens: List[Token] = field(default_factory=lambda: [])
-     # Additional sequences when using the `best_of` parameter
-     best_of_sequences: Optional[List[BestOfSequence]] = None
-
-
- # `generate` return value
- @dataclass
- class TextGenerationResponse:
-     """
-     Represents a response for text generation.
-
-     In practice, if `details=False` is passed (default), only the generated text is returned.
-
-     Args:
-         generated_text (`str`):
-             The generated text.
-         details (`Optional[Details]`):
-             Generation details. Returned only if `details=True` is sent to the server.
-     """
-
-     # Generated text
-     generated_text: str
-     # Generation details
-     details: Optional[Details] = None
-
-
- # `generate_stream` details
- @dataclass
- class StreamDetails:
-     """
-     Represents details of a text generation stream.
-
-     Args:
-         finish_reason (`FinishReason`):
-             The reason for the generation to finish, represented by a `FinishReason` value.
-         generated_tokens (`int`):
-             The number of generated tokens.
-         seed (`Optional[int]`):
-             The sampling seed if sampling was activated.
-     """
-
-     # Generation finish reason
-     finish_reason: FinishReason
-     # Number of generated tokens
-     generated_tokens: int
-     # Sampling seed if sampling was activated
-     seed: Optional[int] = None
-
-
- # `generate_stream` return value
- @dataclass
- class TextGenerationStreamResponse:
-     """
-     Represents a response for text generation when `stream=True` is passed.
-
-     Args:
-         token (`Token`):
-             The generated token.
-         generated_text (`Optional[str]`, *optional*):
-             The complete generated text. Only available when the generation is finished.
-         details (`Optional[StreamDetails]`, *optional*):
-             Generation details. Only available when the generation is finished.
-     """
-
-     # Generated token
-     token: Token
-     # Complete generated text
-     # Only available when the generation is finished
-     generated_text: Optional[str] = None
-     # Generation details
-     # Only available when the generation is finished
-     details: Optional[StreamDetails] = None
-
-
- # TEXT GENERATION ERRORS
- # ----------------------
- # Text-generation errors are parsed separately to handle as much as possible the errors returned by the text generation
- # inference project (https://github.com/huggingface/text-generation-inference).
- # ----------------------
-
-
- class TextGenerationError(HTTPError):
-     """Generic error raised if text-generation went wrong."""
-
-
- # Text Generation Inference Errors
- class ValidationError(TextGenerationError):
-     """Server-side validation error."""
-
-
- class GenerationError(TextGenerationError):
-     pass
-
-
- class OverloadedError(TextGenerationError):
-     pass
-
-
- class IncompleteGenerationError(TextGenerationError):
-     pass
-
-
- def raise_text_generation_error(http_error: HTTPError) -> NoReturn:
-     """
-     Try to parse the text-generation-inference error message and raise an HTTPError in any case.
-
-     Args:
-         http_error (`HTTPError`):
-             The HTTPError that has been raised.
-     """
-     # Try to parse a Text Generation Inference error
-
-     try:
-         # Hacky way to retrieve payload in case of aiohttp error
-         payload = getattr(http_error, "response_error_payload", None) or http_error.response.json()
-         message = payload.get("error")
-         error_type = payload.get("error_type")
-     except Exception:  # no payload
-         raise http_error
-
-     # If error_type => more information than `hf_raise_for_status`
-     if error_type is not None:
-         if error_type == "generation":
-             raise GenerationError(message) from http_error
-         if error_type == "incomplete_generation":
-             raise IncompleteGenerationError(message) from http_error
-         if error_type == "overloaded":
-             raise OverloadedError(message) from http_error
-         if error_type == "validation":
-             raise ValidationError(message) from http_error
-
-     # Otherwise, fallback to default error
-     raise http_error
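
A minimal usage sketch of the dataclasses above. The import path is the private module this file lived at, so it is not a stable API and may change between huggingface_hub versions; the validators only run when Pydantic is installed (its ValidationError subclasses ValueError, so the except clause below catches it):

from huggingface_hub.inference._text_generation import (
    TextGenerationParameters,
    TextGenerationRequest,
)

params = TextGenerationParameters(do_sample=True, temperature=0.7, top_p=0.9, max_new_tokens=64)
request = TextGenerationRequest(inputs="Once upon a time", parameters=params)

# With Pydantic installed, invalid values are rejected at construction time:
try:
    TextGenerationParameters(temperature=0.0)
except ValueError as err:
    print(err)  # `temperature` must be strictly positive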
spaces/Dinoking/Guccio-AI-Designer/models/stylegan2/stylegan2-pytorch/distributed.py DELETED
@@ -1,126 +0,0 @@
- import math
- import pickle
-
- import torch
- from torch import distributed as dist
- from torch.utils.data.sampler import Sampler
-
-
- def get_rank():
-     if not dist.is_available():
-         return 0
-
-     if not dist.is_initialized():
-         return 0
-
-     return dist.get_rank()
-
-
- def synchronize():
-     if not dist.is_available():
-         return
-
-     if not dist.is_initialized():
-         return
-
-     world_size = dist.get_world_size()
-
-     if world_size == 1:
-         return
-
-     dist.barrier()
-
-
- def get_world_size():
-     if not dist.is_available():
-         return 1
-
-     if not dist.is_initialized():
-         return 1
-
-     return dist.get_world_size()
-
-
- def reduce_sum(tensor):
-     if not dist.is_available():
-         return tensor
-
-     if not dist.is_initialized():
-         return tensor
-
-     tensor = tensor.clone()
-     dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
-
-     return tensor
-
-
- def gather_grad(params):
-     world_size = get_world_size()
-
-     if world_size == 1:
-         return
-
-     for param in params:
-         if param.grad is not None:
-             dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
-             param.grad.data.div_(world_size)
-
-
- def all_gather(data):
-     world_size = get_world_size()
-
-     if world_size == 1:
-         return [data]
-
-     buffer = pickle.dumps(data)
-     storage = torch.ByteStorage.from_buffer(buffer)
-     tensor = torch.ByteTensor(storage).to('cuda')
-
-     local_size = torch.IntTensor([tensor.numel()]).to('cuda')
-     size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
-     dist.all_gather(size_list, local_size)
-     size_list = [int(size.item()) for size in size_list]
-     max_size = max(size_list)
-
-     tensor_list = []
-     for _ in size_list:
-         tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
-
-     if local_size != max_size:
-         padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda')
-         tensor = torch.cat((tensor, padding), 0)
-
-     dist.all_gather(tensor_list, tensor)
-
-     data_list = []
-
-     for size, tensor in zip(size_list, tensor_list):
-         buffer = tensor.cpu().numpy().tobytes()[:size]
-         data_list.append(pickle.loads(buffer))
-
-     return data_list
-
-
- def reduce_loss_dict(loss_dict):
-     world_size = get_world_size()
-
-     if world_size < 2:
-         return loss_dict
-
-     with torch.no_grad():
-         keys = []
-         losses = []
-
-         for k in sorted(loss_dict.keys()):
-             keys.append(k)
-             losses.append(loss_dict[k])
-
-         losses = torch.stack(losses, 0)
-         dist.reduce(losses, dst=0)
-
-         if dist.get_rank() == 0:
-             losses /= world_size
-
-         reduced_losses = {k: v for k, v in zip(keys, losses)}
-
-     return reduced_losses
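
A small sketch of how these helpers degrade gracefully outside an initialized `torch.distributed` process group, which is what lets the same training code run single-process. It assumes PyTorch is installed and that the module above is importable as `distributed`:

import torch
from distributed import all_gather, get_rank, get_world_size, reduce_loss_dict

# With no process group initialized, every helper falls back to
# single-process behaviour:
print(get_rank())        # 0
print(get_world_size())  # 1
print(all_gather({"step": 1}))                        # [{'step': 1}]
print(reduce_loss_dict({"loss": torch.tensor(0.5)}))  # returned unchanged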
spaces/ECCV2022/bytetrack/yolox/data/dataloading.py DELETED
@@ -1,178 +0,0 @@
- #!/usr/bin/env python3
- # -*- coding:utf-8 -*-
- # Copyright (c) Megvii, Inc. and its affiliates.
-
- import torch
- from torch.utils.data.dataloader import DataLoader as torchDataLoader
- from torch.utils.data.dataloader import default_collate
-
- import os
- import random
-
- from .samplers import YoloBatchSampler
-
-
- def get_yolox_datadir():
-     """
-     Get the dataset dir of YOLOX. If an environment variable named `YOLOX_DATADIR` is set,
-     this function returns its value; otherwise, it returns the `datasets` dir next to the `yolox` package.
-     """
-     yolox_datadir = os.getenv("YOLOX_DATADIR", None)
-     if yolox_datadir is None:
-         import yolox
-
-         yolox_path = os.path.dirname(os.path.dirname(yolox.__file__))
-         yolox_datadir = os.path.join(yolox_path, "datasets")
-     return yolox_datadir
-
-
- class DataLoader(torchDataLoader):
-     """
-     Lightnet dataloader that enables on-the-fly resizing of the images.
-     See :class:`torch.utils.data.DataLoader` for more information on the arguments.
-     Check more on the following website:
-     https://gitlab.com/EAVISE/lightnet/-/blob/master/lightnet/data/_dataloading.py
-
-     Note:
-         This dataloader only works with :class:`lightnet.data.Dataset` based datasets.
-
-     Example:
-         >>> class CustomSet(ln.data.Dataset):
-         ...     def __len__(self):
-         ...         return 4
-         ...     @ln.data.Dataset.resize_getitem
-         ...     def __getitem__(self, index):
-         ...         # Should return (image, anno) but here we return (input_dim,)
-         ...         return (self.input_dim,)
-         >>> dl = ln.data.DataLoader(
-         ...     CustomSet((200,200)),
-         ...     batch_size = 2,
-         ...     collate_fn = ln.data.list_collate   # We want the data to be grouped as a list
-         ... )
-         >>> dl.dataset.input_dim    # Default input_dim
-         (200, 200)
-         >>> for d in dl:
-         ...     d
-         [[(200, 200), (200, 200)]]
-         [[(200, 200), (200, 200)]]
-         >>> dl.change_input_dim(320, random_range=None)
-         (320, 320)
-         >>> for d in dl:
-         ...     d
-         [[(320, 320), (320, 320)]]
-         [[(320, 320), (320, 320)]]
-         >>> dl.change_input_dim((480, 320), random_range=None)
-         (480, 320)
-         >>> for d in dl:
-         ...     d
-         [[(480, 320), (480, 320)]]
-         [[(480, 320), (480, 320)]]
-     """
-
-     def __init__(self, *args, **kwargs):
-         super().__init__(*args, **kwargs)
-         self.__initialized = False
-         shuffle = False
-         sampler = batch_sampler = None
-         if len(args) > 5:
-             shuffle = args[2]
-             sampler = args[3]
-             batch_sampler = args[4]
-         elif len(args) > 4:
-             shuffle = args[2]
-             sampler = args[3]
-             if "batch_sampler" in kwargs:
-                 batch_sampler = kwargs["batch_sampler"]
-         elif len(args) > 3:
-             shuffle = args[2]
-             if "sampler" in kwargs:
-                 sampler = kwargs["sampler"]
-             if "batch_sampler" in kwargs:
-                 batch_sampler = kwargs["batch_sampler"]
-         else:
-             if "shuffle" in kwargs:
-                 shuffle = kwargs["shuffle"]
-             if "sampler" in kwargs:
-                 sampler = kwargs["sampler"]
-             if "batch_sampler" in kwargs:
-                 batch_sampler = kwargs["batch_sampler"]
-
-         # Use custom BatchSampler
-         if batch_sampler is None:
-             if sampler is None:
-                 if shuffle:
-                     sampler = torch.utils.data.sampler.RandomSampler(self.dataset)
-                     # sampler = torch.utils.data.DistributedSampler(self.dataset)
-                 else:
-                     sampler = torch.utils.data.sampler.SequentialSampler(self.dataset)
-             batch_sampler = YoloBatchSampler(
-                 sampler,
-                 self.batch_size,
-                 self.drop_last,
-                 input_dimension=self.dataset.input_dim,
-             )
-             # batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iterations =
-
-         self.batch_sampler = batch_sampler
-
-         self.__initialized = True
-
-     def close_mosaic(self):
-         self.batch_sampler.mosaic = False
-
-     def change_input_dim(self, multiple=32, random_range=(10, 19)):
-         """This function will compute a new size and update it on the next mini_batch.
-
-         Args:
-             multiple (int or tuple, optional): values to multiply the randomly generated range by.
-                 Default **32**
-             random_range (tuple, optional): This (min, max) tuple sets the range
-                 for the randomisation; Default **(10, 19)**
-
-         Return:
-             tuple: width, height tuple with new dimension
-
-         Note:
-             The new size is generated as follows: |br|
-             First we compute a random integer inside ``[random_range]``.
-             We then multiply that number with the ``multiple`` argument,
-             which gives our final new input size. |br|
-             If ``multiple`` is an integer we generate a square size. If you give a tuple
-             of **(width, height)**, the size is computed
-             as :math:`rng * multiple[0], rng * multiple[1]`.
-
-         Note:
-             You can set the ``random_range`` argument to **None** to use the exact
-             value of ``multiple`` as the new size. |br|
-             See the example above for how this works.
-         """
-         if random_range is None:
-             size = 1
-         else:
-             size = random.randint(*random_range)
-
-         if isinstance(multiple, int):
-             size = (size * multiple, size * multiple)
-         else:
-             size = (size * multiple[0], size * multiple[1])
-
-         self.batch_sampler.new_input_dim = size
-
-         return size
-
-
- def list_collate(batch):
-     """
-     Function that collates lists or tuples together into one list (of lists/tuples).
-     Use this as the collate function in a Dataloader, if you want to have a list of
-     items as an output, as opposed to tensors (eg. Brambox.boxes).
-     """
-     items = list(zip(*batch))
-
-     for i in range(len(items)):
-         if isinstance(items[i][0], (list, tuple)):
-             items[i] = list(items[i])
-         else:
-             items[i] = default_collate(items[i])
-
-     return items
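
A runnable sketch of `list_collate` (assuming PyTorch is installed and the function above is in scope): tensor items are stacked by `default_collate`, while list/tuple items are kept as plain Python lists, which is what variable-length box annotations need:

import torch

batch = [
    (torch.zeros(3, 4), ["box_a1", "box_a2"]),
    (torch.ones(3, 4), ["box_b1"]),
]
images, annotations = list_collate(batch)
print(images.shape)  # torch.Size([2, 3, 4]) -- tensors stacked into one batch tensor
print(annotations)   # [['box_a1', 'box_a2'], ['box_b1']] -- lists left ungrouped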
spaces/Eddycrack864/Applio-Inference/venv.sh DELETED
@@ -1 +0,0 @@
- python3.8 -m venv .venv
spaces/EronSamez/RVC_HFmeu/Applio-RVC-Fork/utils/clonerepo_experimental.py DELETED
@@ -1,253 +0,0 @@
- import os
- import subprocess
- import shutil
- from concurrent.futures import ThreadPoolExecutor, as_completed
- from tqdm.notebook import tqdm
- from pathlib import Path
- import requests
-
- def run_script():
-     def run_cmd(cmd):
-         process = subprocess.run(cmd, shell=True, check=True, text=True)
-         return process.stdout
-
-     # Change the current directory to /content/
-     os.chdir('/content/')
-     print("Changing dir to /content/")
-
-     # Function to edit the file in place
-     def edit_file(file_path):
-         temp_file_path = "/tmp/temp_file.py"
-         changes_made = False
-         with open(file_path, "r") as file, open(temp_file_path, "w") as temp_file:
-             previous_line = ""
-             second_previous_line = ""
-             for line in file:
-                 new_line = line.replace("value=160", "value=128")
-                 if new_line != line:
-                     print("Replaced 'value=160' with 'value=128'")
-                     changes_made = True
-                     line = new_line
-
-                 new_line = line.replace("crepe hop length: 160", "crepe hop length: 128")
-                 if new_line != line:
-                     print("Replaced 'crepe hop length: 160' with 'crepe hop length: 128'")
-                     changes_made = True
-                     line = new_line
-
-                 new_line = line.replace("value=0.88", "value=0.75")
-                 if new_line != line:
-                     print("Replaced 'value=0.88' with 'value=0.75'")
-                     changes_made = True
-                     line = new_line
-
-                 if "label=i18n(\"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络\")" in previous_line and "value=1," in line:
-                     new_line = line.replace("value=1,", "value=0.25,")
-                     if new_line != line:
-                         print("Replaced 'value=1,' with 'value=0.25,' based on the condition")
-                         changes_made = True
-                         line = new_line
-
-                 if "label=i18n(\"总训练轮数total_epoch\")" in previous_line and "value=20," in line:
-                     new_line = line.replace("value=20,", "value=500,")
-                     if new_line != line:
-                         print("Replaced 'value=20,' with 'value=500,' based on the condition for DEFAULT EPOCH")
-                         changes_made = True
-                         line = new_line
-
-                 if 'choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny"], # Fork Feature. Add Crepe-Tiny' in previous_line:
-                     if 'value="pm",' in line:
-                         new_line = line.replace('value="pm",', 'value="mangio-crepe",')
-                         if new_line != line:
-                             print("Replaced 'value=\"pm\",' with 'value=\"mangio-crepe\",' based on the condition")
-                             changes_made = True
-                             line = new_line
-
-                 new_line = line.replace('label=i18n("输入训练文件夹路径"), value="E:\\\\语音音频+标注\\\\米津玄师\\\\src"', 'label=i18n("输入训练文件夹路径"), value="/content/dataset/"')
-                 if new_line != line:
-                     print("Replaced 'label=i18n(\"输入训练文件夹路径\"), value=\"E:\\\\语音音频+标注\\\\米津玄师\\\\src\"' with 'label=i18n(\"输入训练文件夹路径\"), value=\"/content/dataset/\"'")
-                     changes_made = True
-                     line = new_line
-
-                 if 'label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"),' in second_previous_line:
-                     if 'value=i18n("否"),' in line:
-                         new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
-                         if new_line != line:
-                             print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE ONLY LATEST")
-                             changes_made = True
-                             line = new_line
-
-                 if 'label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),' in second_previous_line:
-                     if 'value=i18n("否"),' in line:
-                         new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
-                         if new_line != line:
-                             print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE SMALL WEIGHTS")
-                             changes_made = True
-                             line = new_line
-
-                 temp_file.write(line)
-                 second_previous_line = previous_line
-                 previous_line = line
-
-         # After finishing, replace the original file with the temp one
-         import shutil
-         shutil.move(temp_file_path, file_path)
-
-         if changes_made:
-             print("Changes made and file saved successfully.")
-         else:
-             print("No changes were needed.")
-
-     # Define the repo path
-     repo_path = '/content/Applio-RVC-Fork'
-
-     def copy_all_files_in_directory(src_dir, dest_dir):
-         # Iterate over all files in source directory
-         for item in Path(src_dir).glob('*'):
-             if item.is_file():
-                 # Copy each file to destination directory
-                 shutil.copy(item, dest_dir)
-             else:
-                 # If it's a directory, make a new directory in the destination and copy the files recursively
-                 new_dest = Path(dest_dir) / item.name
-                 new_dest.mkdir(exist_ok=True)
-                 copy_all_files_in_directory(str(item), str(new_dest))
-
-     def clone_and_copy_repo(repo_path):
-         # New repository link
-         new_repo_link = "https://github.com/IAHispano/Applio-RVC-Fork/"
-         # Temporary path to clone the repository
-         temp_repo_path = "/content/temp_Applio-RVC-Fork"
-         # New folder name
-         new_folder_name = "Applio-RVC-Fork"
-
-         # Clone the latest code from the new repository to a temporary location
-         run_cmd(f"git clone {new_repo_link} {temp_repo_path}")
-         os.chdir(temp_repo_path)
-
-         run_cmd(f"git checkout 3fa4dad3d8961e5ca2522e9e12c0b4ddb71ad402")
-         run_cmd(f"git checkout f9e606c279cb49420597519b0a83b92be81e42e4")
-         run_cmd(f"git checkout 9e305588844c5442d58add1061b29beeca89d679")
-         run_cmd(f"git checkout bf92dc1eb54b4f28d6396a4d1820a25896cc9af8")
-         run_cmd(f"git checkout c3810e197d3cb98039973b2f723edf967ecd9e61")
-         run_cmd(f"git checkout a33159efd134c2413b0afe26a76b7dc87926d2de")
-         run_cmd(f"git checkout 24e251fb62c662e39ac5cf9253cc65deb9be94ec")
-         run_cmd(f"git checkout ad5667d3017e93232dba85969cddac1322ba2902")
-         run_cmd(f"git checkout ce9715392cf52dd5a0e18e00d1b5e408f08dbf27")
-         run_cmd(f"git checkout 7c7da3f2ac68f3bd8f3ad5ca5c700f18ab9f90eb")
-         run_cmd(f"git checkout 4ac395eab101955e8960b50d772c26f592161764")
-         run_cmd(f"git checkout b15b358702294c7375761584e5276c811ffab5e8")
-         run_cmd(f"git checkout 1501793dc490982db9aca84a50647764caa66e51")
-         run_cmd(f"git checkout 21f7faf57219c75e6ba837062350391a803e9ae2")
-         run_cmd(f"git checkout b5eb689fbc409b49f065a431817f822f554cebe7")
-         run_cmd(f"git checkout 7e02fae1ebf24cb151bf6cbe787d06734aa65862")
-         run_cmd(f"git checkout 6aea5ea18ed0b9a1e03fa5d268d6bc3c616672a9")
-         run_cmd(f"git checkout f0f9b25717e59116473fb42bd7f9252cfc32b398")
-         run_cmd(f"git checkout b394de424088a81fc081224bc27338a8651ad3b2")
-         run_cmd(f"git checkout f1999406a88b80c965d2082340f5ea2bfa9ab67a")
-         run_cmd(f"git checkout d98a0fa8dc715308dfc73eac5c553b69c6ee072b")
-         run_cmd(f"git checkout d73267a415fb0eba98477afa43ef71ffd82a7157")
-         run_cmd(f"git checkout 1a03d01356ae79179e1fb8d8915dc9cc79925742")
-         run_cmd(f"git checkout 81497bb3115e92c754300c9b3992df428886a3e9")
-         run_cmd(f"git checkout c5af1f8edcf79cb70f065c0110e279e78e48caf9")
-         run_cmd(f"git checkout cdb3c90109387fa4dfa92f53c3864c71170ffc77")
-
-         # Edit the file here, before copying
-         #edit_file(f"{temp_repo_path}/infer-web.py")
-
-         # Copy all files from the cloned repository to the existing path
-         copy_all_files_in_directory(temp_repo_path, repo_path)
-         print(f"Copying all {new_folder_name} files from GitHub.")
-
-         # Change working directory back to /content/
-         os.chdir('/content/')
-         print("Changed path back to /content/")
-
-         # Remove the temporary cloned repository
-         shutil.rmtree(temp_repo_path)
-
-     # Call the function
-     clone_and_copy_repo(repo_path)
-
-     # Download the credentials file for RVC archive sheet
-     os.makedirs('/content/Applio-RVC-Fork/stats/', exist_ok=True)
-     run_cmd("wget -q https://cdn.discordapp.com/attachments/945486970883285045/1114717554481569802/peppy-generator-388800-07722f17a188.json -O /content/Applio-RVC-Fork/stats/peppy-generator-388800-07722f17a188.json")
-
-     # Forcefully delete any existing torchcrepe dependencies downloaded from an earlier run just in case
-     shutil.rmtree('/content/Applio-RVC-Fork/torchcrepe', ignore_errors=True)
-     shutil.rmtree('/content/torchcrepe', ignore_errors=True)
-
-     # Download the torchcrepe folder from the maxrmorrison/torchcrepe repository
-     run_cmd("git clone https://github.com/maxrmorrison/torchcrepe.git")
-     shutil.move('/content/torchcrepe/torchcrepe', '/content/Applio-RVC-Fork/')
-     shutil.rmtree('/content/torchcrepe', ignore_errors=True)  # Delete the torchcrepe repository folder
-
-     # Change the current directory to /content/Applio-RVC-Fork
-     os.chdir('/content/Applio-RVC-Fork')
-     os.makedirs('pretrained', exist_ok=True)
-     os.makedirs('uvr5_weights', exist_ok=True)
-
- def download_file(url, filepath):
-     response = requests.get(url, stream=True)
-     response.raise_for_status()
-
-     with open(filepath, "wb") as file:
-         for chunk in response.iter_content(chunk_size=8192):
-             if chunk:
-                 file.write(chunk)
-
- def download_pretrained_models():
-     pretrained_models = {
-         "pretrained": [
-             "D40k.pth",
-             "G40k.pth",
-             "f0D40k.pth",
-             "f0G40k.pth"
-         ],
-         "pretrained_v2": [
-             "D40k.pth",
-             "G40k.pth",
-             "f0D40k.pth",
-             "f0G40k.pth",
-             "f0G48k.pth",
-             "f0D48k.pth"
-         ],
-         "uvr5_weights": [
-             "HP2-人声vocals+非人声instrumentals.pth",
-             "HP5-主旋律人声vocals+其他instrumentals.pth",
-             "VR-DeEchoNormal.pth",
-             "VR-DeEchoDeReverb.pth",
-             "VR-DeEchoAggressive.pth",
-             "HP5_only_main_vocal.pth",
-             "HP3_all_vocals.pth",
-             "HP2_all_vocals.pth"
-         ]
-     }
-     part2 = "I"
-     base_url = "https://huggingface.co/lj1995/VoiceConversionWebU" + part2 + "/resolve/main/"
-     base_path = "/content/Applio-RVC-Fork/"
-     base_pathm = base_path
-
-     # Calculate total number of files to download
-     total_files = sum(len(files) for files in pretrained_models.values()) + 1  # +1 for hubert_base.pt
-
-     with tqdm(total=total_files, desc="Downloading files") as pbar:
-         for folder, models in pretrained_models.items():
-             folder_path = os.path.join(base_path, folder)
-             os.makedirs(folder_path, exist_ok=True)
-             for model in models:
-                 url = base_url + folder + "/" + model
-                 filepath = os.path.join(folder_path, model)
-                 download_file(url, filepath)
-                 pbar.update()
-
-         # Download hubert_base.pt to the base path
-         hubert_url = base_url + "hubert_base.pt"
-         hubert_filepath = os.path.join(base_pathm, "hubert_base.pt")
-         download_file(hubert_url, hubert_filepath)
-         pbar.update()
-
- def clone_repository(run_download):
-     with ThreadPoolExecutor(max_workers=2) as executor:
-         executor.submit(run_script)
-         if run_download:
-             executor.submit(download_pretrained_models)
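
A usage sketch of the entry point above. The script is Colab-oriented (it hardcodes `/content/` paths and assumes git and wget are available), and `clone_repository` runs the repo setup and the model downloads on two worker threads:

# Assumes a Colab-like environment with a writable /content/ directory.
clone_repository(run_download=True)   # clone + patch the repo and fetch pretrained models
clone_repository(run_download=False)  # repo setup only, skipping the downloads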