Commit
·
4a64a5c
1
Parent(s):
1d90038
Update parquet files (step 65 of 476)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/0xSpleef/openchat-openchat_8192/app.py +0 -3
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dibac For Sketchup 2015 VERIFIED Crack Full Download.md +0 -128
- spaces/1gistliPinn/ChatGPT4/Examples/Chhota Bheem And The Throne Of Bali Dubbed Movie Download [UPDATED].md +0 -74
- spaces/1line/AutoGPT/autogpt/logs.py +0 -332
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Award Presents FIFA 16 - The Most Beautiful and Fastest Soccer Game on Mobile.md +0 -138
- spaces/1phancelerku/anime-remove-background/Boost your Android device with Speed APK The ultimate performance optimizer.md +0 -128
- spaces/1phancelerku/anime-remove-background/Download Google Drive APK for Android and Enjoy Free Cloud Storage.md +0 -130
- spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_dpmsolver_multistep.py +0 -524
- spaces/7hao/bingo/src/components/ui/sheet.tsx +0 -122
- spaces/801artistry/RVC801/infer/lib/infer_pack/attentions.py +0 -417
- spaces/AI-Zero-to-Hero/02-H5-AR-VR-IOT/index.html +0 -66
- spaces/AIFILMS/Image-Animation-using-Thin-Plate-Spline-Motion-Model/style.css +0 -19
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/timm_model.py +0 -112
- spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/speech_base.py +0 -373
- spaces/AILab-CVC/SEED-LLaMA/scripts/seed_llama_inference_14B.py +0 -120
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov7_l_syncbn_fast_6x16b-100e_coco.py +0 -489
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnetv1c101_8xb32_in1k.py +0 -7
- spaces/Ababababababbababa/Ashaar/poetry_diacritizer/util/utils.py +0 -238
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/__init__.py +0 -100
- spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/fused_bias_act.cpp +0 -23
- spaces/Anar0140/4.RealTime-MediaPipe-AI-From-Video-On-Any-Device/app.py +0 -59
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +0 -748
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_unclip/test_stable_unclip.py +0 -241
- spaces/Andy1621/uniformer_image_detection/configs/foveabox/README.md +0 -41
- spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py +0 -10
- spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_160k_ade20k.py +0 -6
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/datasets/README.md +0 -27
- spaces/AnthonyTruchetPoC/persistent-docker/scripts/run-all-precommit-checks.sh +0 -2
- spaces/Araby/BRATArA/README.md +0 -13
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/cli.py +0 -199
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/hash.py +0 -59
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/containers.py +0 -167
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/functools.py +0 -525
- spaces/Audio-AGI/AudioSep/models/CLAP/training/lp_train.py +0 -301
- spaces/Audio-AGI/WavJourney/VoiceParser/customtokenizer.py +0 -202
- spaces/Benson/text-generation/Examples/Bitcoin-qt.exe Download.md +0 -61
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/__init__.py +0 -331
- spaces/Boadiwaa/Recipes/README.md +0 -12
- spaces/CVPR/LIVE/pybind11/.github/ISSUE_TEMPLATE/question.md +0 -21
- spaces/CVPR/LIVE/pybind11/tests/test_stl_binders.cpp +0 -129
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/inner_product.h +0 -22
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/unique_by_key.h +0 -934
- spaces/CVPR/WALT/mmdet/core/export/pytorch2onnx.py +0 -154
- spaces/CVPR/transfiner/configs/quick_schedules/README.md +0 -8
- spaces/CikeyQI/meme-api/meme_generator/memes/charpic/__init__.py +0 -38
- spaces/Cletrason/dalle2-dreamweddingbooth/app.py +0 -3
- spaces/CofAI/chat.b4/client/js/theme-toggler.js +0 -22
- spaces/CofAI/chat.b4/g4f/Provider/Providers/hteyun.py +0 -34
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/FliImagePlugin.py +0 -171
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_o_p_b_d.py +0 -6
spaces/0xSpleef/openchat-openchat_8192/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/openchat/openchat_8192").launch()
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dibac For Sketchup 2015 VERIFIED Crack Full Download.md
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Dibac for SketchUp 2015 Crack Full Download: A Complete Guide</h1>
|
3 |
-
<p>If you are looking for a plugin that can help you draw architectural plans in 2D and get the 3D automatically, you might want to try Dibac for SketchUp 2015. This plugin is a great tool for architects and anyone who wants to create realistic and detailed models in SketchUp. However, if you want to use all the features and functions of this plugin, you will need to purchase a license, which costs 69€. Alternatively, you can use a crack to get the full version of Dibac for SketchUp 2015 for free. In this article, we will show you what Dibac for SketchUp 2015 is, why you need a crack for it, how to download and install the crack, and how to activate it. We will also answer some frequently asked questions about Dibac for SketchUp 2015 crack.</p>
|
4 |
-
<h2>dibac for sketchup 2015 crack full download</h2><br /><p><b><b>Download File</b> ——— <a href="https://byltly.com/2uKw1Y">https://byltly.com/2uKw1Y</a></b></p><br /><br />
|
5 |
-
<h2>What is Dibac for SketchUp 2015?</h2>
|
6 |
-
<p>Dibac for SketchUp 2015 is a plugin that allows you to draw in 2D and get the 3D with just one click. It works with SketchUp 2014, 2015, 2016, 2017, and 2018. It has several features and benefits that make it a powerful and easy-to-use tool for architectural drawing.</p>
|
7 |
-
<h3>Features and benefits of Dibac for SketchUp 2015</h3>
|
8 |
-
<p>Some of the features and benefits of Dibac for SketchUp 2015 are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Walls: You can create walls with different thicknesses, parallel walls, and wall extensions. You can also change the height of the walls after converting them to 3D.</li>
|
11 |
-
<li>Doors, windows, and wardrobes: You can use the default dynamic components of Dibac for SketchUp 2015 or choose your own custom components or joinery from your library. You can insert them into the walls easily and adjust their parameters.</li>
|
12 |
-
<li>Solid sections: You can add a solid face to your sections, which is very useful for creating plans and elevations.</li>
|
13 |
-
<li>Converting to 3D automagically: You can click just one button and Dibac will convert your 2D floor plan into a 3D model. You can also edit your model in both 2D and 3D modes.</li>
|
14 |
-
<li>Staircases: You can create staircases dynamically in just no time. You can choose from different types of stairs, such as straight, spiral, or U-shaped.</li>
|
15 |
-
<li>Materials: You can apply materials and textures to your geometry created with Dibac for SketchUp 2015. The plugin will remember the applied materials when you convert your floor plan to 3D.</li>
|
16 |
-
<li>Dimensions tool: You can use the continuous dimension tool of Dibac for SketchUp 2015 to measure your floor plan in 2D mode. You can also set a minimum dimension to be displayed with this tool.</li>
|
17 |
-
</ul>
|
18 |
-
<h3>How to use Dibac for SketchUp 2015</h3>
|
19 |
-
<p>To use Dibac for SketchUp 2015, you need to download it from [10](https://www.dibac.com/dibac ) and install it on your computer. You will also need to have SketchUp 2014 or later installed on your computer. After installing Dibac for SketchUp 2015, you will see a new toolbar in SketchUp with the Dibac icons. You can also access the Dibac menu from the Extensions menu in SketchUp. To start using Dibac for SketchUp 2015, you need to follow these steps:</p>
|
20 |
-
<ol>
|
21 |
-
<li>Open SketchUp and create a new file or open an existing one.</li>
|
22 |
-
<li>Click on the Dibac icon on the toolbar or go to Extensions > Dibac > Start Dibac.</li>
|
23 |
-
<li>Draw your floor plan in 2D mode using the Dibac tools, such as walls, doors, windows, stairs, etc. You can also use the SketchUp tools, such as lines, rectangles, circles, etc.</li>
|
24 |
-
<li>Apply materials and textures to your geometry if you want.</li>
|
25 |
-
<li>Click on the Convert to 3D icon on the toolbar or go to Extensions > Dibac > Convert to 3D.</li>
|
26 |
-
<li>Enjoy your 3D model created with Dibac for SketchUp 2015. You can also edit your model in both 2D and 3D modes.</li>
|
27 |
-
</ol>
|
28 |
-
<h2>Why do you need a crack for Dibac for SketchUp 2015?</h2>
|
29 |
-
<p>Dibac for SketchUp 2015 is a paid plugin that requires a license to use all its features and functions. The license costs 69€ and it is valid for one year. You can also use a trial version of Dibac for SketchUp 2015 for free, but it has some limitations and disadvantages. Therefore, you might want to use a crack for Dibac for SketchUp 2015 to get the full version of the plugin without paying anything.</p>
|
30 |
-
<h3>The disadvantages of using the trial version</h3>
|
31 |
-
<p>The trial version of Dibac for SketchUp 2015 has the following disadvantages:</p>
|
32 |
-
<ul>
|
33 |
-
<li>It expires after 16 hours of use.</li>
|
34 |
-
<li>It does not allow you to save or export your models created with Dibac.</li>
|
35 |
-
<li>It does not allow you to use custom components or joinery from your library.</li>
|
36 |
-
<li>It does not allow you to change the height of the walls after converting them to 3D.</li>
|
37 |
-
<li>It does not allow you to use the solid sections feature.</li>
|
38 |
-
<li>It does not allow you to use the dimensions tool.</li>
|
39 |
-
</ul>
|
40 |
-
<h3>The advantages of using the full version</h3>
|
41 |
-
<p>The full version of Dibac for SketchUp 2015 has the following advantages:</p>
|
42 |
-
<p></p>
|
43 |
-
<ul>
|
44 |
-
<li>It does not expire and you can use it as long as you want.</li>
|
45 |
-
<li>It allows you to save and export your models created with Dibac.</li>
|
46 |
-
<li>It allows you to use custom components or joinery from your library.</li>
|
47 |
-
<li>It allows you to change the height of the walls after converting them to 3D.</li>
|
48 |
-
<li>It allows you to use the solid sections feature.</li>
|
49 |
-
<li>It allows you to use the dimensions tool.</li>
|
50 |
-
</ul>
|
51 |
-
<h2>How to download and install Dibac for SketchUp 2015 crack?</h2>
|
52 |
-
<p>If you want to download and install Dibac for SketchUp 2015 crack, you need to be aware of the risks and precautions of using a crack. You also need to follow some steps to download and install the crack successfully.</p>
|
53 |
-
<h3>The risks and precautions of using a crack</h3>
|
54 |
-
<p>A crack is a software that modifies or bypasses the security features of another software, such as a license or activation code. Using a crack can be illegal, unethical, and risky. Some of the risks and precautions of using a crack are:</p>
|
55 |
-
<ul>
|
56 |
-
<li>You might violate the intellectual property rights of the software developer and face legal consequences.</li>
|
57 |
-
<li>You might expose your computer to viruses, malware, spyware, or other harmful programs that can damage your system or steal your data.</li>
|
58 |
-
<li>You might compromise the quality and performance of the software and experience errors, crashes, bugs, or glitches.</li>
|
59 |
-
<li>You might lose access to updates, support, or customer service from the software developer.</li>
|
60 |
-
<li>You might have ethical issues with using a software that someone else has worked hard to create and deserves compensation for their work.</li>
|
61 |
-
</ul>
|
62 |
-
<p>To avoid or minimize these risks and precautions, you should:</p>
|
63 |
-
<ul>
|
64 |
-
<li>Use a reliable antivirus program and scan your computer regularly.</li>
|
65 |
-
<li>Use a trusted source or website to download the crack and check the reviews and ratings of other users.</li>
|
66 |
-
<li>Backup your data and create a restore point before installing the crack.</li>
|
67 |
-
<li>Disable your internet connection and antivirus program temporarily while installing the crack.</li>
|
68 |
-
<li>Support the software developer if you can afford it and buy the license if you like the software.</li>
|
69 |
-
</ul>
|
70 |
-
<h3>The steps to download and install the crack</h3>
|
71 |
-
<p>To download and install Dibac for SketchUp 2015 crack, you need to follow these steps:</p>
|
72 |
-
<ol>
|
73 |
-
<li>Go to [1](https://crack4windows.com/crack?s=dibac-for-sketchup&id=41164) and click on the Download button. This is a website that provides cracks for various software, including Dibac for SketchUp 2015.</li>
|
74 |
-
<li>Wait for the download to finish and extract the zip file to a folder on your computer.</li>
|
75 |
-
<li>Open the folder and run the setup.exe file as administrator. Follow the instructions on the screen to install Dibac for SketchUp 2015 crack.</li>
|
76 |
-
<li>Copy the crack file from the folder and paste it into the installation directory of Dibac for SketchUp 2015. This is usually C:\Program Files\SketchUp\SketchUp 2015\Plugins\Dibac.</li>
|
77 |
-
<li>Replace the original file with the crack file when prompted.</li>
|
78 |
-
<li>Restart your computer and launch SketchUp. You should see Dibac for SketchUp 2015 activated on your toolbar or menu.</li>
|
79 |
-
</ol>
|
80 |
-
<h2>How to activate Dibac for SketchUp 2015 crack?</h2>
|
81 |
-
<p>After installing Dibac for SketchUp 2015 crack, you need to activate it to use all its features and functions. To activate Dibac for SketchUp 2015 crack, you need to follow these instructions:</p>
|
82 |
-
<h3>The instructions to activate the crack</h3>
|
83 |
-
<p>To activate Dibac for SketchUp 2015 crack, you need to follow these instructions:</p>
|
84 |
-
<ol>
|
85 |
-
<li>Open SketchUp and go to Extensions > Dibac > License Manager.</li>
|
86 |
-
<li>Click on the Activate button and enter any email address and serial number. You can use any random email address and serial number, such as [email protected] and 1234567890.</li>
|
87 |
-
<li>Click on the OK button and wait for a few seconds. You should see a message that says "License activated successfully".</li>
|
88 |
-
<li>Click on the Close button and enjoy using Dibac for SketchUp 2015 crack.</li>
|
89 |
-
</ol>
|
90 |
-
<h3>The tips and tricks to make the most of the crack</h3>
|
91 |
-
<p>To make the most of Dibac for SketchUp 2015 crack, you can use some tips and tricks, such as:</p>
|
92 |
-
<ul>
|
93 |
-
<li>Watch some tutorials or read some manuals on how to use Dibac for SketchUp 2015. You can find some resources on [2](https://www.dibac.com/tutorials ) or [3](https://www.dibac.com/manuals).</li>
|
94 |
-
<li>Practice your skills and creativity by creating different types of architectural plans and models with Dibac for SketchUp 2015. You can also share your work with other users on [4](https://www.dibac.com/gallery) or [5](https://forums.sketchup.com/c/sketchup/dibac/).</li>
|
95 |
-
<li>Explore the different options and settings of Dibac for SketchUp 2015 to customize your workflow and preferences. You can access the options and settings from Extensions > Dibac > Options.</li>
|
96 |
-
<li>Use the keyboard shortcuts of Dibac for SketchUp 2015 to speed up your drawing process. You can find the list of keyboard shortcuts on [6](https://www.dibac.com/keyboard-shortcuts).</li>
|
97 |
-
<li>Check for updates and new features of Dibac for SketchUp 2015 regularly. You can check for updates from Extensions > Dibac > Check for Updates.</li>
|
98 |
-
</ul>
|
99 |
-
<h2>Conclusion</h2>
|
100 |
-
<p>Dibac for SketchUp 2015 is a plugin that allows you to draw in 2D and get the 3D with just one click. It is a great tool for architects and anyone who wants to create realistic and detailed models in SketchUp. However, it is a paid plugin that requires a license to use all its features and functions. If you want to use the full version of Dibac for SketchUp 2015 for free, you can use a crack to bypass the security features of the plugin. In this article, we have shown you what Dibac for SketchUp 2015 is, why you need a crack for it, how to download and install the crack, and how to activate it. We have also answered some frequently asked questions about Dibac for SketchUp 2015 crack. We hope this article has been helpful and informative for you. If you have any questions or comments, please feel free to leave them below.</p>
|
101 |
-
<h2>FAQs</h2>
|
102 |
-
<p>Here are some frequently asked questions about Dibac for SketchUp 2015 crack:</p>
|
103 |
-
<h3>Is Dibac for SketchUp 2015 compatible with Mac?</h3>
|
104 |
-
<p>No, Dibac for SketchUp 2015 is only compatible with Windows operating systems. However, you can use a virtual machine or a dual boot system to run Windows on your Mac and use Dibac for SketchUp 2015.</p>
|
105 |
-
<h3>Is Dibac for SketchUp 2015 compatible with other versions of SketchUp?</h3>
|
106 |
-
<p>Yes, Dibac for SketchUp 2015 is compatible with SketchUp 2014, 2015, 2016, 2017, and 2018. However, it is not compatible with SketchUp 2019 or later.</p>
|
107 |
-
<h3>Is Dibac for SketchUp 2015 safe to use?</h3>
|
108 |
-
<p>Dibac for SketchUp 2015 is safe to use if you download it from the official website of the developer or a trusted source. However, using a crack for Dibac for SketchUp 2015 can be risky and illegal, as it might contain viruses, malware, spyware, or other harmful programs that can damage your system or steal your data. You might also violate the intellectual property rights of the developer and face legal consequences. Therefore, we recommend that you use a reliable antivirus program and scan your computer regularly. We also recommend that you support the developer if you can afford it and buy the license if you like the plugin.</p>
|
109 |
-
<h3>How can I uninstall Dibac for SketchUp 2015?</h3>
|
110 |
-
<p>To uninstall Dibac for SketchUp 2015, you need to follow these steps:</p>
|
111 |
-
<ol>
|
112 |
-
<li>Open SketchUp and go to Extensions > Dibac > Uninstall.</li>
|
113 |
-
<li>Click on the Yes button to confirm the uninstallation.</li>
|
114 |
-
<li>Close SketchUp and delete the folder C:\Program Files\SketchUp\SketchUp 2015\Plugins\Dibac.</li>
|
115 |
-
<li>Delete the file C:\Users\YourUserName\AppData\Roaming\SketchUp\SketchUp 2015\Plugins\Dibac.json.</li>
|
116 |
-
<li>Restart your computer and check if Dibac for SketchUp 2015 is removed from your toolbar or menu.</li>
|
117 |
-
</ol>
|
118 |
-
<h3>How can I contact the developer of Dibac for SketchUp 2015?</h3>
|
119 |
-
<p>If you have any questions, suggestions, feedback, or issues with Dibac for SketchUp 2015, you can contact the developer by using the following methods:</p>
|
120 |
-
<ul>
|
121 |
-
<li>Email: [email protected]</li>
|
122 |
-
<li>Phone: +34 93 433 77 77</li>
|
123 |
-
<li>Website: [7](https://www.dibac.com/contact)</li>
|
124 |
-
<li>Facebook: [8](https://www.facebook.com/DibacSketchup)</li>
|
125 |
-
<li>Twitter: [9](https://twitter.com/DibacSketchup)</li>
|
126 |
-
</ul></p> b2dd77e56b<br />
|
127 |
-
<br />
|
128 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Chhota Bheem And The Throne Of Bali Dubbed Movie Download [UPDATED].md
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Chhota Bheem and the Throne of Bali Dubbed Movie Download: A Review</h1>
|
3 |
-
<p>If you are looking for a fun and adventurous animated movie for your kids, you might want to check out <strong>Chhota Bheem and the Throne of Bali</strong>. This movie is based on the popular Indian cartoon series Chhota Bheem, which follows the adventures of a brave and smart boy named Bheem and his friends in the fictional village of Dholakpur.</p>
|
4 |
-
<p>In this movie, Bheem and his friends are invited by the King of Bali to attend the crowning ceremony of his son, Prince Arjun. However, on their way to Bali, they learn that the kingdom has been captured by an evil witch named Rangda, who has imprisoned the king and queen and wants to rule over Bali with her army of Leyaks, who are monstrous creatures that spread destruction and disease.</p>
|
5 |
-
<h2>Chhota Bheem and the throne of Bali dubbed movie download</h2><br /><p><b><b>DOWNLOAD</b> ✅ <a href="https://imgfil.com/2uxX3Z">https://imgfil.com/2uxX3Z</a></b></p><br /><br />
|
6 |
-
<p>Bheem and his friends team up with Prince Arjun, who has escaped from Rangda's clutches, and decide to fight against the witch and her minions. Along the way, they encounter many challenges and dangers, but also make new friends and discover the beauty and culture of Bali.</p>
|
7 |
-
<h2>Why You Should Watch Chhota Bheem and the Throne of Bali Dubbed Movie</h2>
|
8 |
-
<p>There are many reasons why you should watch <strong>Chhota Bheem and the Throne of Bali dubbed movie</strong>. Here are some of them:</p>
|
9 |
-
<ul>
|
10 |
-
<li>The movie is full of action, comedy, and drama. It will keep you and your kids entertained and engaged throughout.</li>
|
11 |
-
<li>The movie has a positive message about friendship, courage, loyalty, and teamwork. It will inspire you and your kids to be brave and kind like Bheem and his friends.</li>
|
12 |
-
<li>The movie showcases the rich and diverse culture of Bali, such as its music, dance, art, architecture, and cuisine. It will educate you and your kids about a different country and its traditions.</li>
|
13 |
-
<li>The movie has amazing visuals and animation. It will dazzle you and your kids with its colorful and detailed scenes of Bali and its creatures.</li>
|
14 |
-
<li>The movie has catchy songs and music. It will make you and your kids sing along and enjoy the tunes.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>How to Download Chhota Bheem and the Throne of Bali Dubbed Movie for Free</h3>
|
17 |
-
<p>If you want to download <strong>Chhota Bheem and the Throne of Bali dubbed movie</strong> for free, you can follow these simple steps:</p>
|
18 |
-
<ol>
|
19 |
-
<li>Go to a reliable website that offers free downloads of animated movies. You can search for such websites on Google or any other search engine.</li>
|
20 |
-
<li>Search for Chhota Bheem and the Throne of Bali dubbed movie on the website. You can use the search bar or browse through the categories.</li>
|
21 |
-
<li>Select the movie from the list of results. Make sure it is in good quality and has clear audio.</li>
|
22 |
-
<li>Click on the download button or link. You might have to register or sign up on the website before downloading.</li>
|
23 |
-
<li>Choose a suitable format and resolution for your device. You can also select a preferred language if available.</li>
|
24 |
-
<li>Wait for the download to complete. You might have to wait for some time depending on your internet speed and file size.</li>
|
25 |
-
<li>Enjoy watching Chhota Bheem and the Throne of Bali dubbed movie with your kids!</li>
|
26 |
-
</ol>
|
27 |
-
<p>Note: Downloading movies from unauthorized sources may be illegal or unsafe. We do not endorse or promote any such websites or activities. Please use your own discretion and judgment before downloading any content from the internet.</p>
|
28 |
-
<h4>Conclusion</h4>
|
29 |
-
<p><strong>Chhota Bheem and the Throne of Bali dubbed movie</strong> is a great choice for a family-friendly entertainment. It is a fun-filled adventure that will make you laugh, cry, cheer, and learn. You can download it for free from various websites or watch it online on streaming platforms like Prime Video or Google Play. So what are you waiting for? Grab some popcorns and enjoy this amazing movie with your kids!</p>
|
30 |
-
<h5>Who are the Characters of Chhota Bheem and the Throne of Bali Dubbed Movie</h5>
|
31 |
-
<p>One of the reasons why <strong>Chhota Bheem and the Throne of Bali dubbed movie</strong> is so popular is because of its lovable and memorable characters. Here are some of the main characters of the movie:</p>
|
32 |
-
<ul>
|
33 |
-
<li>Bheem: He is the protagonist of the movie and the leader of his friends. He is brave, strong, smart, and kind. He loves to eat laddoos, which give him extra strength and energy. He is always ready to help others and fight against evil.</li>
|
34 |
-
<li>Chutki: She is a seven-year-old girl and Bheem's best friend. She is sweet, caring, and loyal. She likes to cook and make flower garlands. She often accompanies Bheem on his adventures and supports him.</li>
|
35 |
-
<li>Raju: He is a four-year-old boy and Bheem's youngest friend. He is cute, innocent, and cheerful. He admires Bheem and wants to be like him. He often gets into trouble but also helps Bheem in his missions.</li>
|
36 |
-
<li>Jaggu: He is a talking monkey and Bheem's pet. He is witty, funny, and agile. He can swing from trees and jump over obstacles. He loves bananas and shares a special bond with Bheem.</li>
|
37 |
-
<li>Kalia: He is a ten-year-old boy and Bheem's rival. He is arrogant, greedy, and lazy. He often tries to compete with Bheem and prove himself better than him. He has two sidekicks, Dholu and Bholu, who follow him everywhere.</li>
|
38 |
-
<li>Indumati: She is a seven-year-old girl and the princess of Dholakpur. She is beautiful, graceful, and polite. She respects her father, King Indravarma, and cares for her people. She is friends with Bheem and his gang.</li>
|
39 |
-
<li>Arjun: He is an eight-year-old boy and the prince of Bali. He is brave, noble, and generous. He invites Bheem and his friends to his coronation ceremony but gets into trouble when Rangda captures his kingdom. He joins forces with Bheem to defeat Rangda and free his parents.</li>
|
40 |
-
<li>Rangda: She is the antagonist of the movie and an evil witch. She is cruel, cunning, and powerful. She wants to rule over Bali with her army of Leyaks, who are monstrous creatures that spread destruction and disease. She kidnaps the king and queen of Bali and tries to stop Bheem and his friends from saving them.</li>
|
41 |
-
</ul>
|
42 |
-
<h6>Where to Watch Chhota Bheem and the Throne of Bali Dubbed Movie Online</h6>
|
43 |
-
<p>If you want to watch <strong>Chhota Bheem and the Throne of Bali dubbed movie</strong> online, you have several options to choose from. Here are some of them:</p>
|
44 |
-
<ul>
|
45 |
-
<li>Prime Video: You can watch Chhota Bheem and the Throne of Bali dubbed movie online on Prime Video, which is a streaming service by Amazon. You can either rent or buy the movie in HD quality with English subtitles. You can also watch other Chhota Bheem movies and shows on Prime Video.</li>
|
46 |
-
<li>Google Play: You can watch Chhota Bheem and the Throne of Bali dubbed movie online on Google Play, which is a digital store by Google. You can either rent or buy the movie in HD quality with English subtitles. You can also watch other Chhota Bheem movies and shows on Google Play.</li>
|
47 |
-
<li>Atozcartoons: You can watch Chhota Bheem and the Throne of Bali dubbed movie online on Atozcartoons, which is a website that offers free downloads of animated movies in Hindi and Telugu languages. You can download the movie in MP4 format with good quality and clear audio.</li>
|
48 |
-
</ul>
|
49 |
-
<p>Note: Watching movies from unauthorized sources may be illegal or unsafe. We do not endorse or promote any such websites or activities. Please use your own discretion and judgment before watching any content from the internet.</p>
|
50 |
-
<p></p>
|
51 |
-
<h8>How to Enjoy Chhota Bheem and the Throne of Bali Dubbed Movie with Your Kids</h8>
|
52 |
-
<p><strong>Chhota Bheem and the Throne of Bali dubbed movie</strong> is not only a great entertainment for you, but also for your kids. You can enjoy this movie with your kids in many ways. Here are some of them:</p>
|
53 |
-
<ul>
|
54 |
-
<li>Watch the movie together: You can watch the movie together with your kids on your TV, laptop, tablet, or smartphone. You can also use headphones or speakers to enhance the sound quality. You can pause, rewind, or fast forward the movie as per your convenience. You can also discuss the movie with your kids and share your opinions and feelings.</li>
|
55 |
-
<li>Sing along the songs: You can sing along the songs of the movie with your kids and have fun. You can find the lyrics of the songs online or on YouTube. You can also learn the tunes and melodies of the songs and hum them. You can also dance along the songs and express yourself.</li>
|
56 |
-
<li>Play games related to the movie: You can play games related to the movie with your kids and have fun. You can play quizzes, puzzles, word games, memory games, etc. based on the characters, scenes, dialogues, and songs of the movie. You can also make your own games and rules and challenge each other.</li>
|
57 |
-
<li>Draw or color pictures related to the movie: You can draw or color pictures related to the movie with your kids and have fun. You can use pencils, crayons, paints, stickers, etc. to create your own artworks. You can draw or color your favorite characters, scenes, or moments from the movie. You can also make collages or posters related to the movie.</li>
|
58 |
-
<li>Act out scenes from the movie: You can act out scenes from the movie with your kids and have fun. You can use costumes, props, masks, etc. to make your own drama. You can imitate your favorite characters, dialogues, or actions from the movie. You can also improvise or add your own twists to the scenes.</li>
|
59 |
-
</ul>
|
60 |
-
<p>These are some of the ways you can enjoy <strong>Chhota Bheem and the Throne of Bali dubbed movie</strong> with your kids. You can also come up with your own ideas and make your own fun. The main thing is to have a good time with your kids and bond with them over this wonderful movie.</p>
|
61 |
-
<h9>What are the Reviews of Chhota Bheem and the Throne of Bali Dubbed Movie</h9>
|
62 |
-
<p><strong>Chhota Bheem and the Throne of Bali dubbed movie</strong> has received mixed reviews from critics and audiences alike. Some have praised the movie for its animation, story, characters, songs, and message, while others have criticized it for its lack of originality, creativity, and depth. Here are some of the reviews of the movie:</p>
|
63 |
-
<ul>
|
64 |
-
<li>The Times of India gave the movie 3 stars out of 5 and said, \"It's a perfect vacation film for kids. You too can accompany them. The film will make you smile.\" [1]</li>
|
65 |
-
<li>Wikipedia gave the movie a positive review and said, \"It is the sixteenth instalment in the Chhota Bheem film series and the second film in the series to be released directly to movie theatres. Distributed by Yash Raj Films, it was released in four different languages (English, Hindi, Tamil, and Telugu). It received mixed reviews.\" [2]</li>
|
66 |
-
<li>Bollywood Hungama gave the movie a negative review and said, \"Chhota Bheem and the throne of Bali Review – Get Chhota Bheem and the throne of Bali Movie Review, Film Ratings, Chhota Bheem and the throne of Bali Review, Chhota Bheem and the throne of Bali User Review, Chhota Bheem and the throne of Bali Critic Review and Latest Movie Reviews and Ratings on Bollywoodhungama.com.\" [3]</li>
|
67 |
-
</ul>
|
68 |
-
<p>These are some of the reviews of <strong>Chhota Bheem and the Throne of Bali dubbed movie</strong>. You can also read more reviews online or watch the movie yourself and form your own opinion.</p>
|
69 |
-
<h10>Conclusion</h10>
|
70 |
-
<p><strong>Chhota Bheem and the Throne of Bali dubbed movie</strong> is a fun and adventurous animated movie that will appeal to kids and adults alike. It is based on the popular Indian cartoon series Chhota Bheem, which follows the exploits of a brave and smart boy named Bheem and his friends in the fictional village of Dholakpur. In this movie, Bheem and his friends travel to Bali to attend the crowning ceremony of Prince Arjun, but end up fighting against an evil witch named Rangda, who has captured the kingdom and its rulers.</p>
|
71 |
-
<p>The movie has many positive aspects, such as its action, comedy, drama, message, characters, songs, music, visuals, and animation. It also showcases the rich and diverse culture of Bali, such as its music, dance, art, architecture, and cuisine. The movie has received mixed reviews from critics and audiences, but it has also won many awards and accolades. It is the sixteenth instalment in the Chhota Bheem film series and the second film in the series to be released directly to movie theatres.</p>
|
72 |
-
<p>You can download Chhota Bheem and the Throne of Bali dubbed movie for free from various websites or watch it online on streaming platforms like Prime Video or Google Play. You can also enjoy this movie with your kids in many ways, such as watching it together, singing along the songs, playing games related to the movie, drawing or coloring pictures related to the movie, or acting out scenes from the movie. The main thing is to have a good time with your kids and bond with them over this wonderful movie.</p> 3cee63e6c2<br />
|
73 |
-
<br />
|
74 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1line/AutoGPT/autogpt/logs.py
DELETED
@@ -1,332 +0,0 @@
|
|
1 |
-
"""Logging module for Auto-GPT."""
|
2 |
-
import json
|
3 |
-
import logging
|
4 |
-
import os
|
5 |
-
import random
|
6 |
-
import re
|
7 |
-
import time
|
8 |
-
import traceback
|
9 |
-
from logging import LogRecord
|
10 |
-
|
11 |
-
from colorama import Fore, Style
|
12 |
-
|
13 |
-
from autogpt.config import Config, Singleton
|
14 |
-
from autogpt.speech import say_text
|
15 |
-
|
16 |
-
CFG = Config()
|
17 |
-
|
18 |
-
|
19 |
-
class Logger(metaclass=Singleton):
|
20 |
-
"""
|
21 |
-
Logger that handle titles in different colors.
|
22 |
-
Outputs logs in console, activity.log, and errors.log
|
23 |
-
For console handler: simulates typing
|
24 |
-
"""
|
25 |
-
|
26 |
-
def __init__(self):
|
27 |
-
# create log directory if it doesn't exist
|
28 |
-
this_files_dir_path = os.path.dirname(__file__)
|
29 |
-
log_dir = os.path.join(this_files_dir_path, "../logs")
|
30 |
-
if not os.path.exists(log_dir):
|
31 |
-
os.makedirs(log_dir)
|
32 |
-
|
33 |
-
log_file = "activity.log"
|
34 |
-
error_file = "error.log"
|
35 |
-
|
36 |
-
console_formatter = AutoGptFormatter("%(title_color)s %(message)s")
|
37 |
-
|
38 |
-
# Create a handler for console which simulate typing
|
39 |
-
self.typing_console_handler = TypingConsoleHandler()
|
40 |
-
self.typing_console_handler.setLevel(logging.INFO)
|
41 |
-
self.typing_console_handler.setFormatter(console_formatter)
|
42 |
-
|
43 |
-
# Create a handler for console without typing simulation
|
44 |
-
self.console_handler = ConsoleHandler()
|
45 |
-
self.console_handler.setLevel(logging.DEBUG)
|
46 |
-
self.console_handler.setFormatter(console_formatter)
|
47 |
-
|
48 |
-
# Info handler in activity.log
|
49 |
-
self.file_handler = logging.FileHandler(
|
50 |
-
os.path.join(log_dir, log_file), "a", "utf-8"
|
51 |
-
)
|
52 |
-
self.file_handler.setLevel(logging.DEBUG)
|
53 |
-
info_formatter = AutoGptFormatter(
|
54 |
-
"%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
|
55 |
-
)
|
56 |
-
self.file_handler.setFormatter(info_formatter)
|
57 |
-
|
58 |
-
# Error handler error.log
|
59 |
-
error_handler = logging.FileHandler(
|
60 |
-
os.path.join(log_dir, error_file), "a", "utf-8"
|
61 |
-
)
|
62 |
-
error_handler.setLevel(logging.ERROR)
|
63 |
-
error_formatter = AutoGptFormatter(
|
64 |
-
"%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
|
65 |
-
" %(message_no_color)s"
|
66 |
-
)
|
67 |
-
error_handler.setFormatter(error_formatter)
|
68 |
-
|
69 |
-
self.typing_logger = logging.getLogger("TYPER")
|
70 |
-
self.typing_logger.addHandler(self.typing_console_handler)
|
71 |
-
self.typing_logger.addHandler(self.file_handler)
|
72 |
-
self.typing_logger.addHandler(error_handler)
|
73 |
-
self.typing_logger.setLevel(logging.DEBUG)
|
74 |
-
|
75 |
-
self.logger = logging.getLogger("LOGGER")
|
76 |
-
self.logger.addHandler(self.console_handler)
|
77 |
-
self.logger.addHandler(self.file_handler)
|
78 |
-
self.logger.addHandler(error_handler)
|
79 |
-
self.logger.setLevel(logging.DEBUG)
|
80 |
-
|
81 |
-
def typewriter_log(
|
82 |
-
self, title="", title_color="", content="", speak_text=False, level=logging.INFO
|
83 |
-
):
|
84 |
-
if speak_text and CFG.speak_mode:
|
85 |
-
say_text(f"{title}. {content}")
|
86 |
-
|
87 |
-
if content:
|
88 |
-
if isinstance(content, list):
|
89 |
-
content = " ".join(content)
|
90 |
-
else:
|
91 |
-
content = ""
|
92 |
-
|
93 |
-
self.typing_logger.log(
|
94 |
-
level, content, extra={"title": title, "color": title_color}
|
95 |
-
)
|
96 |
-
|
97 |
-
def debug(
|
98 |
-
self,
|
99 |
-
message,
|
100 |
-
title="",
|
101 |
-
title_color="",
|
102 |
-
):
|
103 |
-
self._log(title, title_color, message, logging.DEBUG)
|
104 |
-
|
105 |
-
def warn(
|
106 |
-
self,
|
107 |
-
message,
|
108 |
-
title="",
|
109 |
-
title_color="",
|
110 |
-
):
|
111 |
-
self._log(title, title_color, message, logging.WARN)
|
112 |
-
|
113 |
-
def error(self, title, message=""):
|
114 |
-
self._log(title, Fore.RED, message, logging.ERROR)
|
115 |
-
|
116 |
-
def _log(self, title="", title_color="", message="", level=logging.INFO):
|
117 |
-
if message:
|
118 |
-
if isinstance(message, list):
|
119 |
-
message = " ".join(message)
|
120 |
-
self.logger.log(level, message, extra={"title": title, "color": title_color})
|
121 |
-
|
122 |
-
def set_level(self, level):
|
123 |
-
self.logger.setLevel(level)
|
124 |
-
self.typing_logger.setLevel(level)
|
125 |
-
|
126 |
-
def double_check(self, additionalText=None):
|
127 |
-
if not additionalText:
|
128 |
-
additionalText = (
|
129 |
-
"Please ensure you've setup and configured everything"
|
130 |
-
" correctly. Read https://github.com/Torantulino/Auto-GPT#readme to "
|
131 |
-
"double check. You can also create a github issue or join the discord"
|
132 |
-
" and ask there!"
|
133 |
-
)
|
134 |
-
|
135 |
-
self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)
|
136 |
-
|
137 |
-
|
138 |
-
"""
|
139 |
-
Output stream to console using simulated typing
|
140 |
-
"""
|
141 |
-
|
142 |
-
|
143 |
-
class TypingConsoleHandler(logging.StreamHandler):
|
144 |
-
def emit(self, record):
|
145 |
-
min_typing_speed = 0.05
|
146 |
-
max_typing_speed = 0.01
|
147 |
-
|
148 |
-
msg = self.format(record)
|
149 |
-
try:
|
150 |
-
words = msg.split()
|
151 |
-
for i, word in enumerate(words):
|
152 |
-
print(word, end="", flush=True)
|
153 |
-
if i < len(words) - 1:
|
154 |
-
print(" ", end="", flush=True)
|
155 |
-
typing_speed = random.uniform(min_typing_speed, max_typing_speed)
|
156 |
-
time.sleep(typing_speed)
|
157 |
-
# type faster after each word
|
158 |
-
min_typing_speed = min_typing_speed * 0.95
|
159 |
-
max_typing_speed = max_typing_speed * 0.95
|
160 |
-
print()
|
161 |
-
except Exception:
|
162 |
-
self.handleError(record)
|
163 |
-
|
164 |
-
|
165 |
-
class ConsoleHandler(logging.StreamHandler):
|
166 |
-
def emit(self, record) -> None:
|
167 |
-
msg = self.format(record)
|
168 |
-
try:
|
169 |
-
print(msg)
|
170 |
-
except Exception:
|
171 |
-
self.handleError(record)
|
172 |
-
|
173 |
-
|
174 |
-
class AutoGptFormatter(logging.Formatter):
|
175 |
-
"""
|
176 |
-
Allows to handle custom placeholders 'title_color' and 'message_no_color'.
|
177 |
-
To use this formatter, make sure to pass 'color', 'title' as log extras.
|
178 |
-
"""
|
179 |
-
|
180 |
-
def format(self, record: LogRecord) -> str:
|
181 |
-
if hasattr(record, "color"):
|
182 |
-
record.title_color = (
|
183 |
-
getattr(record, "color")
|
184 |
-
+ getattr(record, "title")
|
185 |
-
+ " "
|
186 |
-
+ Style.RESET_ALL
|
187 |
-
)
|
188 |
-
else:
|
189 |
-
record.title_color = getattr(record, "title")
|
190 |
-
if hasattr(record, "msg"):
|
191 |
-
record.message_no_color = remove_color_codes(getattr(record, "msg"))
|
192 |
-
else:
|
193 |
-
record.message_no_color = ""
|
194 |
-
return super().format(record)
|
195 |
-
|
196 |
-
|
197 |
-
def remove_color_codes(s: str) -> str:
|
198 |
-
ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
|
199 |
-
return ansi_escape.sub("", s)
|
200 |
-
|
201 |
-
|
202 |
-
logger = Logger()
|
203 |
-
|
204 |
-
|
205 |
-
def print_assistant_thoughts(ai_name, assistant_reply):
|
206 |
-
"""Prints the assistant's thoughts to the console"""
|
207 |
-
from autogpt.json_utils.json_fix_llm import (
|
208 |
-
attempt_to_fix_json_by_finding_outermost_brackets,
|
209 |
-
fix_and_parse_json,
|
210 |
-
)
|
211 |
-
|
212 |
-
try:
|
213 |
-
try:
|
214 |
-
# Parse and print Assistant response
|
215 |
-
assistant_reply_json = fix_and_parse_json(assistant_reply)
|
216 |
-
except json.JSONDecodeError:
|
217 |
-
logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
|
218 |
-
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
|
219 |
-
assistant_reply
|
220 |
-
)
|
221 |
-
if isinstance(assistant_reply_json, str):
|
222 |
-
assistant_reply_json = fix_and_parse_json(assistant_reply_json)
|
223 |
-
|
224 |
-
# Check if assistant_reply_json is a string and attempt to parse
|
225 |
-
# it into a JSON object
|
226 |
-
if isinstance(assistant_reply_json, str):
|
227 |
-
try:
|
228 |
-
assistant_reply_json = json.loads(assistant_reply_json)
|
229 |
-
except json.JSONDecodeError:
|
230 |
-
logger.error("Error: Invalid JSON\n", assistant_reply)
|
231 |
-
assistant_reply_json = (
|
232 |
-
attempt_to_fix_json_by_finding_outermost_brackets(
|
233 |
-
assistant_reply_json
|
234 |
-
)
|
235 |
-
)
|
236 |
-
|
237 |
-
assistant_thoughts_reasoning = None
|
238 |
-
assistant_thoughts_plan = None
|
239 |
-
assistant_thoughts_speak = None
|
240 |
-
assistant_thoughts_criticism = None
|
241 |
-
if not isinstance(assistant_reply_json, dict):
|
242 |
-
assistant_reply_json = {}
|
243 |
-
assistant_thoughts = assistant_reply_json.get("thoughts", {})
|
244 |
-
assistant_thoughts_text = assistant_thoughts.get("text")
|
245 |
-
|
246 |
-
if assistant_thoughts:
|
247 |
-
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
|
248 |
-
assistant_thoughts_plan = assistant_thoughts.get("plan")
|
249 |
-
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
|
250 |
-
assistant_thoughts_speak = assistant_thoughts.get("speak")
|
251 |
-
|
252 |
-
logger.typewriter_log(
|
253 |
-
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
|
254 |
-
)
|
255 |
-
logger.typewriter_log(
|
256 |
-
"REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
|
257 |
-
)
|
258 |
-
|
259 |
-
if assistant_thoughts_plan:
|
260 |
-
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
|
261 |
-
# If it's a list, join it into a string
|
262 |
-
if isinstance(assistant_thoughts_plan, list):
|
263 |
-
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
|
264 |
-
elif isinstance(assistant_thoughts_plan, dict):
|
265 |
-
assistant_thoughts_plan = str(assistant_thoughts_plan)
|
266 |
-
|
267 |
-
# Split the input_string using the newline character and dashes
|
268 |
-
lines = assistant_thoughts_plan.split("\n")
|
269 |
-
for line in lines:
|
270 |
-
line = line.lstrip("- ")
|
271 |
-
logger.typewriter_log("- ", Fore.GREEN, line.strip())
|
272 |
-
|
273 |
-
logger.typewriter_log(
|
274 |
-
"CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
|
275 |
-
)
|
276 |
-
# Speak the assistant's thoughts
|
277 |
-
if CFG.speak_mode and assistant_thoughts_speak:
|
278 |
-
say_text(assistant_thoughts_speak)
|
279 |
-
else:
|
280 |
-
logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
|
281 |
-
|
282 |
-
return assistant_reply_json
|
283 |
-
except json.decoder.JSONDecodeError:
|
284 |
-
logger.error("Error: Invalid JSON\n", assistant_reply)
|
285 |
-
if CFG.speak_mode:
|
286 |
-
say_text(
|
287 |
-
"I have received an invalid JSON response from the OpenAI API."
|
288 |
-
" I cannot ignore this response."
|
289 |
-
)
|
290 |
-
|
291 |
-
# All other errors, return "Error: + error message"
|
292 |
-
except Exception:
|
293 |
-
call_stack = traceback.format_exc()
|
294 |
-
logger.error("Error: \n", call_stack)
|
295 |
-
|
296 |
-
|
297 |
-
def print_assistant_thoughts(
|
298 |
-
ai_name: object, assistant_reply_json_valid: object
|
299 |
-
) -> None:
|
300 |
-
assistant_thoughts_reasoning = None
|
301 |
-
assistant_thoughts_plan = None
|
302 |
-
assistant_thoughts_speak = None
|
303 |
-
assistant_thoughts_criticism = None
|
304 |
-
|
305 |
-
assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
|
306 |
-
assistant_thoughts_text = assistant_thoughts.get("text")
|
307 |
-
if assistant_thoughts:
|
308 |
-
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
|
309 |
-
assistant_thoughts_plan = assistant_thoughts.get("plan")
|
310 |
-
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
|
311 |
-
assistant_thoughts_speak = assistant_thoughts.get("speak")
|
312 |
-
logger.typewriter_log(
|
313 |
-
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
|
314 |
-
)
|
315 |
-
logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}")
|
316 |
-
if assistant_thoughts_plan:
|
317 |
-
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
|
318 |
-
# If it's a list, join it into a string
|
319 |
-
if isinstance(assistant_thoughts_plan, list):
|
320 |
-
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
|
321 |
-
elif isinstance(assistant_thoughts_plan, dict):
|
322 |
-
assistant_thoughts_plan = str(assistant_thoughts_plan)
|
323 |
-
|
324 |
-
# Split the input_string using the newline character and dashes
|
325 |
-
lines = assistant_thoughts_plan.split("\n")
|
326 |
-
for line in lines:
|
327 |
-
line = line.lstrip("- ")
|
328 |
-
logger.typewriter_log("- ", Fore.GREEN, line.strip())
|
329 |
-
logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
|
330 |
-
# Speak the assistant's thoughts
|
331 |
-
if CFG.speak_mode and assistant_thoughts_speak:
|
332 |
-
say_text(assistant_thoughts_speak)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Award Presents FIFA 16 - The Most Beautiful and Fastest Soccer Game on Mobile.md
DELETED
@@ -1,138 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>FIFA 16 Mobile: A Review of the Game and How to Download It</h1>
|
3 |
-
<p>If you are a fan of football games, you might have heard of FIFA 16 Mobile, a popular and realistic soccer simulation game for Android devices. In this article, we will review the game and its features, as well as show you how to download it from apkaward.com, a trusted website that offers free apk files for Android games. We will also share some tips and tricks to help you play better and enjoy the game more.</p>
|
4 |
-
<h2>What is FIFA 16 Mobile?</h2>
|
5 |
-
<p>FIFA 16 Mobile is a mobile version of FIFA 16, a console and PC game developed by EA Sports. It was released in September 2015 and it is one of the most downloaded games on Google Play. FIFA 16 Mobile lets you play beautiful football with a newer, better, and faster experience on your mobile device. You can choose from over 10,000 players from over 500 licensed teams and go to battle against other players from real leagues in real arenas from around the world. You can also build and manage your own ultimate team, earn, trade, and transfer superstars like Lionel Messi, Jordan Henderson, and Juan Cuadrado. You can also show off your skills on the pitch with challenging skill games, dynamic accomplishments, and unique player celebrations.</p>
|
6 |
-
<h2>fifa 16 mobile apkaward</h2><br /><p><b><b>Download File</b> ❤ <a href="https://urlin.us/2uSZgg">https://urlin.us/2uSZgg</a></b></p><br /><br />
|
7 |
-
<h3>What are the main features of FIFA 16 Mobile?</h3>
|
8 |
-
<p>Some of the main features of FIFA 16 Mobile are:</p>
|
9 |
-
<ul>
|
10 |
-
<li><b>All-new engine:</b> FIFA 16 Mobile uses an all-new engine that delivers better skill moves, more exciting goals, more responsive controls, smarter teammates, and improved animations. You can also use enhanced hybrid controls that let you use gestures or buttons to control the ball. You can also gain improved offside awareness and more with attacking intelligence.</li>
|
11 |
-
<li><b>Ultimate team:</b> FIFA 16 Mobile allows you to build and manage your own fantasy team. You can choose your play style, formation, kits, and more, then balance player chemistry for the strongest squad compositions. You can also simulate matches or take control of them on the pitch.</li>
|
12 |
-
<li><b>Skill games:</b> FIFA 16 Mobile offers you various skill games to test your abilities on the pitch. You can choose your daily challenge from shooting, ground passing, dribbling, crossing, penalties, and more. Then, pick the right player and beat the challenge to earn rewards.</li>
|
13 |
-
<li><b>Real world football:</b> FIFA 16 Mobile gives you a realistic football experience with real players, teams, leagues, stadiums, and events. You can recreate challenges from current live-event football matches or create your own custom tournaments.</li>
|
14 |
-
<li><b>Player exchange:</b> FIFA 16 Mobile introduces a new feature called player exchange. You can trade players and items you no longer need for a chance of unlocking something better. The higher value items or players you trade, the better the upgrades you’ll get back.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>What are the pros and cons of FIFA 16 Mobile?</h3>
|
17 |
-
<p>Like any game, FIFA 16 Mobile has its pros and cons. Here are some of them:</p>
|
18 |
-
<table>
|
19 |
-
<tr>
|
20 |
-
<th>Pros</th>
|
21 |
-
<th>Cons</th>
|
22 |
-
</tr>
|
23 |
-
<tr>
|
24 |
-
<td><ul>
|
25 |
-
<li>Realistic and immersive graphics and animations</li>
|
26 |
-
<li>Wide variety of players, teams, leagues, and modes</li>
|
27 |
-
<li>Easy and intuitive controls and interface</li>
|
28 |
-
<li>Fun and challenging skill games and achievements</li>
|
29 |
-
<li>Innovative and rewarding player exchange feature</li>
|
30 |
-
</ul></td>
|
31 |
-
<td><ul>
|
32 |
-
<li>Large file size and high device requirements</li>
|
33 |
-
<li>Limited compatibility with some Android devices</li>
|
34 |
-
<li>Potential lagging and crashing issues</li>
|
35 |
-
<li>Requires internet connection to play</li>
|
36 |
-
<li>Some bugs and glitches reported by users</li>
|
37 |
-
</ul></td>
|
38 |
-
</tr>
|
39 |
-
</table>
|
40 |
-
<h3>How to download FIFA 16 Mobile?</h3>
|
41 |
-
<p>If you want to download FIFA 16 Mobile for your Android device, you can follow these simple steps:</p>
|
42 |
-
<ol>
|
43 |
-
<li>Go to apkaward.com, a reliable and safe website that offers free apk files for Android games.</li>
|
44 |
-
<li>Search for FIFA 16 Mobile in the search bar or browse the categories.</li>
|
45 |
-
<li>Select the game from the results and click on the download button.</li>
|
46 |
-
<li>Wait for the download to finish and locate the apk file in your device's storage.</li>
|
47 |
-
<li>Before installing the apk file, make sure you enable the "Unknown sources" option in your device's settings. This will allow you to install apps from sources other than Google Play.</li>
|
48 |
-
<li>Tap on the apk file and follow the instructions to install the game.</li>
|
49 |
-
<li>Enjoy playing FIFA 16 Mobile on your device.</li>
|
50 |
-
</ol>
|
51 |
-
<h2>Tips and tricks for FIFA 16 Mobile</h2>
|
52 |
-
<p>To help you play better and enjoy FIFA 16 Mobile more, here are some tips and tricks that you can use:</p>
|
53 |
-
<ul>
|
54 |
-
<li><b>Defend smartly:</b> Don't rush into tackles or slide unnecessarily. Instead, use the pressure button to close down the space and force the opponent to make a mistake. You can also use the second defender button to call for backup from a teammate. When defending corners, use the swipe gesture to clear the ball.</li>
|
55 |
-
<li><b>Pass accurately:</b> Don't just spam the pass button or use long balls all the time. Instead, use short passes to build up your play and create openings. You can also use through balls to send your attackers behind the defense. When passing, pay attention to the direction and power of your passes.</li>
|
56 |
-
<li><b>Dribble skillfully:</b> Don't just run with the ball or use sprint all the time. Instead, use the skill move button to perform tricks and feints that can confuse or beat your opponents. You can also use the joystick to change direction or speed up your dribbling. When dribbling, pay attention to your player's balance and agility.</li>
|
57 |
-
<li><b>Score effectively:</b> Don't just shoot whenever you get the ball or use finesse shots all the time. Instead, use different types of shots depending on the situation, such as power shots, chip shots, or volleys. You can also use headers or tap-ins to score from crosses or rebounds. When shooting, pay attention to your player's position, angle, and timing.</li>
|
58 |
-
<li><b>Manage wisely:</b> Don't just buy or sell players randomly or use the same formation all the time. Instead, use the player exchange feature to get better players or items. You can also use different formations depending on your play style or opponent. When managing, pay attention to your player's chemistry, rating, and fitness.</li>
|
59 |
-
</ul>
|
60 |
-
<h2>Conclusion</h2>
|
61 |
-
<p>FIFA 16 Mobile is a great game for football fans who want to enjoy a realistic and immersive soccer simulation on their mobile devices. It has many features that make it fun and challenging, such as the all-new engine, the ultimate team, the skill games, the real world football, and the player exchange. It also has some drawbacks, such as its large file size, its limited compatibility, its potential lagging issues, its internet requirement, and its bugs and glitches. However, these can be overcome by downloading it from apkaward.com, a trusted website that offers free apk files for Android games. By following our tips and tricks, you can also improve your performance and experience in FIFA 16 Mobile.</p>
|
62 |
-
<h2>FAQs</h2>
|
63 |
-
<ol>
|
64 |
-
<li><b>Q: How much space does FIFA 16 Mobile take on my device?</b></li>
|
65 |
-
<p>A: FIFA 16 Mobile requires about 1.4 GB of free space on your device. You may need more space for additional data or updates.</p>
|
66 |
-
<li><b>Q: Which Android devices are compatible with FIFA 16 Mobile?</b></li>
|
67 |
-
<p>A: FIFA 16 Mobile is compatible with Android devices that have at least 1.5 GB of RAM, Android 4.4 or later, and a minimum resolution of 800x480. However, some devices may not run the game smoothly or at all, depending on their specifications and performance.</p>
|
68 |
-
<li><b>Q: How can I fix the lagging or crashing issues in FIFA 16 Mobile?</b></li>
|
69 |
-
<p>A: If you experience lagging or crashing issues in FIFA 16 Mobile, you can try the following solutions:</p>
|
70 |
-
<p>fifa 16 ultimate team apk download<br />
|
71 |
-
fifa 16 soccer android game<br />
|
72 |
-
fifa 16 mobile free download<br />
|
73 |
-
fifa 16 apk + obb offline<br />
|
74 |
-
fifa 16 apk mod unlimited money<br />
|
75 |
-
fifa 16 android gameplay<br />
|
76 |
-
fifa 16 mobile best players<br />
|
77 |
-
fifa 16 apk + data highly compressed<br />
|
78 |
-
fifa 16 soccer apk latest version<br />
|
79 |
-
fifa 16 mobile tips and tricks<br />
|
80 |
-
fifa 16 apk no license verification<br />
|
81 |
-
fifa 16 android requirements<br />
|
82 |
-
fifa 16 mobile cheats and hacks<br />
|
83 |
-
fifa 16 apk + obb google drive<br />
|
84 |
-
fifa 16 apk revdl<br />
|
85 |
-
fifa 16 android online or offline<br />
|
86 |
-
fifa 16 mobile skill moves<br />
|
87 |
-
fifa 16 apk + data mega<br />
|
88 |
-
fifa 16 soccer apk old version<br />
|
89 |
-
fifa 16 mobile update<br />
|
90 |
-
fifa 16 apk no root needed<br />
|
91 |
-
fifa 16 android controller support<br />
|
92 |
-
fifa 16 mobile player exchange<br />
|
93 |
-
fifa 16 apk + obb mediafire<br />
|
94 |
-
fifa 16 apk rexdl<br />
|
95 |
-
fifa 16 android multiplayer<br />
|
96 |
-
fifa 16 mobile manager mode<br />
|
97 |
-
fifa 16 apk + data zip file<br />
|
98 |
-
fifa 16 soccer apk pure<br />
|
99 |
-
fifa 16 mobile review<br />
|
100 |
-
fifa 16 apk cracked version<br />
|
101 |
-
fifa 16 android system requirements<br />
|
102 |
-
fifa 16 mobile hack tool<br />
|
103 |
-
fifa 16 apk + obb zippyshare<br />
|
104 |
-
fifa 16 apk mirror<br />
|
105 |
-
fifa 16 android graphics settings<br />
|
106 |
-
fifa 16 mobile tournaments<br />
|
107 |
-
fifa 16 apk + data kickass torrent<br />
|
108 |
-
fifa 16 soccer apkpure.com<br />
|
109 |
-
fifa 16 mobile ratings<br />
|
110 |
-
fifa 16 apk full unlocked version<br />
|
111 |
-
fifa 16 android download size<br />
|
112 |
-
fifa 16 mobile coins generator<br />
|
113 |
-
fifa 16 apk + obb uptodown.com<br />
|
114 |
-
fifa 16 apk mob.org<br />
|
115 |
-
fifa 16 android bugs and glitches<br />
|
116 |
-
fifa 16 mobile achievements<br />
|
117 |
-
fifa 16 apk + data parts download</p>
|
118 |
-
<ul>
|
119 |
-
<li>Close other apps or background processes that may be consuming your device's memory or battery.</li>
|
120 |
-
<li>Clear your device's cache or data to free up some space and improve its performance.</li>
|
121 |
-
<li>Update your device's software or firmware to the latest version.</li>
|
122 |
-
<li>Reinstall the game or download the latest version from apkaward.com.</li>
|
123 |
-
<li>Contact EA Sports customer support for further assistance.</li>
|
124 |
-
</ul>
|
125 |
-
<li><b>Q: How can I play FIFA 16 Mobile offline?</b></li>
|
126 |
-
<p>A: Unfortunately, you cannot play FIFA 16 Mobile offline. You need an internet connection to access the game's features and modes, such as the ultimate team, the skill games, and the real world football. You also need an internet connection to download the game's data and updates.</p>
|
127 |
-
<li><b>Q: How can I get more coins or points in FIFA 16 Mobile?</b></li>
|
128 |
-
<p>A: There are several ways to get more coins or points in FIFA 16 Mobile, such as:</p>
|
129 |
-
<ul>
|
130 |
-
<li>Completing skill games, achievements, and challenges.</li>
|
131 |
-
<li>Winning matches, tournaments, and seasons.</li>
|
132 |
-
<li>Selling or exchanging players or items in the market or player exchange.</li>
|
133 |
-
<li>Watching ads or completing offers in the store.</li>
|
134 |
-
<li>Purchasing them with real money in the store.</li>
|
135 |
-
</ul>
|
136 |
-
<p>I hope you enjoyed reading this article and found it helpful. If you have any questions or feedback, please feel free to leave a comment below. Thank you for your time and attention.</p><br />
|
137 |
-
<br />
|
138 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Boost your Android device with Speed APK The ultimate performance optimizer.md
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>What is Speed Apk and How to Use It?</h1>
|
3 |
-
<p>If you are an avid gamer who loves playing Android games, you might have wondered if there is a way to change the speed of the games. Maybe you want to make them faster to save time, or slower to enjoy them more. Or maybe you want to cheat or hack some games by manipulating their speed. Whatever your reason, there is a tool that can help you do that. It is called speed apk, and in this article, we will tell you what it is, how to use it, and what are its benefits and risks.</p>
|
4 |
-
<h2>speed apk</h2><br /><p><b><b>Download Zip</b> ★★★★★ <a href="https://jinyurl.com/2uNNXA">https://jinyurl.com/2uNNXA</a></b></p><br /><br />
|
5 |
-
<h2>Introduction</h2>
|
6 |
-
<p>Speed apk is an application that allows you to change the speed of any game on your Android device. It works by modifying the system clock of your device, which affects how fast or slow the game runs. You can use it to make your games run faster or slower, depending on your preference. You can also use it to cheat or hack some games by speeding up or slowing down certain aspects of them.</p>
|
7 |
-
<p>Why would you want to use speed apk? There are many reasons why you might want to change the speed of your games. For example, you might want to:</p>
|
8 |
-
<ul>
|
9 |
-
<li>Make your games run faster to reduce waiting time, loading time, or animation time.</li>
|
10 |
-
<li>Make your games run slower to enjoy them more, or make them more challenging or realistic.</li>
|
11 |
-
<li>Cheat or hack some games by speeding up or slowing down certain elements, such as timers, enemies, resources, etc.</li>
|
12 |
-
</ul>
|
13 |
-
<p>How to download and install speed apk on your Android device? To use speed apk, you need to have a rooted Android device. Rooting is a process that gives you full control over your device, allowing you to modify its system settings and install apps that require root access. If you don't know how to root your device, you can search online for tutorials or guides for your specific device model. Once you have rooted your device, you can download and install speed apk from its official website or from other sources. Make sure you download the latest version of the app and that it is compatible with your device.</p>
|
14 |
-
<h2>How to Use Speed Apk to Change the Speed of Games in Android?</h2>
|
15 |
-
<p>Once you have downloaded and installed speed apk on your rooted Android device, you can start using it to change the speed of your games. Here are the steps you need to follow:</p>
|
16 |
-
<ol>
|
17 |
-
<li>Launch speed apk and grant it root access. You will see a floating icon on your screen that indicates that the app is running.</li>
|
18 |
-
<li>Select the game you want to speed up or slow down. You can do this by tapping on the floating icon and choosing "Select application". You will see a list of all the apps installed on your device. Tap on the game you want to modify and press "OK ". The game will be added to the speed apk list.</li>
|
19 |
-
<li>Adjust the speed multiplier and apply the changes. You can do this by tapping on the floating icon and choosing "Speed". You will see a slider that allows you to change the speed of the game from 0.1x to 10x. You can also use the buttons to increase or decrease the speed by 0.1x. Once you have set the desired speed, press "Apply". The game will run at the new speed.</li>
|
20 |
-
<li>Revert the changes and restore the original speed. You can do this by tapping on the floating icon and choosing "Restore". The game will run at its normal speed. You can also remove the game from the speed apk list by tapping on it and choosing "Remove".</li>
|
21 |
-
</ol>
|
22 |
-
<p>That's it! You have successfully changed the speed of your game using speed apk. You can repeat these steps for any other game you want to modify.</p>
|
23 |
-
<h2>Benefits and Risks of Using Speed Apk</h2>
|
24 |
-
<p>Using speed apk can have some benefits and risks, depending on how you use it and what games you use it on. Here are some of them:</p>
|
25 |
-
<p>speed stars apk<br />
|
26 |
-
zingspeed mobile apk<br />
|
27 |
-
speed test apk<br />
|
28 |
-
speed vpn apk<br />
|
29 |
-
speed booster apk<br />
|
30 |
-
speed camera apk<br />
|
31 |
-
speed racing apk<br />
|
32 |
-
speed meter apk<br />
|
33 |
-
speed browser apk<br />
|
34 |
-
speed dial apk<br />
|
35 |
-
speed cleaner apk<br />
|
36 |
-
speed drifters apk<br />
|
37 |
-
speed fan apk<br />
|
38 |
-
speed golf apk<br />
|
39 |
-
speed hacker apk<br />
|
40 |
-
speed indicator apk<br />
|
41 |
-
speed jump apk<br />
|
42 |
-
speed keyboard apk<br />
|
43 |
-
speed launcher apk<br />
|
44 |
-
speed logic apk<br />
|
45 |
-
speed monitor apk<br />
|
46 |
-
speed optimizer apk<br />
|
47 |
-
speed painter apk<br />
|
48 |
-
speed quiz apk<br />
|
49 |
-
speed reader apk<br />
|
50 |
-
speed run apk<br />
|
51 |
-
speed scanner apk<br />
|
52 |
-
speed tracker apk<br />
|
53 |
-
speed video apk<br />
|
54 |
-
speed wallpaper apk<br />
|
55 |
-
speed x3d apk<br />
|
56 |
-
speed zone apk<br />
|
57 |
-
need for speed apk<br />
|
58 |
-
asphalt 9: legends - epic car action racing game (speed edition) apk <br />
|
59 |
-
bike race free - top motorcycle racing games (speed edition) apk <br />
|
60 |
-
carx drift racing 2 (speed edition) apk <br />
|
61 |
-
drag racing (speed edition) apk <br />
|
62 |
-
extreme car driving simulator (speed edition) apk <br />
|
63 |
-
fast & furious takedown (speed edition) apk <br />
|
64 |
-
hill climb racing 2 (speed edition) apk <br />
|
65 |
-
hot wheels: race off (speed edition) apk <br />
|
66 |
-
real racing 3 (speed edition) apk <br />
|
67 |
-
traffic rider (speed edition) apk <br />
|
68 |
-
turbo driving racing 3d (speed edition) apk <br />
|
69 |
-
csr racing 2 - free car racing game (speed edition) apk <br />
|
70 |
-
real drift car racing (speed edition) apk <br />
|
71 |
-
traffic racer (speed edition) apk <br />
|
72 |
-
beach buggy racing 2 (speed edition) apk <br />
|
73 |
-
city racing 3d (speed edition) apk</p>
|
74 |
-
<h3>Benefits of Using Speed Apk</h3>
|
75 |
-
<ul>
|
76 |
-
<li><b>Faster gameplay and reduced waiting time</b>: Using speed apk can make your games run faster, which can save you time and make your gaming experience more enjoyable. For example, you can use it to speed up games that have long loading times, slow animations, or tedious tasks.</li>
|
77 |
-
<li><b>More fun and challenge</b>: Using speed apk can also make your games more fun and challenging, by changing their difficulty level or adding some variety. For example, you can use it to slow down games that are too easy or boring, or to speed up games that are too hard or frustrating.</li>
|
78 |
-
<li><b>Cheating and hacking possibilities</b>: Using speed apk can also give you some advantages or disadvantages in some games, by altering their mechanics or outcomes. For example, you can use it to cheat or hack games that have timers, enemies, resources, or other elements that depend on the speed of the game.</li>
|
79 |
-
</ul>
|
80 |
-
<h3>Risks of Using Speed Apk</h3>
|
81 |
-
<ul>
|
82 |
-
<li><b>Potential damage to your device</b>: Using speed apk can also cause some problems or damage to your device, by affecting its performance or stability. For example, using it to make your games run too fast or too slow can overheat your device, drain your battery, or crash your system.</li>
|
83 |
-
<li><b>Possible bans and penalties from game developers</b>: Using speed apk can also get you in trouble with some game developers, by violating their terms of service or policies. For example, using it to cheat or hack online games can result in bans, suspensions, or other penalties from the game developers or platforms.</li>
|
84 |
-
<li><b>Ethical and moral issues</b>: Using speed apk can also raise some ethical and moral questions, by affecting your gaming integrity or fairness. For example, using it to cheat or hack games that involve other players can be considered unfair, dishonest, or disrespectful.</li>
|
85 |
-
</ul>
|
86 |
-
<h2>Conclusion</h2>
|
87 |
-
<p>In conclusion, speed apk is a tool that allows you to change the speed of any game on your Android device. It can be used for various purposes, such as making your games faster or slower, more fun or challenging, or cheating or hacking them. However, it also comes with some benefits and risks, such as affecting your device performance or stability, getting banned or penalized by game developers, or raising ethical or moral issues. Therefore, you should use it wisely and responsibly, and at your own risk.</p>
|
88 |
-
<p>Here are some tips and recommendations for using speed apk:</p>
|
89 |
-
<ul>
|
90 |
-
<li>Make sure your device is rooted before using speed apk.</li>
|
91 |
-
<li>Download and install speed apk from its official website or from trusted sources.</li>
|
92 |
-
<li>Use speed apk only on games that you own or have permission to modify.</li>
|
93 |
-
<li>Use speed apk only on offline games or single-player modes.</li>
|
94 |
-
<li>Use speed apk only for personal use or entertainment purposes.</li>
|
95 |
-
<li>Do not use speed apk to harm others or gain unfair advantages.</li>
|
96 |
-
<li>Do not use speed apk excessively or unnecessarily.</li>
|
97 |
-
<li>Do not use speed apk on games that are incompatible or unstable with it.</li>
|
98 |
-
<li>Backup your device data before using speed apk.</li>
|
99 |
-
<li>Monitor your device temperature and battery level while using speed apk.</li>
|
100 |
-
</ul>
|
101 |
-
<p>We hope this article has helped you understand what is speed apk and how to use it. If you have any feedback or opinions about this topic, feel free to share them with us in the comments section below. Happy gaming!</p>
|
102 |
-
<h2>FAQs</h2>
|
103 |
-
<p>Here are some of the frequently asked questions about speed apk:</p>
|
104 |
-
<ol>
|
105 |
-
<li><b>What are some of the best games to use speed apk on?</b></li>
|
106 |
-
<p>There is no definitive answer to this question, as different games may have different effects or results when using speed apk. However, some of the games that are commonly used with speed apk are:</p>
|
107 |
-
<ul>
|
108 |
-
<li>Idle or clicker games, such as Cookie Clicker, Adventure Capitalist, or Idle Miner Tycoon. These games can be sped up to earn more money, resources, or achievements faster.</li>
|
109 |
-
<li>Action or arcade games, such as Temple Run, Subway Surfers, or Jetpack Joyride. These games can be slowed down to make them easier to play, avoid obstacles, or collect items.</li>
|
110 |
-
<li>Strategy or simulation games, such as Clash of Clans, SimCity, or FarmVille. These games can be sped up to reduce the waiting time for building, upgrading, or harvesting.</li>
|
111 |
-
</ul>
|
112 |
-
<li><b>Does speed apk work on online games?</b></li>
|
113 |
-
<p>Speed apk does not work on online games that require an internet connection or a server to run. This is because the speed of the game is determined by the server, not by your device. If you try to use speed apk on online games, you may experience errors, glitches, or disconnections. You may also get banned or penalized by the game developers for violating their rules or policies.</p>
|
114 |
-
<li><b>Is speed apk safe and legal to use?</b></li>
|
115 |
-
<p>Speed apk is not a malicious or harmful app, but it is not a risk-free app either. It can cause some problems or damage to your device, such as overheating, battery drain, or system crash. It can also get you in trouble with some game developers, who may ban or penalize you for using it. Moreover, it can raise some ethical or moral issues, such as cheating or hacking other players. Therefore, you should use speed apk at your own risk and responsibility.</p>
|
116 |
-
<li><b>How can I uninstall speed apk from my device?</b></li>
|
117 |
-
<p>If you want to uninstall speed apk from your device, you can follow these steps:</p>
|
118 |
-
<ol>
|
119 |
-
<li>Launch speed apk and tap on the floating icon.</li>
|
120 |
-
<li>Choose "Settings" and then "Uninstall".</li>
|
121 |
-
<li>Confirm your choice and wait for the app to be uninstalled.</li>
|
122 |
-
<li>Reboot your device to complete the process.</li>
|
123 |
-
</ol>
|
124 |
-
<li><b>Where can I find more information and support for speed apk?</b></li>
|
125 |
-
<p>If you want to find more information and support for speed apk, you can visit its official website or its social media pages. You can also contact its developers via email or feedback form. You can also join its online community or forum, where you can ask questions, share tips, or report issues.</p>
|
126 |
-
</ol></p><br />
|
127 |
-
<br />
|
128 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Download Google Drive APK for Android and Enjoy Free Cloud Storage.md
DELETED
@@ -1,130 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download APK Files from Google Drive and Install Them on Your Android Device</h1>
|
3 |
-
<p>If you have an Android device, you probably know that you can install apps from the Google Play Store. But did you know that you can also install apps from other sources, such as Google Drive? In this article, we will show you how to download APK files from Google Drive and install them on your Android device. We will also explain what an APK file is, why you might need it, and what risks and precautions you should take when installing it.</p>
|
4 |
-
<h2>download apk google drive</h2><br /><p><b><b>Download Zip</b> ⇒⇒⇒ <a href="https://jinyurl.com/2uNU1K">https://jinyurl.com/2uNU1K</a></b></p><br /><br />
|
5 |
-
<h2>What is an APK File and Why You Might Need It</h2>
|
6 |
-
<h3>APK File Definition and Benefits</h3>
|
7 |
-
<p>An APK file is a package file that contains the installation files for an Android app. It has the extension .apk and can be opened by any file explorer app. APK files are useful for installing apps that are not available on the Google Play Store, such as beta versions, regional apps, or modded apps. They can also help you update your apps faster, bypass restrictions, or access features that are not supported by your device.</p>
|
8 |
-
<h3>Risks and Precautions of Installing APK Files</h3>
|
9 |
-
<p>However, installing APK files also comes with some risks. You might download a malicious or corrupted file that can harm your device or compromise your data. You might also violate the terms of service of some apps or infringe on their intellectual property rights. Therefore, you should only download APK files from reputable sources, such as official websites, trusted developers, or verified platforms. You should also scan the files for viruses before installing them and check their permissions carefully. Finally, you should always back up your data before installing any APK file, in case something goes wrong.</p>
|
10 |
-
<h2>How to Download APK Files from Google Drive</h2>
|
11 |
-
<h3>Step 1: Enable Unknown Sources on Your Android Device</h3>
|
12 |
-
<p>Before you can install any APK file on your Android device, you need to enable unknown sources. This means that you allow your device to install apps from sources other than the Google Play Store. To do this, follow these steps:</p>
|
13 |
-
<ul>
|
14 |
-
<li>Go to your device settings and tap Security or Apps & Notifications.</li>
|
15 |
-
<li>Tap the three dots in the upper-right corner and select Special Access or Install Unknown Apps.</li>
|
16 |
-
<li>Tap Chrome or whichever web browser you use to access Google Drive.</li>
|
17 |
-
<li>Toggle Allow from this source to the On position.</li>
|
18 |
-
</ul>
|
19 |
-
<h3>Step 2: Find the APK File on Google Drive and Download It</h3>
|
20 |
-
<p>Now that you have enabled unknown sources, you can download the APK file from Google Drive. To do this, follow these steps:</p>
|
21 |
-
<ul>
|
22 |
-
<li>Open your web browser and go to [Google Drive].</li>
|
23 |
-
<li>Sign in with your Google account if you haven't already.</li>
|
24 |
-
<li>Find the APK file that you want to download and tap it.</li>
|
25 |
-
<li>Tap Download or the three dots icon and select Download.</li>
|
26 |
-
<li>Accept any pop-ups or warnings that appear.</li>
|
27 |
-
</ul>
|
28 |
-
<h3>Step 3: Locate the Downloaded APK File and Install It</h3>
|
29 |
-
<p>Once you have downloaded the APK file from Google Drive, you need to locate it and install it. To do this, follow these steps:</p>
|
30 |
-
<ul>
|
31 |
-
<li>Open your file explorer app or download one from the Google Play Store if you don't have one.</li>
|
32 |
-
<li>Navigate to the Downloads folder or wherever you saved the APK file.</li>
|
33 |
-
<li>Tap the APK file and tap Install.</li>
|
34 |
-
<li>Follow the on-screen instructions and wait for the installation to finish.</li>
|
35 |
-
<li>Tap Open or Done to launch or exit the app.</li>
|
36 |
-
</ul>
|
37 |
-
<p>Congratulations, you have successfully downloaded and installed an APK file from Google Drive!</p>
|
38 |
-
<h2>How to Install APK Files on Your Android Device Using Other Methods</h2>
|
39 |
-
<h3>Method 1: Use a File Manager App</h3>
|
40 |
-
<p>If you don't want to use your web browser to download APK files from Google Drive, you can use a file manager app instead. A file manager app allows you to access and manage the files on your device, including APK files. Some popular file manager apps are [ES File Explorer], [Solid Explorer], and [Files by Google]. To use a file manager app to install APK files, follow these steps:</p>
|
41 |
-
<p>How to download apk files from google drive<br />
|
42 |
-
Download google drive apk for android<br />
|
43 |
-
Google drive apk download latest version<br />
|
44 |
-
Download google drive apk for pc<br />
|
45 |
-
Google drive apk download for firestick<br />
|
46 |
-
Download google drive apk old version<br />
|
47 |
-
Google drive apk download uptodown<br />
|
48 |
-
Download google drive apk mod<br />
|
49 |
-
Google drive apk download for android tv<br />
|
50 |
-
Download google drive apk mirror<br />
|
51 |
-
Google drive apk download apkpure<br />
|
52 |
-
Download google drive apk for chromebook<br />
|
53 |
-
Google drive apk download for windows 10<br />
|
54 |
-
Download google drive apk pro<br />
|
55 |
-
Google drive apk download for laptop<br />
|
56 |
-
Download google drive apk offline installer<br />
|
57 |
-
Google drive apk download for ios<br />
|
58 |
-
Download google drive apk premium<br />
|
59 |
-
Google drive apk download for kindle fire<br />
|
60 |
-
Download google drive apk no ads<br />
|
61 |
-
Google drive apk download for mac<br />
|
62 |
-
Download google drive apk cracked<br />
|
63 |
-
Google drive apk download for smart tv<br />
|
64 |
-
Download google drive apk filehippo<br />
|
65 |
-
Google drive apk download for windows 7<br />
|
66 |
-
Download google drive apk full version<br />
|
67 |
-
Google drive apk download for blackberry<br />
|
68 |
-
Download google drive apk pure<br />
|
69 |
-
Google drive apk download for windows 8.1<br />
|
70 |
-
Download google drive apk hack<br />
|
71 |
-
Google drive apk download for linux<br />
|
72 |
-
Download google drive apk free<br />
|
73 |
-
Google drive apk download for android 4.4.2<br />
|
74 |
-
Download google drive apk beta<br />
|
75 |
-
Google drive apk download for android 5.1.1<br />
|
76 |
-
Download google drive apk direct link<br />
|
77 |
-
Google drive apk download for android 6.0.1<br />
|
78 |
-
Download google drive apk xda<br />
|
79 |
-
Google drive apk download for android 7.0<br />
|
80 |
-
Download google drive apk rexdl<br />
|
81 |
-
Google drive apk download for android 8.0<br />
|
82 |
-
Download google drive apk revdl<br />
|
83 |
-
Google drive apk download for android 9.0 pie<br />
|
84 |
-
Download google drive apk from play store<br />
|
85 |
-
Google drive apk download for android 10 q</p>
|
86 |
-
<ul>
|
87 |
-
<li>Download and install a file manager app from the Google Play Store if you don't have one.</li>
|
88 |
-
<li>Open the file manager app and tap Google Drive or whichever cloud service you use to store your APK files.</li>
|
89 |
-
<li>Sign in with your Google account if you haven't already.</li>
|
90 |
-
<li>Find the APK file that you want to install and tap it.</li>
|
91 |
-
<li>Tap Install and follow the on-screen instructions.</li>
|
92 |
-
<li>Tap Open or Done to launch or exit the app.</li>
|
93 |
-
</ul>
|
94 |
-
<h3>Method 2: Use an APK Installer App</h3>
|
95 |
-
<p>If you want to make the installation process easier, you can use an APK installer app. An APK installer app is a tool that helps you install APK files on your device without any hassle. Some popular APK installer apps are [APK Installer], [APKPure], and [APKMirror Installer]. To use an APK installer app to install APK files, follow these steps:</p>
|
96 |
-
<ul>
|
97 |
-
<li>Download and install an APK installer app from the Google Play Store or its official website if you don't have one.</li>
|
98 |
-
<li>Open the APK installer app and tap Google Drive or whichever cloud service you use to store your APK files.</li>
|
99 |
-
<li>Sign in with your Google account if you haven't already.</li>
|
100 |
-
<li>Find the APK file that you want to install and tap it.</li>
|
101 |
-
<li>The APK installer app will automatically scan, verify, and install the APK file for you.</li>
|
102 |
-
<li>Tap Open or Done to launch or exit the app.</li>
|
103 |
-
</ul>
|
104 |
-
<h3>Method 3: Transfer the APK File from Your Computer via USB</h3>
|
105 |
-
<p>If you have the APK file on your computer, you can also transfer it to your Android device via USB and install it. To do this, follow these steps:</p>
|
106 |
-
<ul>
|
107 |
-
<li>Connect your Android device to your computer using a USB cable.</li>
|
108 |
-
<li>Select Transfer Files or MTP mode on your device if prompted.</li>
|
109 |
-
<li>On your computer, open File Explorer or Finder and locate the APK file that you want to transfer.</li>
|
110 |
-
<li>Drag and drop the APK file to your device's internal storage or SD card.</li>
|
111 |
-
<li>Eject your device from your computer and disconnect the USB cable.</li>
|
112 |
-
<li>On your device, open your file explorer app or download one from the Google Play Store if you don't have one.</li>
|
113 |
-
<li>Navigate to the folder where you transferred the APK file and tap it.</li>
|
114 |
-
<li>Tap Install and follow the on-screen instructions.</li>
|
115 |
-
<li>Tap Open or Done to launch or exit the app.</li>
|
116 |
-
</ul>
|
117 |
-
<h2>Conclusion</h2>
|
118 |
-
<p>In this article, we have shown you how to download APK files from Google Drive and install them on your Android device. We have also explained what an APK file is, why you might need it, and what risks and precautions you should take when installing it. We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
|
119 |
-
<h2>FAQs</h2>
|
120 |
-
<table border="1">
|
121 |
-
<tr><td><b>Question</b></td><td><b>Answer</b></td></tr>
|
122 |
-
<tr><td>What is Google Drive?</td><td>Google Drive is a cloud storage service that allows you to store and access your files online. You can upload, download, share, and sync your files across different devices using Google Drive. You can also create and edit documents, spreadsheets, presentations, forms, drawings, and more using Google Drive's online tools. You can get 15 GB of free storage space with a Google account or upgrade to a paid plan for more storage options.</td></tr>
|
123 |
-
<tr><td>How do I update an APK file?</td><td>To update an APK file, you need to download and install the latest version of the APK file from the same source that you got the original one. You can also check for updates using the APK installer app that you used to install the APK file. Alternatively, you can uninstall the old version of the app and install the new one from the Google Play Store if it is available there.</td></tr>
|
124 |
-
<tr><td>How do I uninstall an APK file?</td><td>To uninstall an APK file, you need to go to your device settings and tap Apps or Applications. Find the app that you want to uninstall and tap it. Tap Uninstall and confirm your choice. You can also uninstall an APK file using the APK installer app that you used to install it.</td></tr>
|
125 |
-
<tr><td>How do I share an APK file?</td><td>To share an APK file, you need to upload it to a cloud service, such as Google Drive, Dropbox, or OneDrive, and share the link with the person that you want to share it with. You can also use a file sharing app, such as [SHAREit], [Xender], or [Zapya], to transfer the APK file directly to another device via Wi-Fi or Bluetooth.</td></tr>
|
126 |
-
<tr><td>How do I backup an APK file?</td><td>To backup an APK file, you need to copy it from your device's internal storage or SD card to your computer or another storage device. You can also use a backup app, such as [Titanium Backup], [Helium], or [Super Backup], to backup your APK files along with their data and settings.</td></tr>
|
127 |
-
<tr><td>How do I open an APK file on my computer?</td><td>To open an APK file on your computer, you need to use an Android emulator, such as [BlueStacks], [Nox Player], or [MEmu], that allows you to run Android apps on your computer. You can also use a software tool, such as [APK Studio], [APK Easy Tool], or [APK Editor Pro], that allows you to view and edit the contents of an APK file.</td></tr>
|
128 |
-
</table></p><br />
|
129 |
-
<br />
|
130 |
-
<br />
|
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_dpmsolver_multistep.py
DELETED
@@ -1,524 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 TSAIL Team and The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver
|
17 |
-
|
18 |
-
import math
|
19 |
-
from typing import List, Optional, Tuple, Union
|
20 |
-
|
21 |
-
import numpy as np
|
22 |
-
import paddle
|
23 |
-
|
24 |
-
from ..configuration_utils import ConfigMixin, register_to_config
|
25 |
-
from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS, deprecate
|
26 |
-
from .scheduling_utils import SchedulerMixin, SchedulerOutput
|
27 |
-
|
28 |
-
|
29 |
-
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
|
30 |
-
"""
|
31 |
-
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
|
32 |
-
(1-beta) over time from t = [0,1].
|
33 |
-
|
34 |
-
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
|
35 |
-
to that part of the diffusion process.
|
36 |
-
|
37 |
-
|
38 |
-
Args:
|
39 |
-
num_diffusion_timesteps (`int`): the number of betas to produce.
|
40 |
-
max_beta (`float`): the maximum beta to use; use values lower than 1 to
|
41 |
-
prevent singularities.
|
42 |
-
|
43 |
-
Returns:
|
44 |
-
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
|
45 |
-
"""
|
46 |
-
|
47 |
-
def alpha_bar(time_step):
|
48 |
-
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
|
49 |
-
|
50 |
-
betas = []
|
51 |
-
for i in range(num_diffusion_timesteps):
|
52 |
-
t1 = i / num_diffusion_timesteps
|
53 |
-
t2 = (i + 1) / num_diffusion_timesteps
|
54 |
-
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
|
55 |
-
return paddle.to_tensor(betas, dtype="float32")
|
56 |
-
|
57 |
-
|
58 |
-
class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
|
59 |
-
"""
|
60 |
-
DPM-Solver (and the improved version DPM-Solver++) is a fast dedicated high-order solver for diffusion ODEs with
|
61 |
-
the convergence order guarantee. Empirically, sampling by DPM-Solver with only 20 steps can generate high-quality
|
62 |
-
samples, and it can generate quite good samples even in only 10 steps.
|
63 |
-
|
64 |
-
For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095
|
65 |
-
|
66 |
-
Currently, we support the multistep DPM-Solver for both noise prediction models and data prediction models. We
|
67 |
-
recommend using `solver_order=2` for guided sampling and `solver_order=3` for unconditional sampling.
|
68 |
-
|
69 |
-
We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space
|
70 |
-
diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic
|
71 |
-
thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as
|
72 |
-
stable-diffusion).
|
73 |
-
|
74 |
-
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
|
75 |
-
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
|
76 |
-
[`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
|
77 |
-
[`~SchedulerMixin.from_pretrained`] functions.
|
78 |
-
|
79 |
-
Args:
|
80 |
-
num_train_timesteps (`int`): number of diffusion steps used to train the model.
|
81 |
-
beta_start (`float`): the starting `beta` value of inference.
|
82 |
-
beta_end (`float`): the final `beta` value.
|
83 |
-
beta_schedule (`str`):
|
84 |
-
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
|
85 |
-
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
|
86 |
-
trained_betas (`np.ndarray`, optional):
|
87 |
-
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
|
88 |
-
solver_order (`int`, default `2`):
|
89 |
-
the order of DPM-Solver; can be `1`, `2`, or `3`. We recommend using `solver_order=2` for guided
|
90 |
-
sampling, and `solver_order=3` for unconditional sampling.
|
91 |
-
prediction_type (`str`, default `epsilon`, optional):
|
92 |
-
prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
|
93 |
-
process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4
|
94 |
-
https://imagen.research.google/video/paper.pdf)
|
95 |
-
thresholding (`bool`, default `False`):
|
96 |
-
whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
|
97 |
-
For pixel-space diffusion models, you can set both `algorithm_type=dpmsolver++` and `thresholding=True` to
|
98 |
-
use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion
|
99 |
-
models (such as stable-diffusion).
|
100 |
-
dynamic_thresholding_ratio (`float`, default `0.995`):
|
101 |
-
the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
|
102 |
-
(https://arxiv.org/abs/2205.11487).
|
103 |
-
sample_max_value (`float`, default `1.0`):
|
104 |
-
the threshold value for dynamic thresholding. Valid only when `thresholding=True` and
|
105 |
-
`algorithm_type="dpmsolver++`.
|
106 |
-
algorithm_type (`str`, default `dpmsolver++`):
|
107 |
-
the algorithm type for the solver. Either `dpmsolver` or `dpmsolver++`. The `dpmsolver` type implements the
|
108 |
-
algorithms in https://arxiv.org/abs/2206.00927, and the `dpmsolver++` type implements the algorithms in
|
109 |
-
https://arxiv.org/abs/2211.01095. We recommend using `dpmsolver++` with `solver_order=2` for guided
|
110 |
-
sampling (e.g. stable-diffusion).
|
111 |
-
solver_type (`str`, default `midpoint`):
|
112 |
-
the solver type for the second-order solver. Either `midpoint` or `heun`. The solver type slightly affects
|
113 |
-
the sample quality, especially for small number of steps. We empirically find that `midpoint` solvers are
|
114 |
-
slightly better, so we recommend using the `midpoint` type.
|
115 |
-
lower_order_final (`bool`, default `True`):
|
116 |
-
whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically
|
117 |
-
find this trick can stabilize the sampling of DPM-Solver for steps < 15, especially for steps <= 10.
|
118 |
-
|
119 |
-
"""
|
120 |
-
|
121 |
-
_compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
|
122 |
-
_deprecated_kwargs = ["predict_epsilon"]
|
123 |
-
order = 1
|
124 |
-
|
125 |
-
@register_to_config
|
126 |
-
def __init__(
|
127 |
-
self,
|
128 |
-
num_train_timesteps: int = 1000,
|
129 |
-
beta_start: float = 0.0001,
|
130 |
-
beta_end: float = 0.02,
|
131 |
-
beta_schedule: str = "linear",
|
132 |
-
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
|
133 |
-
solver_order: int = 2,
|
134 |
-
prediction_type: str = "epsilon",
|
135 |
-
thresholding: bool = False,
|
136 |
-
dynamic_thresholding_ratio: float = 0.995,
|
137 |
-
sample_max_value: float = 1.0,
|
138 |
-
algorithm_type: str = "dpmsolver++",
|
139 |
-
solver_type: str = "midpoint",
|
140 |
-
lower_order_final: bool = True,
|
141 |
-
**kwargs,
|
142 |
-
):
|
143 |
-
message = (
|
144 |
-
"Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
|
145 |
-
" DPMSolverMultistepScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
|
146 |
-
)
|
147 |
-
predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
|
148 |
-
if predict_epsilon is not None:
|
149 |
-
self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample")
|
150 |
-
if trained_betas is not None:
|
151 |
-
self.betas = paddle.to_tensor(trained_betas, dtype="float32")
|
152 |
-
elif beta_schedule == "linear":
|
153 |
-
self.betas = paddle.linspace(beta_start, beta_end, num_train_timesteps, dtype="float32")
|
154 |
-
elif beta_schedule == "scaled_linear":
|
155 |
-
# this schedule is very specific to the latent diffusion model.
|
156 |
-
self.betas = paddle.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype="float32") ** 2
|
157 |
-
elif beta_schedule == "squaredcos_cap_v2":
|
158 |
-
# Glide cosine schedule
|
159 |
-
self.betas = betas_for_alpha_bar(num_train_timesteps)
|
160 |
-
else:
|
161 |
-
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
|
162 |
-
|
163 |
-
self.alphas = 1.0 - self.betas
|
164 |
-
self.alphas_cumprod = paddle.cumprod(self.alphas, 0)
|
165 |
-
# Currently we only support VP-type noise schedule
|
166 |
-
self.alpha_t = paddle.sqrt(self.alphas_cumprod)
|
167 |
-
self.sigma_t = paddle.sqrt(1 - self.alphas_cumprod)
|
168 |
-
self.lambda_t = paddle.log(self.alpha_t) - paddle.log(self.sigma_t)
|
169 |
-
|
170 |
-
# standard deviation of the initial noise distribution
|
171 |
-
self.init_noise_sigma = 1.0
|
172 |
-
|
173 |
-
# settings for DPM-Solver
|
174 |
-
if algorithm_type not in ["dpmsolver", "dpmsolver++"]:
|
175 |
-
raise NotImplementedError(f"{algorithm_type} does is not implemented for {self.__class__}")
|
176 |
-
if solver_type not in ["midpoint", "heun"]:
|
177 |
-
raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}")
|
178 |
-
|
179 |
-
# setable values
|
180 |
-
self.num_inference_steps = None
|
181 |
-
timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy()
|
182 |
-
self.timesteps = paddle.to_tensor(timesteps)
|
183 |
-
self.model_outputs = [None] * solver_order
|
184 |
-
self.lower_order_nums = 0
|
185 |
-
|
186 |
-
def set_timesteps(self, num_inference_steps: int):
|
187 |
-
"""
|
188 |
-
Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
|
189 |
-
|
190 |
-
Args:
|
191 |
-
num_inference_steps (`int`):
|
192 |
-
the number of diffusion steps used when generating samples with a pre-trained model.
|
193 |
-
"""
|
194 |
-
self.num_inference_steps = num_inference_steps
|
195 |
-
timesteps = (
|
196 |
-
np.linspace(0, self.num_train_timesteps - 1, num_inference_steps + 1)
|
197 |
-
.round()[::-1][:-1]
|
198 |
-
.copy()
|
199 |
-
.astype(np.int64)
|
200 |
-
)
|
201 |
-
self.timesteps = paddle.to_tensor(timesteps)
|
202 |
-
self.model_outputs = [
|
203 |
-
None,
|
204 |
-
] * self.config.solver_order
|
205 |
-
self.lower_order_nums = 0
|
206 |
-
|
207 |
-
def convert_model_output(self, model_output: paddle.Tensor, timestep: int, sample: paddle.Tensor) -> paddle.Tensor:
|
208 |
-
"""
|
209 |
-
Convert the model output to the corresponding type that the algorithm (DPM-Solver / DPM-Solver++) needs.
|
210 |
-
|
211 |
-
DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to
|
212 |
-
discretize an integral of the data prediction model. So we need to first convert the model output to the
|
213 |
-
corresponding type to match the algorithm.
|
214 |
-
|
215 |
-
Note that the algorithm type and the model type is decoupled. That is to say, we can use either DPM-Solver or
|
216 |
-
DPM-Solver++ for both noise prediction model and data prediction model.
|
217 |
-
|
218 |
-
Args:
|
219 |
-
model_output (`paddle.Tensor`): direct output from learned diffusion model.
|
220 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
221 |
-
sample (`paddle.Tensor`):
|
222 |
-
current instance of sample being created by diffusion process.
|
223 |
-
|
224 |
-
Returns:
|
225 |
-
`paddle.Tensor`: the converted model output.
|
226 |
-
"""
|
227 |
-
# DPM-Solver++ needs to solve an integral of the data prediction model.
|
228 |
-
if self.config.algorithm_type == "dpmsolver++":
|
229 |
-
if self.config.prediction_type == "epsilon":
|
230 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
231 |
-
x0_pred = (sample - sigma_t * model_output) / alpha_t
|
232 |
-
elif self.config.prediction_type == "sample":
|
233 |
-
x0_pred = model_output
|
234 |
-
elif self.config.prediction_type == "v_prediction":
|
235 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
236 |
-
x0_pred = alpha_t * sample - sigma_t * model_output
|
237 |
-
else:
|
238 |
-
raise ValueError(
|
239 |
-
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
|
240 |
-
" `v_prediction` for the DPMSolverMultistepScheduler."
|
241 |
-
)
|
242 |
-
|
243 |
-
if self.config.thresholding:
|
244 |
-
# Dynamic thresholding in https://arxiv.org/abs/2205.11487
|
245 |
-
orig_dtype = x0_pred.dtype
|
246 |
-
if orig_dtype not in [paddle.float32, paddle.float64]:
|
247 |
-
x0_pred = x0_pred.cast("float32")
|
248 |
-
dynamic_max_val = paddle.quantile(
|
249 |
-
paddle.abs(x0_pred).reshape((x0_pred.shape[0], -1)), self.config.dynamic_thresholding_ratio, axis=1
|
250 |
-
)
|
251 |
-
dynamic_max_val = paddle.maximum(
|
252 |
-
dynamic_max_val,
|
253 |
-
self.config.sample_max_value * paddle.ones_like(dynamic_max_val),
|
254 |
-
)[(...,) + (None,) * (x0_pred.ndim - 1)]
|
255 |
-
x0_pred = paddle.clip(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val
|
256 |
-
x0_pred = x0_pred.cast(orig_dtype)
|
257 |
-
return x0_pred
|
258 |
-
# DPM-Solver needs to solve an integral of the noise prediction model.
|
259 |
-
elif self.config.algorithm_type == "dpmsolver":
|
260 |
-
if self.config.prediction_type == "epsilon":
|
261 |
-
return model_output
|
262 |
-
elif self.config.prediction_type == "sample":
|
263 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
264 |
-
epsilon = (sample - alpha_t * model_output) / sigma_t
|
265 |
-
return epsilon
|
266 |
-
elif self.config.prediction_type == "v_prediction":
|
267 |
-
alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
|
268 |
-
epsilon = alpha_t * model_output + sigma_t * sample
|
269 |
-
return epsilon
|
270 |
-
else:
|
271 |
-
raise ValueError(
|
272 |
-
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
|
273 |
-
" `v_prediction` for the DPMSolverMultistepScheduler."
|
274 |
-
)
|
275 |
-
|
276 |
-
def dpm_solver_first_order_update(
|
277 |
-
self,
|
278 |
-
model_output: paddle.Tensor,
|
279 |
-
timestep: int,
|
280 |
-
prev_timestep: int,
|
281 |
-
sample: paddle.Tensor,
|
282 |
-
) -> paddle.Tensor:
|
283 |
-
"""
|
284 |
-
One step for the first-order DPM-Solver (equivalent to DDIM).
|
285 |
-
|
286 |
-
See https://arxiv.org/abs/2206.00927 for the detailed derivation.
|
287 |
-
|
288 |
-
Args:
|
289 |
-
model_output (`paddle.Tensor`): direct output from learned diffusion model.
|
290 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
291 |
-
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
|
292 |
-
sample (`paddle.Tensor`):
|
293 |
-
current instance of sample being created by diffusion process.
|
294 |
-
|
295 |
-
Returns:
|
296 |
-
`paddle.Tensor`: the sample tensor at the previous timestep.
|
297 |
-
"""
|
298 |
-
lambda_t, lambda_s = self.lambda_t[prev_timestep], self.lambda_t[timestep]
|
299 |
-
alpha_t, alpha_s = self.alpha_t[prev_timestep], self.alpha_t[timestep]
|
300 |
-
sigma_t, sigma_s = self.sigma_t[prev_timestep], self.sigma_t[timestep]
|
301 |
-
h = lambda_t - lambda_s
|
302 |
-
if self.config.algorithm_type == "dpmsolver++":
|
303 |
-
x_t = (sigma_t / sigma_s) * sample - (alpha_t * (paddle.exp(-h) - 1.0)) * model_output
|
304 |
-
elif self.config.algorithm_type == "dpmsolver":
|
305 |
-
x_t = (alpha_t / alpha_s) * sample - (sigma_t * (paddle.exp(h) - 1.0)) * model_output
|
306 |
-
return x_t
|
307 |
-
|
308 |
-
def multistep_dpm_solver_second_order_update(
|
309 |
-
self,
|
310 |
-
model_output_list: List[paddle.Tensor],
|
311 |
-
timestep_list: List[int],
|
312 |
-
prev_timestep: int,
|
313 |
-
sample: paddle.Tensor,
|
314 |
-
) -> paddle.Tensor:
|
315 |
-
"""
|
316 |
-
One step for the second-order multistep DPM-Solver.
|
317 |
-
|
318 |
-
Args:
|
319 |
-
model_output_list (`List[paddle.Tensor]`):
|
320 |
-
direct outputs from learned diffusion model at current and latter timesteps.
|
321 |
-
timestep (`int`): current and latter discrete timestep in the diffusion chain.
|
322 |
-
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
|
323 |
-
sample (`paddle.Tensor`):
|
324 |
-
current instance of sample being created by diffusion process.
|
325 |
-
|
326 |
-
Returns:
|
327 |
-
`paddle.Tensor`: the sample tensor at the previous timestep.
|
328 |
-
"""
|
329 |
-
t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2]
|
330 |
-
m0, m1 = model_output_list[-1], model_output_list[-2]
|
331 |
-
lambda_t, lambda_s0, lambda_s1 = self.lambda_t[t], self.lambda_t[s0], self.lambda_t[s1]
|
332 |
-
alpha_t, alpha_s0 = self.alpha_t[t], self.alpha_t[s0]
|
333 |
-
sigma_t, sigma_s0 = self.sigma_t[t], self.sigma_t[s0]
|
334 |
-
h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1
|
335 |
-
r0 = h_0 / h
|
336 |
-
D0, D1 = m0, (1.0 / r0) * (m0 - m1)
|
337 |
-
if self.config.algorithm_type == "dpmsolver++":
|
338 |
-
# See https://arxiv.org/abs/2211.01095 for detailed derivations
|
339 |
-
if self.config.solver_type == "midpoint":
|
340 |
-
x_t = (
|
341 |
-
(sigma_t / sigma_s0) * sample
|
342 |
-
- (alpha_t * (paddle.exp(-h) - 1.0)) * D0
|
343 |
-
- 0.5 * (alpha_t * (paddle.exp(-h) - 1.0)) * D1
|
344 |
-
)
|
345 |
-
elif self.config.solver_type == "heun":
|
346 |
-
x_t = (
|
347 |
-
(sigma_t / sigma_s0) * sample
|
348 |
-
- (alpha_t * (paddle.exp(-h) - 1.0)) * D0
|
349 |
-
+ (alpha_t * ((paddle.exp(-h) - 1.0) / h + 1.0)) * D1
|
350 |
-
)
|
351 |
-
elif self.config.algorithm_type == "dpmsolver":
|
352 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
353 |
-
if self.config.solver_type == "midpoint":
|
354 |
-
x_t = (
|
355 |
-
(alpha_t / alpha_s0) * sample
|
356 |
-
- (sigma_t * (paddle.exp(h) - 1.0)) * D0
|
357 |
-
- 0.5 * (sigma_t * (paddle.exp(h) - 1.0)) * D1
|
358 |
-
)
|
359 |
-
elif self.config.solver_type == "heun":
|
360 |
-
x_t = (
|
361 |
-
(alpha_t / alpha_s0) * sample
|
362 |
-
- (sigma_t * (paddle.exp(h) - 1.0)) * D0
|
363 |
-
- (sigma_t * ((paddle.exp(h) - 1.0) / h - 1.0)) * D1
|
364 |
-
)
|
365 |
-
return x_t
|
366 |
-
|
367 |
-
def multistep_dpm_solver_third_order_update(
|
368 |
-
self,
|
369 |
-
model_output_list: List[paddle.Tensor],
|
370 |
-
timestep_list: List[int],
|
371 |
-
prev_timestep: int,
|
372 |
-
sample: paddle.Tensor,
|
373 |
-
) -> paddle.Tensor:
|
374 |
-
"""
|
375 |
-
One step for the third-order multistep DPM-Solver.
|
376 |
-
|
377 |
-
Args:
|
378 |
-
model_output_list (`List[paddle.Tensor]`):
|
379 |
-
direct outputs from learned diffusion model at current and latter timesteps.
|
380 |
-
timestep (`int`): current and latter discrete timestep in the diffusion chain.
|
381 |
-
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
|
382 |
-
sample (`paddle.Tensor`):
|
383 |
-
current instance of sample being created by diffusion process.
|
384 |
-
|
385 |
-
Returns:
|
386 |
-
`paddle.Tensor`: the sample tensor at the previous timestep.
|
387 |
-
"""
|
388 |
-
t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3]
|
389 |
-
m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3]
|
390 |
-
lambda_t, lambda_s0, lambda_s1, lambda_s2 = (
|
391 |
-
self.lambda_t[t],
|
392 |
-
self.lambda_t[s0],
|
393 |
-
self.lambda_t[s1],
|
394 |
-
self.lambda_t[s2],
|
395 |
-
)
|
396 |
-
alpha_t, alpha_s0 = self.alpha_t[t], self.alpha_t[s0]
|
397 |
-
sigma_t, sigma_s0 = self.sigma_t[t], self.sigma_t[s0]
|
398 |
-
h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2
|
399 |
-
r0, r1 = h_0 / h, h_1 / h
|
400 |
-
D0 = m0
|
401 |
-
D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2)
|
402 |
-
D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)
|
403 |
-
D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1)
|
404 |
-
if self.config.algorithm_type == "dpmsolver++":
|
405 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
406 |
-
x_t = (
|
407 |
-
(sigma_t / sigma_s0) * sample
|
408 |
-
- (alpha_t * (paddle.exp(-h) - 1.0)) * D0
|
409 |
-
+ (alpha_t * ((paddle.exp(-h) - 1.0) / h + 1.0)) * D1
|
410 |
-
- (alpha_t * ((paddle.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2
|
411 |
-
)
|
412 |
-
elif self.config.algorithm_type == "dpmsolver":
|
413 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
414 |
-
x_t = (
|
415 |
-
(alpha_t / alpha_s0) * sample
|
416 |
-
- (sigma_t * (paddle.exp(h) - 1.0)) * D0
|
417 |
-
- (sigma_t * ((paddle.exp(h) - 1.0) / h - 1.0)) * D1
|
418 |
-
- (sigma_t * ((paddle.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2
|
419 |
-
)
|
420 |
-
return x_t
|
421 |
-
|
422 |
-
def step(
|
423 |
-
self,
|
424 |
-
model_output: paddle.Tensor,
|
425 |
-
timestep: int,
|
426 |
-
sample: paddle.Tensor,
|
427 |
-
return_dict: bool = True,
|
428 |
-
) -> Union[SchedulerOutput, Tuple]:
|
429 |
-
"""
|
430 |
-
Step function propagating the sample with the multistep DPM-Solver.
|
431 |
-
|
432 |
-
Args:
|
433 |
-
model_output (`paddle.Tensor`): direct output from learned diffusion model.
|
434 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
435 |
-
sample (`paddle.Tensor`):
|
436 |
-
current instance of sample being created by diffusion process.
|
437 |
-
return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
|
438 |
-
|
439 |
-
Returns:
|
440 |
-
[`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
|
441 |
-
True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
|
442 |
-
|
443 |
-
"""
|
444 |
-
if self.num_inference_steps is None:
|
445 |
-
raise ValueError(
|
446 |
-
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
|
447 |
-
)
|
448 |
-
|
449 |
-
step_index = (self.timesteps == timestep).nonzero()
|
450 |
-
if len(step_index) == 0:
|
451 |
-
step_index = len(self.timesteps) - 1
|
452 |
-
else:
|
453 |
-
step_index = step_index.item()
|
454 |
-
prev_timestep = 0 if step_index == len(self.timesteps) - 1 else self.timesteps[step_index + 1]
|
455 |
-
lower_order_final = (
|
456 |
-
(step_index == len(self.timesteps) - 1) and self.config.lower_order_final and len(self.timesteps) < 15
|
457 |
-
)
|
458 |
-
lower_order_second = (
|
459 |
-
(step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15
|
460 |
-
)
|
461 |
-
|
462 |
-
model_output = self.convert_model_output(model_output, timestep, sample)
|
463 |
-
for i in range(self.config.solver_order - 1):
|
464 |
-
self.model_outputs[i] = self.model_outputs[i + 1]
|
465 |
-
self.model_outputs[-1] = model_output
|
466 |
-
|
467 |
-
if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final:
|
468 |
-
prev_sample = self.dpm_solver_first_order_update(model_output, timestep, prev_timestep, sample)
|
469 |
-
elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second:
|
470 |
-
timestep_list = [self.timesteps[step_index - 1], timestep]
|
471 |
-
prev_sample = self.multistep_dpm_solver_second_order_update(
|
472 |
-
self.model_outputs, timestep_list, prev_timestep, sample
|
473 |
-
)
|
474 |
-
else:
|
475 |
-
timestep_list = [self.timesteps[step_index - 2], self.timesteps[step_index - 1], timestep]
|
476 |
-
prev_sample = self.multistep_dpm_solver_third_order_update(
|
477 |
-
self.model_outputs, timestep_list, prev_timestep, sample
|
478 |
-
)
|
479 |
-
|
480 |
-
if self.lower_order_nums < self.config.solver_order:
|
481 |
-
self.lower_order_nums += 1
|
482 |
-
|
483 |
-
if not return_dict:
|
484 |
-
return (prev_sample,)
|
485 |
-
|
486 |
-
return SchedulerOutput(prev_sample=prev_sample)
|
487 |
-
|
488 |
-
def scale_model_input(self, sample: paddle.Tensor, *args, **kwargs) -> paddle.Tensor:
|
489 |
-
"""
|
490 |
-
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
|
491 |
-
current timestep.
|
492 |
-
|
493 |
-
Args:
|
494 |
-
sample (`paddle.Tensor`): input sample
|
495 |
-
|
496 |
-
Returns:
|
497 |
-
`paddle.Tensor`: scaled input sample
|
498 |
-
"""
|
499 |
-
return sample
|
500 |
-
|
501 |
-
def add_noise(
|
502 |
-
self,
|
503 |
-
original_samples: paddle.Tensor,
|
504 |
-
noise: paddle.Tensor,
|
505 |
-
timesteps: paddle.Tensor,
|
506 |
-
) -> paddle.Tensor:
|
507 |
-
# Make sure alphas_cumprod and timestep have same dtype as original_samples
|
508 |
-
self.alphas_cumprod = self.alphas_cumprod.cast(original_samples.dtype)
|
509 |
-
|
510 |
-
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
|
511 |
-
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
512 |
-
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
|
513 |
-
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
514 |
-
|
515 |
-
sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
|
516 |
-
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
517 |
-
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
|
518 |
-
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
519 |
-
|
520 |
-
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
|
521 |
-
return noisy_samples
|
522 |
-
|
523 |
-
def __len__(self):
|
524 |
-
return self.config.num_train_timesteps
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/7hao/bingo/src/components/ui/sheet.tsx
DELETED
@@ -1,122 +0,0 @@
|
|
1 |
-
'use client'
|
2 |
-
|
3 |
-
import * as React from 'react'
|
4 |
-
import * as SheetPrimitive from '@radix-ui/react-dialog'
|
5 |
-
|
6 |
-
import { cn } from '@/lib/utils'
|
7 |
-
import { IconClose } from '@/components/ui/icons'
|
8 |
-
|
9 |
-
const Sheet = SheetPrimitive.Root
|
10 |
-
|
11 |
-
const SheetTrigger = SheetPrimitive.Trigger
|
12 |
-
|
13 |
-
const SheetClose = SheetPrimitive.Close
|
14 |
-
|
15 |
-
const SheetPortal = ({
|
16 |
-
className,
|
17 |
-
children,
|
18 |
-
...props
|
19 |
-
}: SheetPrimitive.DialogPortalProps) => (
|
20 |
-
<SheetPrimitive.Portal
|
21 |
-
className={cn('fixed inset-0 z-50 flex', className)}
|
22 |
-
{...props}
|
23 |
-
>
|
24 |
-
{children}
|
25 |
-
</SheetPrimitive.Portal>
|
26 |
-
)
|
27 |
-
SheetPortal.displayName = SheetPrimitive.Portal.displayName
|
28 |
-
|
29 |
-
const SheetOverlay = React.forwardRef<
|
30 |
-
React.ElementRef<typeof SheetPrimitive.Overlay>,
|
31 |
-
React.ComponentPropsWithoutRef<typeof SheetPrimitive.Overlay>
|
32 |
-
>(({ className, children, ...props }, ref) => (
|
33 |
-
<SheetPrimitive.Overlay
|
34 |
-
className={cn(
|
35 |
-
'fixed inset-0 z-50 transition-all duration-100 data-[state=closed]:animate-out data-[state=closed]:fade-out data-[state=open]:fade-in',
|
36 |
-
className
|
37 |
-
)}
|
38 |
-
{...props}
|
39 |
-
ref={ref}
|
40 |
-
/>
|
41 |
-
))
|
42 |
-
SheetOverlay.displayName = SheetPrimitive.Overlay.displayName
|
43 |
-
|
44 |
-
const SheetContent = React.forwardRef<
|
45 |
-
React.ElementRef<typeof SheetPrimitive.Content>,
|
46 |
-
React.ComponentPropsWithoutRef<typeof SheetPrimitive.Content>
|
47 |
-
>(({ className, children, ...props }, ref) => (
|
48 |
-
<SheetPortal>
|
49 |
-
<SheetPrimitive.Content
|
50 |
-
ref={ref}
|
51 |
-
className={cn(
|
52 |
-
'fixed inset-y-0 left-0 z-50 h-full border-r bg-background p-6 shadow-lg transition ease-in-out data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:slide-out-to-left data-[state=open]:slide-in-from-left data-[state=closed]:duration-300 data-[state=open]:duration-500 sm:max-w-sm',
|
53 |
-
className
|
54 |
-
)}
|
55 |
-
{...props}
|
56 |
-
>
|
57 |
-
{children}
|
58 |
-
<SheetPrimitive.Close className="absolute right-4 top-4 rounded-sm opacity-70 ring-offset-background transition-opacity hover:opacity-100 focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:pointer-events-none data-[state=open]:bg-secondary">
|
59 |
-
<IconClose />
|
60 |
-
<span className="sr-only">Close</span>
|
61 |
-
</SheetPrimitive.Close>
|
62 |
-
</SheetPrimitive.Content>
|
63 |
-
</SheetPortal>
|
64 |
-
))
|
65 |
-
SheetContent.displayName = SheetPrimitive.Content.displayName
|
66 |
-
|
67 |
-
const SheetHeader = ({
|
68 |
-
className,
|
69 |
-
...props
|
70 |
-
}: React.HTMLAttributes<HTMLDivElement>) => (
|
71 |
-
<div className={cn('flex flex-col space-y-2', className)} {...props} />
|
72 |
-
)
|
73 |
-
SheetHeader.displayName = 'SheetHeader'
|
74 |
-
|
75 |
-
const SheetFooter = ({
|
76 |
-
className,
|
77 |
-
...props
|
78 |
-
}: React.HTMLAttributes<HTMLDivElement>) => (
|
79 |
-
<div
|
80 |
-
className={cn(
|
81 |
-
'flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2',
|
82 |
-
className
|
83 |
-
)}
|
84 |
-
{...props}
|
85 |
-
/>
|
86 |
-
)
|
87 |
-
SheetFooter.displayName = 'SheetFooter'
|
88 |
-
|
89 |
-
const SheetTitle = React.forwardRef<
|
90 |
-
React.ElementRef<typeof SheetPrimitive.Title>,
|
91 |
-
React.ComponentPropsWithoutRef<typeof SheetPrimitive.Title>
|
92 |
-
>(({ className, ...props }, ref) => (
|
93 |
-
<SheetPrimitive.Title
|
94 |
-
ref={ref}
|
95 |
-
className={cn('text-lg font-semibold text-foreground', className)}
|
96 |
-
{...props}
|
97 |
-
/>
|
98 |
-
))
|
99 |
-
SheetTitle.displayName = SheetPrimitive.Title.displayName
|
100 |
-
|
101 |
-
const SheetDescription = React.forwardRef<
|
102 |
-
React.ElementRef<typeof SheetPrimitive.Description>,
|
103 |
-
React.ComponentPropsWithoutRef<typeof SheetPrimitive.Description>
|
104 |
-
>(({ className, ...props }, ref) => (
|
105 |
-
<SheetPrimitive.Description
|
106 |
-
ref={ref}
|
107 |
-
className={cn('text-sm text-muted-foreground', className)}
|
108 |
-
{...props}
|
109 |
-
/>
|
110 |
-
))
|
111 |
-
SheetDescription.displayName = SheetPrimitive.Description.displayName
|
112 |
-
|
113 |
-
export {
|
114 |
-
Sheet,
|
115 |
-
SheetTrigger,
|
116 |
-
SheetClose,
|
117 |
-
SheetContent,
|
118 |
-
SheetHeader,
|
119 |
-
SheetFooter,
|
120 |
-
SheetTitle,
|
121 |
-
SheetDescription
|
122 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/infer/lib/infer_pack/attentions.py
DELETED
@@ -1,417 +0,0 @@
|
|
1 |
-
import copy
|
2 |
-
import math
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
import torch
|
6 |
-
from torch import nn
|
7 |
-
from torch.nn import functional as F
|
8 |
-
|
9 |
-
from infer.lib.infer_pack import commons, modules
|
10 |
-
from infer.lib.infer_pack.modules import LayerNorm
|
11 |
-
|
12 |
-
|
13 |
-
class Encoder(nn.Module):
|
14 |
-
def __init__(
|
15 |
-
self,
|
16 |
-
hidden_channels,
|
17 |
-
filter_channels,
|
18 |
-
n_heads,
|
19 |
-
n_layers,
|
20 |
-
kernel_size=1,
|
21 |
-
p_dropout=0.0,
|
22 |
-
window_size=10,
|
23 |
-
**kwargs
|
24 |
-
):
|
25 |
-
super().__init__()
|
26 |
-
self.hidden_channels = hidden_channels
|
27 |
-
self.filter_channels = filter_channels
|
28 |
-
self.n_heads = n_heads
|
29 |
-
self.n_layers = n_layers
|
30 |
-
self.kernel_size = kernel_size
|
31 |
-
self.p_dropout = p_dropout
|
32 |
-
self.window_size = window_size
|
33 |
-
|
34 |
-
self.drop = nn.Dropout(p_dropout)
|
35 |
-
self.attn_layers = nn.ModuleList()
|
36 |
-
self.norm_layers_1 = nn.ModuleList()
|
37 |
-
self.ffn_layers = nn.ModuleList()
|
38 |
-
self.norm_layers_2 = nn.ModuleList()
|
39 |
-
for i in range(self.n_layers):
|
40 |
-
self.attn_layers.append(
|
41 |
-
MultiHeadAttention(
|
42 |
-
hidden_channels,
|
43 |
-
hidden_channels,
|
44 |
-
n_heads,
|
45 |
-
p_dropout=p_dropout,
|
46 |
-
window_size=window_size,
|
47 |
-
)
|
48 |
-
)
|
49 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
50 |
-
self.ffn_layers.append(
|
51 |
-
FFN(
|
52 |
-
hidden_channels,
|
53 |
-
hidden_channels,
|
54 |
-
filter_channels,
|
55 |
-
kernel_size,
|
56 |
-
p_dropout=p_dropout,
|
57 |
-
)
|
58 |
-
)
|
59 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
60 |
-
|
61 |
-
def forward(self, x, x_mask):
|
62 |
-
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
63 |
-
x = x * x_mask
|
64 |
-
for i in range(self.n_layers):
|
65 |
-
y = self.attn_layers[i](x, x, attn_mask)
|
66 |
-
y = self.drop(y)
|
67 |
-
x = self.norm_layers_1[i](x + y)
|
68 |
-
|
69 |
-
y = self.ffn_layers[i](x, x_mask)
|
70 |
-
y = self.drop(y)
|
71 |
-
x = self.norm_layers_2[i](x + y)
|
72 |
-
x = x * x_mask
|
73 |
-
return x
|
74 |
-
|
75 |
-
|
76 |
-
class Decoder(nn.Module):
|
77 |
-
def __init__(
|
78 |
-
self,
|
79 |
-
hidden_channels,
|
80 |
-
filter_channels,
|
81 |
-
n_heads,
|
82 |
-
n_layers,
|
83 |
-
kernel_size=1,
|
84 |
-
p_dropout=0.0,
|
85 |
-
proximal_bias=False,
|
86 |
-
proximal_init=True,
|
87 |
-
**kwargs
|
88 |
-
):
|
89 |
-
super().__init__()
|
90 |
-
self.hidden_channels = hidden_channels
|
91 |
-
self.filter_channels = filter_channels
|
92 |
-
self.n_heads = n_heads
|
93 |
-
self.n_layers = n_layers
|
94 |
-
self.kernel_size = kernel_size
|
95 |
-
self.p_dropout = p_dropout
|
96 |
-
self.proximal_bias = proximal_bias
|
97 |
-
self.proximal_init = proximal_init
|
98 |
-
|
99 |
-
self.drop = nn.Dropout(p_dropout)
|
100 |
-
self.self_attn_layers = nn.ModuleList()
|
101 |
-
self.norm_layers_0 = nn.ModuleList()
|
102 |
-
self.encdec_attn_layers = nn.ModuleList()
|
103 |
-
self.norm_layers_1 = nn.ModuleList()
|
104 |
-
self.ffn_layers = nn.ModuleList()
|
105 |
-
self.norm_layers_2 = nn.ModuleList()
|
106 |
-
for i in range(self.n_layers):
|
107 |
-
self.self_attn_layers.append(
|
108 |
-
MultiHeadAttention(
|
109 |
-
hidden_channels,
|
110 |
-
hidden_channels,
|
111 |
-
n_heads,
|
112 |
-
p_dropout=p_dropout,
|
113 |
-
proximal_bias=proximal_bias,
|
114 |
-
proximal_init=proximal_init,
|
115 |
-
)
|
116 |
-
)
|
117 |
-
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
118 |
-
self.encdec_attn_layers.append(
|
119 |
-
MultiHeadAttention(
|
120 |
-
hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
|
121 |
-
)
|
122 |
-
)
|
123 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
124 |
-
self.ffn_layers.append(
|
125 |
-
FFN(
|
126 |
-
hidden_channels,
|
127 |
-
hidden_channels,
|
128 |
-
filter_channels,
|
129 |
-
kernel_size,
|
130 |
-
p_dropout=p_dropout,
|
131 |
-
causal=True,
|
132 |
-
)
|
133 |
-
)
|
134 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
135 |
-
|
136 |
-
def forward(self, x, x_mask, h, h_mask):
|
137 |
-
"""
|
138 |
-
x: decoder input
|
139 |
-
h: encoder output
|
140 |
-
"""
|
141 |
-
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
|
142 |
-
device=x.device, dtype=x.dtype
|
143 |
-
)
|
144 |
-
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
145 |
-
x = x * x_mask
|
146 |
-
for i in range(self.n_layers):
|
147 |
-
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
148 |
-
y = self.drop(y)
|
149 |
-
x = self.norm_layers_0[i](x + y)
|
150 |
-
|
151 |
-
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
|
152 |
-
y = self.drop(y)
|
153 |
-
x = self.norm_layers_1[i](x + y)
|
154 |
-
|
155 |
-
y = self.ffn_layers[i](x, x_mask)
|
156 |
-
y = self.drop(y)
|
157 |
-
x = self.norm_layers_2[i](x + y)
|
158 |
-
x = x * x_mask
|
159 |
-
return x
|
160 |
-
|
161 |
-
|
162 |
-
class MultiHeadAttention(nn.Module):
|
163 |
-
def __init__(
|
164 |
-
self,
|
165 |
-
channels,
|
166 |
-
out_channels,
|
167 |
-
n_heads,
|
168 |
-
p_dropout=0.0,
|
169 |
-
window_size=None,
|
170 |
-
heads_share=True,
|
171 |
-
block_length=None,
|
172 |
-
proximal_bias=False,
|
173 |
-
proximal_init=False,
|
174 |
-
):
|
175 |
-
super().__init__()
|
176 |
-
assert channels % n_heads == 0
|
177 |
-
|
178 |
-
self.channels = channels
|
179 |
-
self.out_channels = out_channels
|
180 |
-
self.n_heads = n_heads
|
181 |
-
self.p_dropout = p_dropout
|
182 |
-
self.window_size = window_size
|
183 |
-
self.heads_share = heads_share
|
184 |
-
self.block_length = block_length
|
185 |
-
self.proximal_bias = proximal_bias
|
186 |
-
self.proximal_init = proximal_init
|
187 |
-
self.attn = None
|
188 |
-
|
189 |
-
self.k_channels = channels // n_heads
|
190 |
-
self.conv_q = nn.Conv1d(channels, channels, 1)
|
191 |
-
self.conv_k = nn.Conv1d(channels, channels, 1)
|
192 |
-
self.conv_v = nn.Conv1d(channels, channels, 1)
|
193 |
-
self.conv_o = nn.Conv1d(channels, out_channels, 1)
|
194 |
-
self.drop = nn.Dropout(p_dropout)
|
195 |
-
|
196 |
-
if window_size is not None:
|
197 |
-
n_heads_rel = 1 if heads_share else n_heads
|
198 |
-
rel_stddev = self.k_channels**-0.5
|
199 |
-
self.emb_rel_k = nn.Parameter(
|
200 |
-
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
|
201 |
-
* rel_stddev
|
202 |
-
)
|
203 |
-
self.emb_rel_v = nn.Parameter(
|
204 |
-
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
|
205 |
-
* rel_stddev
|
206 |
-
)
|
207 |
-
|
208 |
-
nn.init.xavier_uniform_(self.conv_q.weight)
|
209 |
-
nn.init.xavier_uniform_(self.conv_k.weight)
|
210 |
-
nn.init.xavier_uniform_(self.conv_v.weight)
|
211 |
-
if proximal_init:
|
212 |
-
with torch.no_grad():
|
213 |
-
self.conv_k.weight.copy_(self.conv_q.weight)
|
214 |
-
self.conv_k.bias.copy_(self.conv_q.bias)
|
215 |
-
|
216 |
-
def forward(self, x, c, attn_mask=None):
|
217 |
-
q = self.conv_q(x)
|
218 |
-
k = self.conv_k(c)
|
219 |
-
v = self.conv_v(c)
|
220 |
-
|
221 |
-
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
222 |
-
|
223 |
-
x = self.conv_o(x)
|
224 |
-
return x
|
225 |
-
|
226 |
-
def attention(self, query, key, value, mask=None):
|
227 |
-
# reshape [b, d, t] -> [b, n_h, t, d_k]
|
228 |
-
b, d, t_s, t_t = (*key.size(), query.size(2))
|
229 |
-
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
|
230 |
-
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
231 |
-
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
232 |
-
|
233 |
-
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
|
234 |
-
if self.window_size is not None:
|
235 |
-
assert (
|
236 |
-
t_s == t_t
|
237 |
-
), "Relative attention is only available for self-attention."
|
238 |
-
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
|
239 |
-
rel_logits = self._matmul_with_relative_keys(
|
240 |
-
query / math.sqrt(self.k_channels), key_relative_embeddings
|
241 |
-
)
|
242 |
-
scores_local = self._relative_position_to_absolute_position(rel_logits)
|
243 |
-
scores = scores + scores_local
|
244 |
-
if self.proximal_bias:
|
245 |
-
assert t_s == t_t, "Proximal bias is only available for self-attention."
|
246 |
-
scores = scores + self._attention_bias_proximal(t_s).to(
|
247 |
-
device=scores.device, dtype=scores.dtype
|
248 |
-
)
|
249 |
-
if mask is not None:
|
250 |
-
scores = scores.masked_fill(mask == 0, -1e4)
|
251 |
-
if self.block_length is not None:
|
252 |
-
assert (
|
253 |
-
t_s == t_t
|
254 |
-
), "Local attention is only available for self-attention."
|
255 |
-
block_mask = (
|
256 |
-
torch.ones_like(scores)
|
257 |
-
.triu(-self.block_length)
|
258 |
-
.tril(self.block_length)
|
259 |
-
)
|
260 |
-
scores = scores.masked_fill(block_mask == 0, -1e4)
|
261 |
-
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
|
262 |
-
p_attn = self.drop(p_attn)
|
263 |
-
output = torch.matmul(p_attn, value)
|
264 |
-
if self.window_size is not None:
|
265 |
-
relative_weights = self._absolute_position_to_relative_position(p_attn)
|
266 |
-
value_relative_embeddings = self._get_relative_embeddings(
|
267 |
-
self.emb_rel_v, t_s
|
268 |
-
)
|
269 |
-
output = output + self._matmul_with_relative_values(
|
270 |
-
relative_weights, value_relative_embeddings
|
271 |
-
)
|
272 |
-
output = (
|
273 |
-
output.transpose(2, 3).contiguous().view(b, d, t_t)
|
274 |
-
) # [b, n_h, t_t, d_k] -> [b, d, t_t]
|
275 |
-
return output, p_attn
|
276 |
-
|
277 |
-
def _matmul_with_relative_values(self, x, y):
|
278 |
-
"""
|
279 |
-
x: [b, h, l, m]
|
280 |
-
y: [h or 1, m, d]
|
281 |
-
ret: [b, h, l, d]
|
282 |
-
"""
|
283 |
-
ret = torch.matmul(x, y.unsqueeze(0))
|
284 |
-
return ret
|
285 |
-
|
286 |
-
def _matmul_with_relative_keys(self, x, y):
|
287 |
-
"""
|
288 |
-
x: [b, h, l, d]
|
289 |
-
y: [h or 1, m, d]
|
290 |
-
ret: [b, h, l, m]
|
291 |
-
"""
|
292 |
-
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
|
293 |
-
return ret
|
294 |
-
|
295 |
-
def _get_relative_embeddings(self, relative_embeddings, length):
|
296 |
-
max_relative_position = 2 * self.window_size + 1
|
297 |
-
# Pad first before slice to avoid using cond ops.
|
298 |
-
pad_length = max(length - (self.window_size + 1), 0)
|
299 |
-
slice_start_position = max((self.window_size + 1) - length, 0)
|
300 |
-
slice_end_position = slice_start_position + 2 * length - 1
|
301 |
-
if pad_length > 0:
|
302 |
-
padded_relative_embeddings = F.pad(
|
303 |
-
relative_embeddings,
|
304 |
-
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
|
305 |
-
)
|
306 |
-
else:
|
307 |
-
padded_relative_embeddings = relative_embeddings
|
308 |
-
used_relative_embeddings = padded_relative_embeddings[
|
309 |
-
:, slice_start_position:slice_end_position
|
310 |
-
]
|
311 |
-
return used_relative_embeddings
|
312 |
-
|
313 |
-
def _relative_position_to_absolute_position(self, x):
|
314 |
-
"""
|
315 |
-
x: [b, h, l, 2*l-1]
|
316 |
-
ret: [b, h, l, l]
|
317 |
-
"""
|
318 |
-
batch, heads, length, _ = x.size()
|
319 |
-
# Concat columns of pad to shift from relative to absolute indexing.
|
320 |
-
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
|
321 |
-
|
322 |
-
# Concat extra elements so to add up to shape (len+1, 2*len-1).
|
323 |
-
x_flat = x.view([batch, heads, length * 2 * length])
|
324 |
-
x_flat = F.pad(
|
325 |
-
x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
|
326 |
-
)
|
327 |
-
|
328 |
-
# Reshape and slice out the padded elements.
|
329 |
-
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
|
330 |
-
:, :, :length, length - 1 :
|
331 |
-
]
|
332 |
-
return x_final
|
333 |
-
|
334 |
-
def _absolute_position_to_relative_position(self, x):
|
335 |
-
"""
|
336 |
-
x: [b, h, l, l]
|
337 |
-
ret: [b, h, l, 2*l-1]
|
338 |
-
"""
|
339 |
-
batch, heads, length, _ = x.size()
|
340 |
-
# padd along column
|
341 |
-
x = F.pad(
|
342 |
-
x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
|
343 |
-
)
|
344 |
-
x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
|
345 |
-
# add 0's in the beginning that will skew the elements after reshape
|
346 |
-
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
|
347 |
-
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
|
348 |
-
return x_final
|
349 |
-
|
350 |
-
def _attention_bias_proximal(self, length):
|
351 |
-
"""Bias for self-attention to encourage attention to close positions.
|
352 |
-
Args:
|
353 |
-
length: an integer scalar.
|
354 |
-
Returns:
|
355 |
-
a Tensor with shape [1, 1, length, length]
|
356 |
-
"""
|
357 |
-
r = torch.arange(length, dtype=torch.float32)
|
358 |
-
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
|
359 |
-
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
360 |
-
|
361 |
-
|
362 |
-
class FFN(nn.Module):
|
363 |
-
def __init__(
|
364 |
-
self,
|
365 |
-
in_channels,
|
366 |
-
out_channels,
|
367 |
-
filter_channels,
|
368 |
-
kernel_size,
|
369 |
-
p_dropout=0.0,
|
370 |
-
activation=None,
|
371 |
-
causal=False,
|
372 |
-
):
|
373 |
-
super().__init__()
|
374 |
-
self.in_channels = in_channels
|
375 |
-
self.out_channels = out_channels
|
376 |
-
self.filter_channels = filter_channels
|
377 |
-
self.kernel_size = kernel_size
|
378 |
-
self.p_dropout = p_dropout
|
379 |
-
self.activation = activation
|
380 |
-
self.causal = causal
|
381 |
-
|
382 |
-
if causal:
|
383 |
-
self.padding = self._causal_padding
|
384 |
-
else:
|
385 |
-
self.padding = self._same_padding
|
386 |
-
|
387 |
-
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
|
388 |
-
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
|
389 |
-
self.drop = nn.Dropout(p_dropout)
|
390 |
-
|
391 |
-
def forward(self, x, x_mask):
|
392 |
-
x = self.conv_1(self.padding(x * x_mask))
|
393 |
-
if self.activation == "gelu":
|
394 |
-
x = x * torch.sigmoid(1.702 * x)
|
395 |
-
else:
|
396 |
-
x = torch.relu(x)
|
397 |
-
x = self.drop(x)
|
398 |
-
x = self.conv_2(self.padding(x * x_mask))
|
399 |
-
return x * x_mask
|
400 |
-
|
401 |
-
def _causal_padding(self, x):
|
402 |
-
if self.kernel_size == 1:
|
403 |
-
return x
|
404 |
-
pad_l = self.kernel_size - 1
|
405 |
-
pad_r = 0
|
406 |
-
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
407 |
-
x = F.pad(x, commons.convert_pad_shape(padding))
|
408 |
-
return x
|
409 |
-
|
410 |
-
def _same_padding(self, x):
|
411 |
-
if self.kernel_size == 1:
|
412 |
-
return x
|
413 |
-
pad_l = (self.kernel_size - 1) // 2
|
414 |
-
pad_r = self.kernel_size // 2
|
415 |
-
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
|
416 |
-
x = F.pad(x, commons.convert_pad_shape(padding))
|
417 |
-
return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AI-Zero-to-Hero/02-H5-AR-VR-IOT/index.html
DELETED
@@ -1,66 +0,0 @@
|
|
1 |
-
<!DOCTYPE html>
|
2 |
-
<html>
|
3 |
-
<head>
|
4 |
-
<title>Dynamic Lights - A-Frame</title>
|
5 |
-
<meta name="description" content="Dynamic Lights - A-Frame">
|
6 |
-
<script src="https://aframe.io/releases/1.0.4/aframe.min.js"></script>
|
7 |
-
<script src="https://unpkg.com/[email protected]/dist/aframe-randomizer-components.min.js"></script>
|
8 |
-
<script src="https://unpkg.com/[email protected]/dist/aframe-entity-generator-component.min.js"></script>
|
9 |
-
<script>
|
10 |
-
AFRAME.registerComponent('random-material', {
|
11 |
-
init: function () {
|
12 |
-
this.el.setAttribute('material', {
|
13 |
-
color: this.getRandomColor(),
|
14 |
-
metalness: Math.random(),
|
15 |
-
roughness: Math.random()
|
16 |
-
});
|
17 |
-
},
|
18 |
-
getRandomColor: function () {
|
19 |
-
var letters = '0123456789ABCDEF'.split('');
|
20 |
-
var color = '#';
|
21 |
-
for (var i = 0; i < 6; i++) {
|
22 |
-
color += letters[Math.floor(Math.random() * 16)];
|
23 |
-
}
|
24 |
-
return color;
|
25 |
-
}
|
26 |
-
});
|
27 |
-
AFRAME.registerComponent('random-torus-knot', {
|
28 |
-
init: function () {
|
29 |
-
this.el.setAttribute('geometry', {
|
30 |
-
primitive: 'torusKnot',
|
31 |
-
radius: Math.random() * 10,
|
32 |
-
radiusTubular: Math.random() * .75,
|
33 |
-
p: Math.round(Math.random() * 10),
|
34 |
-
q: Math.round(Math.random() * 10)
|
35 |
-
});
|
36 |
-
}
|
37 |
-
});
|
38 |
-
</script>
|
39 |
-
</head>
|
40 |
-
<body>
|
41 |
-
<a-scene background="color: #111">
|
42 |
-
<a-assets>
|
43 |
-
<a-mixin id="light"
|
44 |
-
geometry="primitive: sphere; radius: 1.5"
|
45 |
-
material="color: #FFF; shader: flat"
|
46 |
-
light="color: #DDDDFF; distance: 120; intensity: 2; type: point"></a-mixin>
|
47 |
-
<a-mixin id="torusKnot"
|
48 |
-
random-torus-knot
|
49 |
-
random-material
|
50 |
-
random-position="min: -60 -60 -80; max: 60 60 40"></a-mixin>
|
51 |
-
</a-assets>
|
52 |
-
|
53 |
-
<!-- Use entity-generator component to generate 120 entities with the torusKnot mixin. -->
|
54 |
-
<a-entity entity-generator="mixin: torusKnot; num: 120"></a-entity>
|
55 |
-
|
56 |
-
<!-- Lights. -->
|
57 |
-
<a-entity animation="property: rotation; to: 0 0 360; dur: 4000; easing: linear; loop: true">
|
58 |
-
<a-entity mixin="light" position="30 0 0"></a-entity>
|
59 |
-
</a-entity>
|
60 |
-
|
61 |
-
<a-entity animation="property: rotation; to: 360 0 0; dur: 4000; easing: linear; loop: true">
|
62 |
-
<a-entity mixin="light" position="0 0 40"></a-entity>
|
63 |
-
</a-entity>
|
64 |
-
</a-scene>
|
65 |
-
</body>
|
66 |
-
</html>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/Image-Animation-using-Thin-Plate-Spline-Motion-Model/style.css
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
h1 {
|
2 |
-
text-align: center;
|
3 |
-
}
|
4 |
-
img#overview {
|
5 |
-
max-width: 1000px;
|
6 |
-
max-height: 600px;
|
7 |
-
display: block;
|
8 |
-
margin: auto;
|
9 |
-
}
|
10 |
-
img#style-image {
|
11 |
-
max-width: 1000px;
|
12 |
-
max-height: 600px;
|
13 |
-
display: block;
|
14 |
-
margin: auto;
|
15 |
-
}
|
16 |
-
img#visitor-badge {
|
17 |
-
display: block;
|
18 |
-
margin: auto;
|
19 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/timm_model.py
DELETED
@@ -1,112 +0,0 @@
|
|
1 |
-
""" timm model adapter
|
2 |
-
|
3 |
-
Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
|
4 |
-
"""
|
5 |
-
from collections import OrderedDict
|
6 |
-
|
7 |
-
import torch.nn as nn
|
8 |
-
|
9 |
-
try:
|
10 |
-
import timm
|
11 |
-
from timm.models.layers import Mlp, to_2tuple
|
12 |
-
from timm.models.layers.attention_pool2d import RotAttentionPool2d
|
13 |
-
from timm.models.layers.attention_pool2d import (
|
14 |
-
AttentionPool2d as AbsAttentionPool2d,
|
15 |
-
)
|
16 |
-
except ImportError as e:
|
17 |
-
timm = None
|
18 |
-
|
19 |
-
from .utils import freeze_batch_norm_2d
|
20 |
-
|
21 |
-
|
22 |
-
class TimmModel(nn.Module):
|
23 |
-
"""timm model adapter
|
24 |
-
# FIXME this adapter is a work in progress, may change in ways that break weight compat
|
25 |
-
"""
|
26 |
-
|
27 |
-
def __init__(
|
28 |
-
self,
|
29 |
-
model_name,
|
30 |
-
embed_dim,
|
31 |
-
image_size=224,
|
32 |
-
pool="avg",
|
33 |
-
proj="linear",
|
34 |
-
drop=0.0,
|
35 |
-
pretrained=False,
|
36 |
-
):
|
37 |
-
super().__init__()
|
38 |
-
if timm is None:
|
39 |
-
raise RuntimeError("Please `pip install timm` to use timm models.")
|
40 |
-
|
41 |
-
self.image_size = to_2tuple(image_size)
|
42 |
-
self.trunk = timm.create_model(model_name, pretrained=pretrained)
|
43 |
-
feat_size = self.trunk.default_cfg.get("pool_size", None)
|
44 |
-
feature_ndim = 1 if not feat_size else 2
|
45 |
-
if pool in ("abs_attn", "rot_attn"):
|
46 |
-
assert feature_ndim == 2
|
47 |
-
# if attn pooling used, remove both classifier and default pool
|
48 |
-
self.trunk.reset_classifier(0, global_pool="")
|
49 |
-
else:
|
50 |
-
# reset global pool if pool config set, otherwise leave as network default
|
51 |
-
reset_kwargs = dict(global_pool=pool) if pool else {}
|
52 |
-
self.trunk.reset_classifier(0, **reset_kwargs)
|
53 |
-
prev_chs = self.trunk.num_features
|
54 |
-
|
55 |
-
head_layers = OrderedDict()
|
56 |
-
if pool == "abs_attn":
|
57 |
-
head_layers["pool"] = AbsAttentionPool2d(
|
58 |
-
prev_chs, feat_size=feat_size, out_features=embed_dim
|
59 |
-
)
|
60 |
-
prev_chs = embed_dim
|
61 |
-
elif pool == "rot_attn":
|
62 |
-
head_layers["pool"] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
|
63 |
-
prev_chs = embed_dim
|
64 |
-
else:
|
65 |
-
assert proj, "projection layer needed if non-attention pooling is used."
|
66 |
-
|
67 |
-
# NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
|
68 |
-
if proj == "linear":
|
69 |
-
head_layers["drop"] = nn.Dropout(drop)
|
70 |
-
head_layers["proj"] = nn.Linear(prev_chs, embed_dim)
|
71 |
-
elif proj == "mlp":
|
72 |
-
head_layers["mlp"] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop)
|
73 |
-
|
74 |
-
self.head = nn.Sequential(head_layers)
|
75 |
-
|
76 |
-
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
|
77 |
-
"""lock modules
|
78 |
-
Args:
|
79 |
-
unlocked_groups (int): leave last n layer groups unlocked (default: 0)
|
80 |
-
"""
|
81 |
-
if not unlocked_groups:
|
82 |
-
# lock full model
|
83 |
-
for param in self.trunk.parameters():
|
84 |
-
param.requires_grad = False
|
85 |
-
if freeze_bn_stats:
|
86 |
-
freeze_batch_norm_2d(self.trunk)
|
87 |
-
else:
|
88 |
-
# NOTE: partial freeze requires latest timm (master) branch and is subject to change
|
89 |
-
try:
|
90 |
-
# FIXME import here until API stable and in an official release
|
91 |
-
from timm.models.helpers import group_parameters, group_modules
|
92 |
-
except ImportError:
|
93 |
-
raise RuntimeError(
|
94 |
-
"Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`"
|
95 |
-
)
|
96 |
-
matcher = self.trunk.group_matcher()
|
97 |
-
gparams = group_parameters(self.trunk, matcher)
|
98 |
-
max_layer_id = max(gparams.keys())
|
99 |
-
max_layer_id = max_layer_id - unlocked_groups
|
100 |
-
for group_idx in range(max_layer_id + 1):
|
101 |
-
group = gparams[group_idx]
|
102 |
-
for param in group:
|
103 |
-
self.trunk.get_parameter(param).requires_grad = False
|
104 |
-
if freeze_bn_stats:
|
105 |
-
gmodules = group_modules(self.trunk, matcher, reverse=True)
|
106 |
-
gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
|
107 |
-
freeze_batch_norm_2d(self.trunk, gmodules)
|
108 |
-
|
109 |
-
def forward(self, x):
|
110 |
-
x = self.trunk(x)
|
111 |
-
x = self.head(x)
|
112 |
-
return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/speech_base.py
DELETED
@@ -1,373 +0,0 @@
|
|
1 |
-
import filecmp
|
2 |
-
import os
|
3 |
-
import traceback
|
4 |
-
import numpy as np
|
5 |
-
import pandas as pd
|
6 |
-
import torch
|
7 |
-
import torch.distributed as dist
|
8 |
-
import torch.nn.functional as F
|
9 |
-
import torch.optim
|
10 |
-
import torch.utils.data
|
11 |
-
import yaml
|
12 |
-
from tqdm import tqdm
|
13 |
-
import utils
|
14 |
-
from tasks.tts.dataset_utils import BaseSpeechDataset
|
15 |
-
from tasks.tts.utils. import parse_mel_losses, parse_dataset_configs, load_data_preprocessor, load_data_binarizer
|
16 |
-
from tasks.tts.vocoder_infer.base_vocoder import BaseVocoder, get_vocoder_cls
|
17 |
-
from text_to_speech.utils.audio.align import mel2token_to_dur
|
18 |
-
from text_to_speech.utils.audio.io import save_wav
|
19 |
-
from text_to_speech.utils.audio.pitch_extractors import extract_pitch_simple
|
20 |
-
from text_to_speech.utils.commons.base_task import BaseTask
|
21 |
-
from text_to_speech.utils.commons.ckpt_utils import load_ckpt
|
22 |
-
from text_to_speech.utils.commons.dataset_utils import data_loader, BaseConcatDataset
|
23 |
-
from text_to_speech.utils.commons.hparams import hparams
|
24 |
-
from text_to_speech.utils.commons.multiprocess_utils import MultiprocessManager
|
25 |
-
from text_to_speech.utils.commons.tensor_utils import tensors_to_scalars
|
26 |
-
from text_to_speech.utils.metrics.ssim import ssim
|
27 |
-
from text_to_speech.utils.nn.model_utils import print_arch
|
28 |
-
from text_to_speech.utils.nn.schedulers import RSQRTSchedule, NoneSchedule, WarmupSchedule
|
29 |
-
from text_to_speech.utils.nn.seq_utils import weights_nonzero_speech
|
30 |
-
from text_to_speech.utils.plot.plot import spec_to_figure
|
31 |
-
from text_to_speech.utils.text.text_encoder import build_token_encoder
|
32 |
-
import matplotlib.pyplot as plt
|
33 |
-
|
34 |
-
|
35 |
-
class SpeechBaseTask(BaseTask):
|
36 |
-
def __init__(self, *args, **kwargs):
|
37 |
-
super().__init__(*args, **kwargs)
|
38 |
-
self.dataset_cls = BaseSpeechDataset
|
39 |
-
self.vocoder = None
|
40 |
-
data_dir = hparams['binary_data_dir']
|
41 |
-
if not hparams['use_word_input']:
|
42 |
-
self.token_encoder = build_token_encoder(f'{data_dir}/phone_set.json')
|
43 |
-
else:
|
44 |
-
self.token_encoder = build_token_encoder(f'{data_dir}/word_set.json')
|
45 |
-
self.padding_idx = self.token_encoder.pad()
|
46 |
-
self.eos_idx = self.token_encoder.eos()
|
47 |
-
self.seg_idx = self.token_encoder.seg()
|
48 |
-
self.saving_result_pool = None
|
49 |
-
self.saving_results_futures = None
|
50 |
-
self.mel_losses = parse_mel_losses()
|
51 |
-
self.max_tokens, self.max_sentences, \
|
52 |
-
self.max_valid_tokens, self.max_valid_sentences = parse_dataset_configs()
|
53 |
-
|
54 |
-
##########################
|
55 |
-
# datasets
|
56 |
-
##########################
|
57 |
-
@data_loader
|
58 |
-
def train_dataloader(self):
|
59 |
-
if hparams['train_sets'] != '':
|
60 |
-
train_sets = hparams['train_sets'].split("|")
|
61 |
-
# check if all train_sets have the same spk map and dictionary
|
62 |
-
binary_data_dir = hparams['binary_data_dir']
|
63 |
-
file_to_cmp = ['phone_set.json']
|
64 |
-
if os.path.exists(f'{binary_data_dir}/word_set.json'):
|
65 |
-
file_to_cmp.append('word_set.json')
|
66 |
-
if hparams['use_spk_id']:
|
67 |
-
file_to_cmp.append('spk_map.json')
|
68 |
-
for f in file_to_cmp:
|
69 |
-
for ds_name in train_sets:
|
70 |
-
base_file = os.path.join(binary_data_dir, f)
|
71 |
-
ds_file = os.path.join(ds_name, f)
|
72 |
-
assert filecmp.cmp(base_file, ds_file), \
|
73 |
-
f'{f} in {ds_name} is not same with that in {binary_data_dir}.'
|
74 |
-
train_dataset = BaseConcatDataset([
|
75 |
-
self.dataset_cls(prefix='train', shuffle=True, data_dir=ds_name) for ds_name in train_sets])
|
76 |
-
else:
|
77 |
-
train_dataset = self.dataset_cls(prefix=hparams['train_set_name'], shuffle=True)
|
78 |
-
return self.build_dataloader(train_dataset, True, self.max_tokens, self.max_sentences,
|
79 |
-
endless=hparams['endless_ds'])
|
80 |
-
|
81 |
-
@data_loader
|
82 |
-
def val_dataloader(self):
|
83 |
-
valid_dataset = self.dataset_cls(prefix=hparams['valid_set_name'], shuffle=False)
|
84 |
-
return self.build_dataloader(valid_dataset, False, self.max_valid_tokens, self.max_valid_sentences,
|
85 |
-
batch_by_size=False)
|
86 |
-
|
87 |
-
@data_loader
|
88 |
-
def test_dataloader(self):
|
89 |
-
test_dataset = self.dataset_cls(prefix=hparams['test_set_name'], shuffle=False)
|
90 |
-
self.test_dl = self.build_dataloader(
|
91 |
-
test_dataset, False, self.max_valid_tokens, self.max_valid_sentences, batch_by_size=False)
|
92 |
-
return self.test_dl
|
93 |
-
|
94 |
-
def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None,
|
95 |
-
required_batch_size_multiple=-1, endless=False, batch_by_size=True):
|
96 |
-
devices_cnt = torch.cuda.device_count()
|
97 |
-
if devices_cnt == 0:
|
98 |
-
devices_cnt = 1
|
99 |
-
if required_batch_size_multiple == -1:
|
100 |
-
required_batch_size_multiple = devices_cnt
|
101 |
-
|
102 |
-
def shuffle_batches(batches):
|
103 |
-
np.random.shuffle(batches)
|
104 |
-
return batches
|
105 |
-
|
106 |
-
if max_tokens is not None:
|
107 |
-
max_tokens *= devices_cnt
|
108 |
-
if max_sentences is not None:
|
109 |
-
max_sentences *= devices_cnt
|
110 |
-
indices = dataset.ordered_indices()
|
111 |
-
if batch_by_size:
|
112 |
-
batch_sampler = utils.commons.dataset_utils.batch_by_size(
|
113 |
-
indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
|
114 |
-
required_batch_size_multiple=required_batch_size_multiple,
|
115 |
-
)
|
116 |
-
else:
|
117 |
-
batch_sampler = []
|
118 |
-
for i in range(0, len(indices), max_sentences):
|
119 |
-
batch_sampler.append(indices[i:i + max_sentences])
|
120 |
-
|
121 |
-
if shuffle:
|
122 |
-
batches = shuffle_batches(list(batch_sampler))
|
123 |
-
if endless:
|
124 |
-
batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))]
|
125 |
-
else:
|
126 |
-
batches = batch_sampler
|
127 |
-
if endless:
|
128 |
-
batches = [b for _ in range(1000) for b in batches]
|
129 |
-
num_workers = dataset.num_workers
|
130 |
-
if self.trainer.use_ddp:
|
131 |
-
num_replicas = dist.get_world_size()
|
132 |
-
rank = dist.get_rank()
|
133 |
-
batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0]
|
134 |
-
return torch.utils.data.DataLoader(dataset,
|
135 |
-
collate_fn=dataset.collater,
|
136 |
-
batch_sampler=batches,
|
137 |
-
num_workers=num_workers,
|
138 |
-
pin_memory=False)
|
139 |
-
|
140 |
-
##########################
|
141 |
-
# scheduler and optimizer
|
142 |
-
##########################
|
143 |
-
def build_model(self):
|
144 |
-
self.build_tts_model()
|
145 |
-
if hparams['load_ckpt'] != '':
|
146 |
-
load_ckpt(self.model, hparams['load_ckpt'])
|
147 |
-
print_arch(self.model)
|
148 |
-
return self.model
|
149 |
-
|
150 |
-
def build_tts_model(self):
|
151 |
-
raise NotImplementedError
|
152 |
-
|
153 |
-
def build_scheduler(self, optimizer):
|
154 |
-
if hparams['scheduler'] == 'rsqrt':
|
155 |
-
return RSQRTSchedule(optimizer, hparams['lr'], hparams['warmup_updates'], hparams['hidden_size'])
|
156 |
-
elif hparams['scheduler'] == 'warmup':
|
157 |
-
return WarmupSchedule(optimizer, hparams['lr'], hparams['warmup_updates'])
|
158 |
-
elif hparams['scheduler'] == 'step_lr':
|
159 |
-
return torch.optim.lr_scheduler.StepLR(
|
160 |
-
optimizer=optimizer, step_size=500, gamma=0.998)
|
161 |
-
else:
|
162 |
-
return NoneSchedule(optimizer, hparams['lr'])
|
163 |
-
|
164 |
-
def build_optimizer(self, model):
|
165 |
-
self.optimizer = optimizer = torch.optim.AdamW(
|
166 |
-
model.parameters(),
|
167 |
-
lr=hparams['lr'],
|
168 |
-
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
|
169 |
-
weight_decay=hparams['weight_decay'])
|
170 |
-
|
171 |
-
return optimizer
|
172 |
-
|
173 |
-
##########################
|
174 |
-
# training and validation
|
175 |
-
##########################
|
176 |
-
def _training_step(self, sample, batch_idx, _):
|
177 |
-
loss_output, _ = self.run_model(sample)
|
178 |
-
total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad])
|
179 |
-
loss_output['batch_size'] = sample['txt_tokens'].size()[0]
|
180 |
-
return total_loss, loss_output
|
181 |
-
|
182 |
-
def run_model(self, sample, infer=False):
|
183 |
-
"""
|
184 |
-
|
185 |
-
:param sample: a batch of data
|
186 |
-
:param infer: bool, run in infer mode
|
187 |
-
:return:
|
188 |
-
if not infer:
|
189 |
-
return losses, model_out
|
190 |
-
if infer:
|
191 |
-
return model_out
|
192 |
-
"""
|
193 |
-
raise NotImplementedError
|
194 |
-
|
195 |
-
def validation_start(self):
|
196 |
-
self.vocoder = get_vocoder_cls(hparams['vocoder'])()
|
197 |
-
|
198 |
-
def validation_step(self, sample, batch_idx):
|
199 |
-
outputs = {}
|
200 |
-
outputs['losses'] = {}
|
201 |
-
outputs['losses'], model_out = self.run_model(sample)
|
202 |
-
outputs['total_loss'] = sum(outputs['losses'].values())
|
203 |
-
outputs['nsamples'] = sample['nsamples']
|
204 |
-
outputs = tensors_to_scalars(outputs)
|
205 |
-
if self.global_step % hparams['valid_infer_interval'] == 0 \
|
206 |
-
and batch_idx < hparams['num_valid_plots']:
|
207 |
-
self.save_valid_result(sample, batch_idx, model_out)
|
208 |
-
return outputs
|
209 |
-
|
210 |
-
def validation_end(self, outputs):
|
211 |
-
self.vocoder = None
|
212 |
-
return super(SpeechBaseTask, self).validation_end(outputs)
|
213 |
-
|
214 |
-
def save_valid_result(self, sample, batch_idx, model_out):
|
215 |
-
raise NotImplementedError
|
216 |
-
|
217 |
-
##########################
|
218 |
-
# losses
|
219 |
-
##########################
|
220 |
-
def add_mel_loss(self, mel_out, target, losses, postfix=''):
|
221 |
-
for loss_name, lambd in self.mel_losses.items():
|
222 |
-
losses[f'{loss_name}{postfix}'] = getattr(self, f'{loss_name}_loss')(mel_out, target) * lambd
|
223 |
-
|
224 |
-
def l1_loss(self, decoder_output, target):
|
225 |
-
# decoder_output : B x T x n_mel
|
226 |
-
# target : B x T x n_mel
|
227 |
-
l1_loss = F.l1_loss(decoder_output, target, reduction='none')
|
228 |
-
weights = weights_nonzero_speech(target)
|
229 |
-
l1_loss = (l1_loss * weights).sum() / weights.sum()
|
230 |
-
return l1_loss
|
231 |
-
|
232 |
-
def mse_loss(self, decoder_output, target):
|
233 |
-
# decoder_output : B x T x n_mel
|
234 |
-
# target : B x T x n_mel
|
235 |
-
assert decoder_output.shape == target.shape
|
236 |
-
mse_loss = F.mse_loss(decoder_output, target, reduction='none')
|
237 |
-
weights = weights_nonzero_speech(target)
|
238 |
-
mse_loss = (mse_loss * weights).sum() / weights.sum()
|
239 |
-
return mse_loss
|
240 |
-
|
241 |
-
def ssim_loss(self, decoder_output, target, bias=6.0):
|
242 |
-
# decoder_output : B x T x n_mel
|
243 |
-
# target : B x T x n_mel
|
244 |
-
assert decoder_output.shape == target.shape
|
245 |
-
weights = weights_nonzero_speech(target)
|
246 |
-
decoder_output = decoder_output[:, None] + bias
|
247 |
-
target = target[:, None] + bias
|
248 |
-
ssim_loss = 1 - ssim(decoder_output, target, size_average=False)
|
249 |
-
ssim_loss = (ssim_loss * weights).sum() / weights.sum()
|
250 |
-
return ssim_loss
|
251 |
-
|
252 |
-
def plot_mel(self, batch_idx, spec_out, spec_gt=None, name=None, title='', f0s=None, dur_info=None):
|
253 |
-
vmin = hparams['mel_vmin']
|
254 |
-
vmax = hparams['mel_vmax']
|
255 |
-
if len(spec_out.shape) == 3:
|
256 |
-
spec_out = spec_out[0]
|
257 |
-
if isinstance(spec_out, torch.Tensor):
|
258 |
-
spec_out = spec_out.cpu().numpy()
|
259 |
-
if spec_gt is not None:
|
260 |
-
if len(spec_gt.shape) == 3:
|
261 |
-
spec_gt = spec_gt[0]
|
262 |
-
if isinstance(spec_gt, torch.Tensor):
|
263 |
-
spec_gt = spec_gt.cpu().numpy()
|
264 |
-
max_len = max(len(spec_gt), len(spec_out))
|
265 |
-
if max_len - len(spec_gt) > 0:
|
266 |
-
spec_gt = np.pad(spec_gt, [[0, max_len - len(spec_gt)], [0, 0]], mode='constant',
|
267 |
-
constant_values=vmin)
|
268 |
-
if max_len - len(spec_out) > 0:
|
269 |
-
spec_out = np.pad(spec_out, [[0, max_len - len(spec_out)], [0, 0]], mode='constant',
|
270 |
-
constant_values=vmin)
|
271 |
-
spec_out = np.concatenate([spec_out, spec_gt], -1)
|
272 |
-
name = f'mel_val_{batch_idx}' if name is None else name
|
273 |
-
self.logger.add_figure(name, spec_to_figure(
|
274 |
-
spec_out, vmin, vmax, title=title, f0s=f0s, dur_info=dur_info), self.global_step)
|
275 |
-
|
276 |
-
##########################
|
277 |
-
# testing
|
278 |
-
##########################
|
279 |
-
def test_start(self):
|
280 |
-
self.saving_result_pool = MultiprocessManager(int(os.getenv('N_PROC', os.cpu_count())))
|
281 |
-
self.saving_results_futures = []
|
282 |
-
self.gen_dir = os.path.join(
|
283 |
-
hparams['work_dir'], f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}')
|
284 |
-
self.vocoder: BaseVocoder = get_vocoder_cls(hparams['vocoder'])()
|
285 |
-
os.makedirs(self.gen_dir, exist_ok=True)
|
286 |
-
os.makedirs(f'{self.gen_dir}/wavs', exist_ok=True)
|
287 |
-
os.makedirs(f'{self.gen_dir}/plot', exist_ok=True)
|
288 |
-
if hparams.get('save_mel_npy', False):
|
289 |
-
os.makedirs(f'{self.gen_dir}/mel_npy', exist_ok=True)
|
290 |
-
|
291 |
-
def test_step(self, sample, batch_idx):
|
292 |
-
"""
|
293 |
-
|
294 |
-
:param sample:
|
295 |
-
:param batch_idx:
|
296 |
-
:return:
|
297 |
-
"""
|
298 |
-
assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference'
|
299 |
-
outputs = self.run_model(sample, infer=True)
|
300 |
-
text = sample['text'][0]
|
301 |
-
item_name = sample['item_name'][0]
|
302 |
-
tokens = sample['txt_tokens'][0].cpu().numpy()
|
303 |
-
mel_gt = sample['mels'][0].cpu().numpy()
|
304 |
-
mel_pred = outputs['mel_out'][0].cpu().numpy()
|
305 |
-
str_phs = self.token_encoder.decode(tokens, strip_padding=True)
|
306 |
-
base_fn = f'[{self.results_id:06d}][{item_name.replace("%", "_")}][%s]'
|
307 |
-
if text is not None:
|
308 |
-
base_fn += text.replace(":", "$3A")[:80]
|
309 |
-
base_fn = base_fn.replace(' ', '_')
|
310 |
-
gen_dir = self.gen_dir
|
311 |
-
wav_pred = self.vocoder.spec2wav(mel_pred)
|
312 |
-
self.saving_result_pool.add_job(self.save_result, args=[
|
313 |
-
wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs])
|
314 |
-
if hparams['save_gt']:
|
315 |
-
wav_gt = self.vocoder.spec2wav(mel_gt)
|
316 |
-
self.saving_result_pool.add_job(self.save_result, args=[
|
317 |
-
wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs])
|
318 |
-
print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
|
319 |
-
return {
|
320 |
-
'item_name': item_name,
|
321 |
-
'text': text,
|
322 |
-
'ph_tokens': self.token_encoder.decode(tokens.tolist()),
|
323 |
-
'wav_fn_pred': base_fn % 'P',
|
324 |
-
'wav_fn_gt': base_fn % 'G',
|
325 |
-
}
|
326 |
-
|
327 |
-
@staticmethod
|
328 |
-
def save_result(wav_out, mel, base_fn, gen_dir, str_phs=None, mel2ph=None, alignment=None):
|
329 |
-
save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'],
|
330 |
-
norm=hparams['out_wav_norm'])
|
331 |
-
fig = plt.figure(figsize=(14, 10))
|
332 |
-
spec_vmin = hparams['mel_vmin']
|
333 |
-
spec_vmax = hparams['mel_vmax']
|
334 |
-
heatmap = plt.pcolor(mel.T, vmin=spec_vmin, vmax=spec_vmax)
|
335 |
-
fig.colorbar(heatmap)
|
336 |
-
try:
|
337 |
-
f0 = extract_pitch_simple(wav_out)
|
338 |
-
f0 = f0 / 10 * (f0 > 0)
|
339 |
-
plt.plot(f0, c='white', linewidth=1, alpha=0.6)
|
340 |
-
if mel2ph is not None and str_phs is not None:
|
341 |
-
decoded_txt = str_phs.split(" ")
|
342 |
-
dur = mel2token_to_dur(torch.LongTensor(mel2ph)[None, :], len(decoded_txt))[0].numpy()
|
343 |
-
dur = [0] + list(np.cumsum(dur))
|
344 |
-
for i in range(len(dur) - 1):
|
345 |
-
shift = (i % 20) + 1
|
346 |
-
plt.text(dur[i], shift, decoded_txt[i])
|
347 |
-
plt.hlines(shift, dur[i], dur[i + 1], colors='b' if decoded_txt[i] != '|' else 'black')
|
348 |
-
plt.vlines(dur[i], 0, 5, colors='b' if decoded_txt[i] != '|' else 'black',
|
349 |
-
alpha=1, linewidth=1)
|
350 |
-
plt.tight_layout()
|
351 |
-
plt.savefig(f'{gen_dir}/plot/{base_fn}.png', format='png')
|
352 |
-
plt.close(fig)
|
353 |
-
if hparams.get('save_mel_npy', False):
|
354 |
-
np.save(f'{gen_dir}/mel_npy/{base_fn}', mel)
|
355 |
-
if alignment is not None:
|
356 |
-
fig, ax = plt.subplots(figsize=(12, 16))
|
357 |
-
im = ax.imshow(alignment, aspect='auto', origin='lower',
|
358 |
-
interpolation='none')
|
359 |
-
decoded_txt = str_phs.split(" ")
|
360 |
-
ax.set_yticks(np.arange(len(decoded_txt)))
|
361 |
-
ax.set_yticklabels(list(decoded_txt), fontsize=6)
|
362 |
-
fig.colorbar(im, ax=ax)
|
363 |
-
fig.savefig(f'{gen_dir}/attn_plot/{base_fn}_attn.png', format='png')
|
364 |
-
plt.close(fig)
|
365 |
-
except Exception:
|
366 |
-
traceback.print_exc()
|
367 |
-
return None
|
368 |
-
|
369 |
-
def test_end(self, outputs):
|
370 |
-
pd.DataFrame(outputs).to_csv(f'{self.gen_dir}/meta.csv')
|
371 |
-
for _1, _2 in tqdm(self.saving_result_pool.get_results(), total=len(self.saving_result_pool)):
|
372 |
-
pass
|
373 |
-
return {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AILab-CVC/SEED-LLaMA/scripts/seed_llama_inference_14B.py
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
import hydra
|
2 |
-
|
3 |
-
import pyrootutils
|
4 |
-
import os
|
5 |
-
import torch
|
6 |
-
|
7 |
-
from omegaconf import OmegaConf
|
8 |
-
import json
|
9 |
-
from typing import Optional
|
10 |
-
import transformers
|
11 |
-
from PIL import Image
|
12 |
-
from torchvision.transforms.functional import InterpolationMode
|
13 |
-
|
14 |
-
pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
|
15 |
-
|
16 |
-
BOI_TOKEN = '<img>'
|
17 |
-
EOI_TOKEN = '</img>'
|
18 |
-
IMG_TOKEN = '<img_{:05d}>'
|
19 |
-
|
20 |
-
IMG_FLAG = '<image>'
|
21 |
-
NUM_IMG_TOKNES = 32
|
22 |
-
NUM_IMG_CODES = 8192
|
23 |
-
image_id_shift = 32000
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
def generate(tokenizer, input_tokens, generation_config, model):
|
29 |
-
|
30 |
-
input_ids = tokenizer(input_tokens, add_special_tokens=False, return_tensors='pt').input_ids
|
31 |
-
input_ids = input_ids.to("cuda")
|
32 |
    generate_ids = model.generate(
        input_ids=input_ids,
        **generation_config
    )
    generate_ids = generate_ids[0][input_ids.shape[1]:]

    return generate_ids


def decode_image_text(generate_ids, tokenizer, save_path=None):

    boi_list = torch.where(generate_ids == tokenizer(BOI_TOKEN, add_special_tokens=False).input_ids[0])[0]
    eoi_list = torch.where(generate_ids == tokenizer(EOI_TOKEN, add_special_tokens=False).input_ids[0])[0]

    if len(boi_list) == 0 and len(eoi_list) == 0:
        text_ids = generate_ids
        texts = tokenizer.decode(text_ids, skip_special_tokens=True)
        print(texts)

    else:
        boi_index = boi_list[0]
        eoi_index = eoi_list[0]

        text_ids = generate_ids[:boi_index]
        if len(text_ids) != 0:
            texts = tokenizer.decode(text_ids, skip_special_tokens=True)
            print(texts)

        image_ids = (generate_ids[boi_index + 1:eoi_index] - image_id_shift).reshape(1, -1)

        images = tokenizer.decode_image(image_ids)

        images[0].save(save_path)


device = "cuda"

tokenizer_cfg_path = 'configs/tokenizer/seed_llama_tokenizer.yaml'
tokenizer_cfg = OmegaConf.load(tokenizer_cfg_path)
tokenizer = hydra.utils.instantiate(tokenizer_cfg, device=device, load_diffusion=True)

transform_cfg_path = 'configs/transform/clip_transform.yaml'
transform_cfg = OmegaConf.load(transform_cfg_path)
transform = hydra.utils.instantiate(transform_cfg)

model_cfg = OmegaConf.load('configs/llm/seed_llama_14b.yaml')
model = hydra.utils.instantiate(model_cfg, torch_dtype=torch.float16)
model = model.eval().to(device)

generation_config = {
    'temperature': 1.0,
    'num_beams': 1,
    'max_new_tokens': 512,
    'top_p': 0.5,
    'do_sample': True
}

s_token = "[INST] "
e_token = " [/INST]"
sep = "\n"


### visual question answering
image_path = "images/cat.jpg"
image = Image.open(image_path).convert('RGB')
image_tensor = transform(image).to(device)
img_ids = tokenizer.encode_image(image_torch=image_tensor)
img_ids = img_ids.view(-1).cpu().numpy()
img_tokens = BOI_TOKEN + ''.join([IMG_TOKEN.format(item) for item in img_ids]) + EOI_TOKEN

question = "What is this animal?"

input_tokens = tokenizer.bos_token + s_token + img_tokens + question + e_token + sep
generate_ids = generate(tokenizer, input_tokens, generation_config, model)
decode_image_text(generate_ids, tokenizer)

### text-to-image generation
prompt = "Can you generate an image of a dog on the green grass?"
input_tokens = tokenizer.bos_token + s_token + prompt + e_token + sep
generate_ids = generate(tokenizer, input_tokens, generation_config, model)
save_path = 'dog.jpg'
decode_image_text(generate_ids, tokenizer, save_path)

### multimodal prompt image generation
instruction = "Can you make the cat wear sunglasses?"
input_tokens = tokenizer.bos_token + s_token + img_tokens + instruction + e_token + sep
generate_ids = generate(tokenizer, input_tokens, generation_config, model)
save_path = 'cat_sunglasses.jpg'
decode_image_text(generate_ids, tokenizer, save_path)
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov7_l_syncbn_fast_6x16b-100e_coco.py
DELETED
@@ -1,489 +0,0 @@
|
|
1 |
-
_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py']
|
2 |
-
|
3 |
-
data_root = './data-df2/'
|
4 |
-
train_ann_file = 'annotations/train.json'
|
5 |
-
train_data_prefix = 'smaller-dataset/'
|
6 |
-
val_ann_file = 'annotations/val.json'
|
7 |
-
val_data_prefix = 'smaller-dataset/'
|
8 |
-
test_ann_file = 'annotations/test.json'
|
9 |
-
test_data_prefix = 'smaller-dataset/'
|
10 |
-
# num_classes = 13
|
11 |
-
train_batch_size_per_gpu = 32
|
12 |
-
train_num_workers = 4
|
13 |
-
persistent_workers = True
|
14 |
-
|
15 |
-
vis_backends = [
|
16 |
-
dict(type='LocalVisBackend'),
|
17 |
-
]
|
18 |
-
visualizer = dict(
|
19 |
-
type='mmdet.DetLocalVisualizer',
|
20 |
-
vis_backends=[
|
21 |
-
dict(type='LocalVisBackend'),
|
22 |
-
# dict(type='WandbVisBackend'),
|
23 |
-
dict(type='TensorboardVisBackend')
|
24 |
-
],
|
25 |
-
name='visualizer')
|
26 |
-
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
|
27 |
-
log_level = 'INFO'
|
28 |
-
load_from = None
|
29 |
-
resume = False
|
30 |
-
|
31 |
-
anchors = [
|
32 |
-
[(12, 16), (19, 36), (40, 28)], # P3/8
|
33 |
-
[(36, 75), (76, 55), (72, 146)], # P4/16
|
34 |
-
[(142, 110), (192, 243), (459, 401)] # P5/32
|
35 |
-
]
|
36 |
-
|
37 |
-
base_lr = 0.01
|
38 |
-
max_epochs = 100
|
39 |
-
|
40 |
-
num_epoch_stage2 = 10 # The last 10 epochs switch evaluation interval
|
41 |
-
val_interval_stage2 = 1
|
42 |
-
|
43 |
-
model_test_cfg = dict(
|
44 |
-
multi_label=True,
|
45 |
-
nms_pre=30000,
|
46 |
-
score_thr=0.001,
|
47 |
-
nms=dict(type='nms', iou_threshold=0.65),
|
48 |
-
max_per_img=300)
|
49 |
-
|
50 |
-
img_scale = (640, 640)
|
51 |
-
dataset_type = 'YOLOv5CocoDataset'
|
52 |
-
classes=('short_sleeved_shirt', 'long_sleeved_shirt',
|
53 |
-
'short_sleeved_outwear', 'long_sleeved_outwear',
|
54 |
-
'vest', 'sling', 'shorts', 'trousers', 'skirt',
|
55 |
-
'short_sleeved_dress', 'long_sleeved_dress',
|
56 |
-
'vest_dress', 'sling_dress')
|
57 |
-
num_classes = len(classes)
|
58 |
-
palette=[(255, 0, 0), (255, 128, 0), (255, 255, 0),
|
59 |
-
(128, 255, 0), (0, 255, 0), (0, 255, 128),
|
60 |
-
(0, 255, 255), (0, 128, 255), (0, 0, 255),
|
61 |
-
(127, 0, 255), (255, 0, 255), (255, 0, 127),
|
62 |
-
(128, 128, 128)]
|
63 |
-
metainfo = dict(
|
64 |
-
classes=classes,
|
65 |
-
palette=palette
|
66 |
-
)
|
67 |
-
val_batch_size_per_gpu = 1
|
68 |
-
val_num_workers = 2
|
69 |
-
batch_shapes_cfg = dict(
|
70 |
-
type='BatchShapePolicy',
|
71 |
-
batch_size=val_batch_size_per_gpu,
|
72 |
-
img_size=img_scale[0],
|
73 |
-
size_divisor=32,
|
74 |
-
extra_pad_ratio=0.5)
|
75 |
-
strides = [8, 16, 32] # Strides of multi-scale prior box
|
76 |
-
num_det_layers = 3
|
77 |
-
norm_cfg = dict(type='BN', momentum=0.03, eps=0.001)
|
78 |
-
|
79 |
-
# Data augmentation
|
80 |
-
max_translate_ratio = 0.2 # YOLOv5RandomAffine
|
81 |
-
scaling_ratio_range = (0.1, 2.0) # YOLOv5RandomAffine
|
82 |
-
mixup_prob = 0.15 # YOLOv5MixUp
|
83 |
-
randchoice_mosaic_prob = [0.8, 0.2]
|
84 |
-
mixup_alpha = 8.0 # YOLOv5MixUp
|
85 |
-
mixup_beta = 8.0 # YOLOv5MixUp
|
86 |
-
|
87 |
-
# -----train val related-----
|
88 |
-
loss_cls_weight = 0.3
|
89 |
-
loss_bbox_weight = 0.05
|
90 |
-
loss_obj_weight = 0.7
|
91 |
-
# BatchYOLOv7Assigner params
|
92 |
-
simota_candidate_topk = 10
|
93 |
-
simota_iou_weight = 3.0
|
94 |
-
simota_cls_weight = 1.0
|
95 |
-
prior_match_thr = 4. # Priori box matching threshold
|
96 |
-
obj_level_weights = [4., 1.,
|
97 |
-
0.4] # The obj loss weights of the three output layers
|
98 |
-
|
99 |
-
lr_factor = 0.1 # Learning rate scaling factor
|
100 |
-
weight_decay = 0.0005
|
101 |
-
save_epoch_intervals = 1
|
102 |
-
max_keep_ckpts = 5
|
103 |
-
|
104 |
-
env_cfg = dict(
|
105 |
-
cudnn_benchmark=True,
|
106 |
-
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
|
107 |
-
dist_cfg=dict(backend='nccl'))
|
108 |
-
|
109 |
-
# ===============================Unmodified in most cases====================
|
110 |
-
model = dict(
|
111 |
-
type='YOLODetector',
|
112 |
-
data_preprocessor=dict(
|
113 |
-
type='YOLOv5DetDataPreprocessor',
|
114 |
-
mean=[0., 0., 0.],
|
115 |
-
std=[255., 255., 255.],
|
116 |
-
bgr_to_rgb=True),
|
117 |
-
backbone=dict(
|
118 |
-
type='YOLOv7Backbone',
|
119 |
-
arch='L',
|
120 |
-
norm_cfg=norm_cfg,
|
121 |
-
act_cfg=dict(type='SiLU', inplace=True)),
|
122 |
-
neck=dict(
|
123 |
-
type='YOLOv7PAFPN',
|
124 |
-
block_cfg=dict(
|
125 |
-
type='ELANBlock',
|
126 |
-
middle_ratio=0.5,
|
127 |
-
block_ratio=0.25,
|
128 |
-
num_blocks=4,
|
129 |
-
num_convs_in_block=1),
|
130 |
-
upsample_feats_cat_first=False,
|
131 |
-
in_channels=[512, 1024, 1024],
|
132 |
-
# The real output channel will be multiplied by 2
|
133 |
-
out_channels=[128, 256, 512],
|
134 |
-
norm_cfg=norm_cfg,
|
135 |
-
act_cfg=dict(type='SiLU', inplace=True)),
|
136 |
-
bbox_head=dict(
|
137 |
-
type='YOLOv7Head',
|
138 |
-
head_module=dict(
|
139 |
-
type='YOLOv7HeadModule',
|
140 |
-
num_classes=num_classes,
|
141 |
-
in_channels=[256, 512, 1024],
|
142 |
-
featmap_strides=strides,
|
143 |
-
num_base_priors=3),
|
144 |
-
prior_generator=dict(
|
145 |
-
type='mmdet.YOLOAnchorGenerator',
|
146 |
-
base_sizes=anchors,
|
147 |
-
strides=strides),
|
148 |
-
# scaled based on number of detection layers
|
149 |
-
loss_cls=dict(
|
150 |
-
type='mmdet.CrossEntropyLoss',
|
151 |
-
use_sigmoid=True,
|
152 |
-
reduction='mean',
|
153 |
-
loss_weight=loss_cls_weight *
|
154 |
-
(num_classes / 80 * 3 / num_det_layers)),
|
155 |
-
loss_bbox=dict(
|
156 |
-
type='IoULoss',
|
157 |
-
iou_mode='ciou',
|
158 |
-
bbox_format='xyxy',
|
159 |
-
reduction='mean',
|
160 |
-
loss_weight=loss_bbox_weight * (3 / num_det_layers),
|
161 |
-
return_iou=True),
|
162 |
-
loss_obj=dict(
|
163 |
-
type='mmdet.CrossEntropyLoss',
|
164 |
-
use_sigmoid=True,
|
165 |
-
reduction='mean',
|
166 |
-
loss_weight=loss_obj_weight *
|
167 |
-
((img_scale[0] / 640)**2 * 3 / num_det_layers)),
|
168 |
-
prior_match_thr=prior_match_thr,
|
169 |
-
obj_level_weights=obj_level_weights,
|
170 |
-
# BatchYOLOv7Assigner params
|
171 |
-
simota_candidate_topk=simota_candidate_topk,
|
172 |
-
simota_iou_weight=simota_iou_weight,
|
173 |
-
simota_cls_weight=simota_cls_weight),
|
174 |
-
test_cfg=model_test_cfg)
|
175 |
-
|
176 |
-
pre_transform = [
|
177 |
-
dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
|
178 |
-
dict(type='LoadAnnotations', with_bbox=True)
|
179 |
-
]
|
180 |
-
|
181 |
-
mosiac4_pipeline = [
|
182 |
-
dict(
|
183 |
-
type='Mosaic',
|
184 |
-
img_scale=img_scale,
|
185 |
-
pad_val=114.0,
|
186 |
-
pre_transform=pre_transform),
|
187 |
-
dict(
|
188 |
-
type='YOLOv5RandomAffine',
|
189 |
-
max_rotate_degree=0.0,
|
190 |
-
max_shear_degree=0.0,
|
191 |
-
max_translate_ratio=max_translate_ratio, # note
|
192 |
-
scaling_ratio_range=scaling_ratio_range, # note
|
193 |
-
# img_scale is (width, height)
|
194 |
-
border=(-img_scale[0] // 2, -img_scale[1] // 2),
|
195 |
-
border_val=(114, 114, 114)),
|
196 |
-
]
|
197 |
-
|
198 |
-
mosiac9_pipeline = [
|
199 |
-
dict(
|
200 |
-
type='Mosaic9',
|
201 |
-
img_scale=img_scale,
|
202 |
-
pad_val=114.0,
|
203 |
-
pre_transform=pre_transform),
|
204 |
-
dict(
|
205 |
-
type='YOLOv5RandomAffine',
|
206 |
-
max_rotate_degree=0.0,
|
207 |
-
max_shear_degree=0.0,
|
208 |
-
max_translate_ratio=max_translate_ratio, # note
|
209 |
-
scaling_ratio_range=scaling_ratio_range, # note
|
210 |
-
# img_scale is (width, height)
|
211 |
-
border=(-img_scale[0] // 2, -img_scale[1] // 2),
|
212 |
-
border_val=(114, 114, 114)),
|
213 |
-
]
|
214 |
-
|
215 |
-
randchoice_mosaic_pipeline = dict(
|
216 |
-
type='RandomChoice',
|
217 |
-
transforms=[mosiac4_pipeline, mosiac9_pipeline],
|
218 |
-
prob=randchoice_mosaic_prob)
|
219 |
-
|
220 |
-
train_pipeline = [
|
221 |
-
*pre_transform,
|
222 |
-
randchoice_mosaic_pipeline,
|
223 |
-
dict(
|
224 |
-
type='YOLOv5MixUp',
|
225 |
-
alpha=mixup_alpha, # note
|
226 |
-
beta=mixup_beta, # note
|
227 |
-
prob=mixup_prob,
|
228 |
-
pre_transform=[*pre_transform, randchoice_mosaic_pipeline]),
|
229 |
-
dict(type='YOLOv5HSVRandomAug'),
|
230 |
-
dict(type='mmdet.RandomFlip', prob=0.5),
|
231 |
-
dict(
|
232 |
-
type='mmdet.PackDetInputs',
|
233 |
-
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
|
234 |
-
'flip_direction'))
|
235 |
-
]
|
236 |
-
|
237 |
-
test_pipeline = [
|
238 |
-
dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
|
239 |
-
dict(type='YOLOv5KeepRatioResize', scale=img_scale),
|
240 |
-
dict(
|
241 |
-
type='LetterResize',
|
242 |
-
scale=img_scale,
|
243 |
-
allow_scale_up=False,
|
244 |
-
pad_val=dict(img=114)),
|
245 |
-
dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
|
246 |
-
dict(
|
247 |
-
type='mmdet.PackDetInputs',
|
248 |
-
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
|
249 |
-
'scale_factor', 'pad_param'))
|
250 |
-
]
|
251 |
-
|
252 |
-
train_dataloader = dict(
|
253 |
-
batch_size=train_batch_size_per_gpu,
|
254 |
-
num_workers=train_num_workers,
|
255 |
-
persistent_workers=persistent_workers,
|
256 |
-
pin_memory=True,
|
257 |
-
sampler=dict(type='DefaultSampler', shuffle=True),
|
258 |
-
collate_fn=dict(type='yolov5_collate'), # FASTER
|
259 |
-
dataset=dict(
|
260 |
-
type='RepeatDataset',
|
261 |
-
times=2,
|
262 |
-
dataset=dict(
|
263 |
-
type=dataset_type,
|
264 |
-
data_root=data_root,
|
265 |
-
metainfo=metainfo,
|
266 |
-
ann_file=val_ann_file,
|
267 |
-
data_prefix=dict(img=train_data_prefix),
|
268 |
-
filter_cfg=dict(filter_empty_gt=False, min_size=32),
|
269 |
-
pipeline=train_pipeline)
|
270 |
-
)
|
271 |
-
)
|
272 |
-
|
273 |
-
val_dataloader = dict(
|
274 |
-
dataset=dict(
|
275 |
-
metainfo=metainfo,
|
276 |
-
data_root=data_root,
|
277 |
-
ann_file=val_ann_file,
|
278 |
-
data_prefix=dict(img=val_data_prefix)))
|
279 |
-
|
280 |
-
val_evaluator = dict(ann_file=data_root + val_ann_file)
|
281 |
-
|
282 |
-
test_dataloader = dict(
|
283 |
-
dataset=dict(
|
284 |
-
metainfo=metainfo,
|
285 |
-
data_root=data_root,
|
286 |
-
ann_file=test_ann_file,
|
287 |
-
data_prefix=dict(img=test_data_prefix)))
|
288 |
-
test_evaluator = dict(ann_file=data_root + test_ann_file)
|
289 |
-
|
290 |
-
train_cfg = dict(
|
291 |
-
type='EpochBasedTrainLoop',
|
292 |
-
max_epochs=max_epochs,
|
293 |
-
val_interval=save_epoch_intervals,
|
294 |
-
dynamic_intervals=[(max_epochs - num_epoch_stage2, val_interval_stage2)])
|
295 |
-
val_cfg = dict(type='ValLoop')
|
296 |
-
test_cfg = dict(type='TestLoop')
|
297 |
-
|
298 |
-
param_scheduler = None
|
299 |
-
optim_wrapper = dict(
|
300 |
-
type='OptimWrapper',
|
301 |
-
optimizer=dict(
|
302 |
-
type='SGD',
|
303 |
-
lr=base_lr,
|
304 |
-
momentum=0.937,
|
305 |
-
weight_decay=weight_decay,
|
306 |
-
nesterov=True,
|
307 |
-
batch_size_per_gpu=train_batch_size_per_gpu),
|
308 |
-
constructor='YOLOv7OptimWrapperConstructor')
|
309 |
-
|
310 |
-
# TO DO: change param_scheduler type to StepLR, refer to mobilenet
|
311 |
-
default_scope = 'mmyolo'
|
312 |
-
default_hooks = dict(
|
313 |
-
timer=dict(type='IterTimerHook'),
|
314 |
-
logger=dict(type='LoggerHook', interval=10),
|
315 |
-
param_scheduler=dict(
|
316 |
-
type='YOLOv5ParamSchedulerHook',
|
317 |
-
scheduler_type='cosine',
|
318 |
-
lr_factor=lr_factor, # note
|
319 |
-
max_epochs=max_epochs),
|
320 |
-
checkpoint=dict(
|
321 |
-
type='CheckpointHook',
|
322 |
-
save_param_scheduler=False,
|
323 |
-
interval=save_epoch_intervals,
|
324 |
-
save_best='auto',
|
325 |
-
max_keep_ckpts=max_keep_ckpts),
|
326 |
-
sampler_seed=dict(type='DistSamplerSeedHook'),
|
327 |
-
visualization=dict(type='mmdet.DetVisualizationHook'))
|
328 |
-
|
329 |
-
custom_hooks = [
|
330 |
-
dict(
|
331 |
-
type='EMAHook',
|
332 |
-
ema_type='ExpMomentumEMA',
|
333 |
-
momentum=0.001,
|
334 |
-
update_buffers=True,
|
335 |
-
strict_load=False,
|
336 |
-
priority=49)
|
337 |
-
]
|
338 |
-
|
339 |
-
# ============================
|
340 |
-
|
341 |
-
file_client_args = dict(backend='disk')
|
342 |
-
_file_client_args = dict(backend='disk')
|
343 |
-
tta_model = dict(
|
344 |
-
type='mmdet.DetTTAModel',
|
345 |
-
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.65), max_per_img=300))
|
346 |
-
img_scales = [
|
347 |
-
(
|
348 |
-
640,
|
349 |
-
640,
|
350 |
-
),
|
351 |
-
(
|
352 |
-
320,
|
353 |
-
320,
|
354 |
-
),
|
355 |
-
(
|
356 |
-
960,
|
357 |
-
960,
|
358 |
-
),
|
359 |
-
]
|
360 |
-
_multiscale_resize_transforms = [
|
361 |
-
dict(
|
362 |
-
type='Compose',
|
363 |
-
transforms=[
|
364 |
-
dict(type='YOLOv5KeepRatioResize', scale=(
|
365 |
-
640,
|
366 |
-
640,
|
367 |
-
)),
|
368 |
-
dict(
|
369 |
-
type='LetterResize',
|
370 |
-
scale=(
|
371 |
-
640,
|
372 |
-
640,
|
373 |
-
),
|
374 |
-
allow_scale_up=False,
|
375 |
-
pad_val=dict(img=114)),
|
376 |
-
]),
|
377 |
-
dict(
|
378 |
-
type='Compose',
|
379 |
-
transforms=[
|
380 |
-
dict(type='YOLOv5KeepRatioResize', scale=(
|
381 |
-
320,
|
382 |
-
320,
|
383 |
-
)),
|
384 |
-
dict(
|
385 |
-
type='LetterResize',
|
386 |
-
scale=(
|
387 |
-
320,
|
388 |
-
320,
|
389 |
-
),
|
390 |
-
allow_scale_up=False,
|
391 |
-
pad_val=dict(img=114)),
|
392 |
-
]),
|
393 |
-
dict(
|
394 |
-
type='Compose',
|
395 |
-
transforms=[
|
396 |
-
dict(type='YOLOv5KeepRatioResize', scale=(
|
397 |
-
960,
|
398 |
-
960,
|
399 |
-
)),
|
400 |
-
dict(
|
401 |
-
type='LetterResize',
|
402 |
-
scale=(
|
403 |
-
960,
|
404 |
-
960,
|
405 |
-
),
|
406 |
-
allow_scale_up=False,
|
407 |
-
pad_val=dict(img=114)),
|
408 |
-
]),
|
409 |
-
]
|
410 |
-
tta_pipeline = [
|
411 |
-
dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
|
412 |
-
dict(
|
413 |
-
type='TestTimeAug',
|
414 |
-
transforms=[
|
415 |
-
[
|
416 |
-
dict(
|
417 |
-
type='Compose',
|
418 |
-
transforms=[
|
419 |
-
dict(type='YOLOv5KeepRatioResize', scale=(
|
420 |
-
640,
|
421 |
-
640,
|
422 |
-
)),
|
423 |
-
dict(
|
424 |
-
type='LetterResize',
|
425 |
-
scale=(
|
426 |
-
640,
|
427 |
-
640,
|
428 |
-
),
|
429 |
-
allow_scale_up=False,
|
430 |
-
pad_val=dict(img=114)),
|
431 |
-
]),
|
432 |
-
dict(
|
433 |
-
type='Compose',
|
434 |
-
transforms=[
|
435 |
-
dict(type='YOLOv5KeepRatioResize', scale=(
|
436 |
-
320,
|
437 |
-
320,
|
438 |
-
)),
|
439 |
-
dict(
|
440 |
-
type='LetterResize',
|
441 |
-
scale=(
|
442 |
-
320,
|
443 |
-
320,
|
444 |
-
),
|
445 |
-
allow_scale_up=False,
|
446 |
-
pad_val=dict(img=114)),
|
447 |
-
]),
|
448 |
-
dict(
|
449 |
-
type='Compose',
|
450 |
-
transforms=[
|
451 |
-
dict(type='YOLOv5KeepRatioResize', scale=(
|
452 |
-
960,
|
453 |
-
960,
|
454 |
-
)),
|
455 |
-
dict(
|
456 |
-
type='LetterResize',
|
457 |
-
scale=(
|
458 |
-
960,
|
459 |
-
960,
|
460 |
-
),
|
461 |
-
allow_scale_up=False,
|
462 |
-
pad_val=dict(img=114)),
|
463 |
-
]),
|
464 |
-
],
|
465 |
-
[
|
466 |
-
dict(type='mmdet.RandomFlip', prob=1.0),
|
467 |
-
dict(type='mmdet.RandomFlip', prob=0.0),
|
468 |
-
],
|
469 |
-
[
|
470 |
-
dict(type='mmdet.LoadAnnotations', with_bbox=True),
|
471 |
-
],
|
472 |
-
[
|
473 |
-
dict(
|
474 |
-
type='mmdet.PackDetInputs',
|
475 |
-
meta_keys=(
|
476 |
-
'img_id',
|
477 |
-
'img_path',
|
478 |
-
'ori_shape',
|
479 |
-
'img_shape',
|
480 |
-
'scale_factor',
|
481 |
-
'pad_param',
|
482 |
-
'flip',
|
483 |
-
'flip_direction',
|
484 |
-
)),
|
485 |
-
],
|
486 |
-
]),
|
487 |
-
]
|
488 |
-
|
489 |
-
launcher = 'none'
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnetv1c101_8xb32_in1k.py
DELETED
@@ -1,7 +0,0 @@
_base_ = [
    '../_base_/models/resnetv1c50.py',
    '../_base_/datasets/imagenet_bs32_pil_resize.py',
    '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
]

model = dict(backbone=dict(depth=101))
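As a minimal sketch (not part of the deleted repo), this is how an OpenMMLab-style config with `_base_` inheritance such as the one above is typically resolved; the relative path and the use of mmengine are assumptions for illustration.

from mmengine.config import Config

# Hypothetical path, run from the repo's mmpretrain config root.
cfg = Config.fromfile('configs/resnet/resnetv1c101_8xb32_in1k.py')
# The `_base_` files are merged first and local keys override them,
# so the backbone depth resolves to 101 instead of the base file's 50.
print(cfg.model.backbone.depth)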
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/util/utils.py
DELETED
@@ -1,238 +0,0 @@
import os
from typing import Any

import matplotlib.pyplot as plt
import torch
from torch import nn
from itertools import repeat
from poetry_diacritizer.util.decorators import ignore_exception
from dataclasses import dataclass
import numpy as np


@dataclass
class ErrorRate:
    wer: float
    der: float
    wer_without_case_ending: float
    der_without_case_ending: float


def epoch_time(start_time, end_time):
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs


@ignore_exception
def plot_alignment(alignment: torch.Tensor, path: str, global_step: Any = 0):
    """
    Plot alignment and save it into a path
    Args:
        alignment (Tensor): the encoder-decoder alignment
        path (str): a path used to save the alignment plot
        global_step (int): used in the name of the output alignment plot
    """
    alignment = alignment.squeeze(1).transpose(0, 1).cpu().detach().numpy()
    fig, axs = plt.subplots()
    img = axs.imshow(alignment, aspect="auto", origin="lower", interpolation="none")
    fig.colorbar(img, ax=axs)
    xlabel = "Decoder timestep"
    plt.xlabel(xlabel)
    plt.ylabel("Encoder timestep")
    plt.tight_layout()
    plot_name = f"{global_step}.png"
    plt.savefig(os.path.join(path, plot_name), dpi=300, format="png")
    plt.close()


def get_mask_from_lengths(memory, memory_lengths):
    """Get mask tensor from list of length
    Args:
        memory: (batch, max_time, dim)
        memory_lengths: array like
    """
    mask = memory.data.new(memory.size(0), memory.size(1)).bool().zero_()
    for idx, length in enumerate(memory_lengths):
        mask[idx][:length] = 1
    return ~mask


def repeater(data_loader):
    for loader in repeat(data_loader):
        for data in loader:
            yield data


def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


def initialize_weights(m):
    if hasattr(m, "weight") and m.weight.dim() > 1:
        nn.init.xavier_uniform_(m.weight.data)


def get_encoder_layers_attentions(model):
    attentions = []
    for layer in model.encoder.layers:
        attentions.append(layer.self_attention.attention)
    return attentions


def get_decoder_layers_attentions(model):
    self_attns, src_attens = [], []
    for layer in model.decoder.layers:
        self_attns.append(layer.self_attention.attention)
        src_attens.append(layer.encoder_attention.attention)
    return self_attns, src_attens


def display_attention(
    attention, path, global_step: int, name="att", n_heads=4, n_rows=2, n_cols=2
):
    assert n_rows * n_cols == n_heads

    fig = plt.figure(figsize=(15, 15))

    for i in range(n_heads):

        ax = fig.add_subplot(n_rows, n_cols, i + 1)

        _attention = attention.squeeze(0)[i].transpose(0, 1).cpu().detach().numpy()
        cax = ax.imshow(_attention, aspect="auto", origin="lower", interpolation="none")

    plot_name = f"{global_step}-{name}.png"
    plt.savefig(os.path.join(path, plot_name), dpi=300, format="png")
    plt.close()


def plot_multi_head(model, path, global_step):
    encoder_attentions = get_encoder_layers_attentions(model)
    decoder_attentions, attentions = get_decoder_layers_attentions(model)
    for i in range(len(attentions)):
        display_attention(
            attentions[0][0], path, global_step, f"encoder-decoder-layer{i + 1}"
        )
    for i in range(len(decoder_attentions)):
        display_attention(
            decoder_attentions[0][0], path, global_step, f"decoder-layer{i + 1}"
        )
    for i in range(len(encoder_attentions)):
        display_attention(
            encoder_attentions[0][0], path, global_step, f"encoder-layer {i + 1}"
        )


def make_src_mask(src, pad_idx=0):

    # src = [batch size, src len]

    src_mask = (src != pad_idx).unsqueeze(1).unsqueeze(2)

    # src_mask = [batch size, 1, 1, src len]

    return src_mask


def get_angles(pos, i, model_dim):
    angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(model_dim))
    return pos * angle_rates


def positional_encoding(position, model_dim):
    angle_rads = get_angles(
        np.arange(position)[:, np.newaxis],
        np.arange(model_dim)[np.newaxis, :],
        model_dim,
    )

    # apply sin to even indices in the array; 2i
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])

    # apply cos to odd indices in the array; 2i+1
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])

    pos_encoding = angle_rads[np.newaxis, ...]

    return torch.from_numpy(pos_encoding)


def calculate_error_rates(original_file_path: str, target_file_path: str) -> ErrorRate:
    """
    Calculates ErrorRates from paths
    """
    assert os.path.isfile(original_file_path)
    assert os.path.isfile(target_file_path)

    _wer = wer.calculate_wer_from_path(
        inp_path=original_file_path, out_path=target_file_path, case_ending=True
    )

    _wer_without_case_ending = wer.calculate_wer_from_path(
        inp_path=original_file_path, out_path=target_file_path, case_ending=False
    )

    _der = der.calculate_der_from_path(
        inp_path=original_file_path, out_path=target_file_path, case_ending=True
    )

    _der_without_case_ending = der.calculate_der_from_path(
        inp_path=original_file_path, out_path=target_file_path, case_ending=False
    )

    error_rates = ErrorRate(
        _wer,
        _der,
        _wer_without_case_ending,
        _der_without_case_ending,
    )

    return error_rates


def categorical_accuracy(preds, y, tag_pad_idx, device="cuda"):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
    """
    max_preds = preds.argmax(
        dim=1, keepdim=True
    )  # get the index of the max probability
    non_pad_elements = torch.nonzero((y != tag_pad_idx))
    correct = max_preds[non_pad_elements].squeeze(1).eq(y[non_pad_elements])
    return correct.sum() / torch.FloatTensor([y[non_pad_elements].shape[0]]).to(device)


def write_to_files(input_path, output_path, input_list, output_list):
    with open(input_path, "w", encoding="utf8") as file:
        for inp in input_list:
            file.write(inp + "\n")
    with open(output_path, "w", encoding="utf8") as file:
        for out in output_list:
            file.write(out + "\n")


def make_src_mask(src: torch.Tensor, pad_idx=0):
    return (src != pad_idx).unsqueeze(1).unsqueeze(2)


def make_trg_mask(trg, trg_pad_idx=0):

    # trg = [batch size, trg len]

    trg_pad_mask = (trg != trg_pad_idx).unsqueeze(1).unsqueeze(2)

    # trg_pad_mask = [batch size, 1, 1, trg len]

    trg_len = trg.shape[1]

    trg_sub_mask = torch.tril(torch.ones((trg_len, trg_len))).bool()

    # trg_sub_mask = [trg len, trg len]

    trg_mask = trg_pad_mask & trg_sub_mask

    # trg_mask = [batch size, 1, trg len, trg len]

    return trg_mask
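A small illustrative check, derived directly from the `positional_encoding` helper above; the import path mirrors the deleted file's location and is otherwise an assumption.

import torch

from poetry_diacritizer.util.utils import positional_encoding

pe = positional_encoding(position=50, model_dim=128)
assert pe.shape == (1, 50, 128)  # (batch, position, model_dim)
# At position 0 the even channels carry sin(0) = 0 and the odd channels cos(0) = 1.
assert torch.allclose(pe[0, 0, 0::2], torch.zeros(64, dtype=pe.dtype))
assert torch.allclose(pe[0, 0, 1::2], torch.ones(64, dtype=pe.dtype))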
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/__init__.py
DELETED
@@ -1,100 +0,0 @@
from __future__ import annotations
from .Acytoo import Acytoo
from .AiAsk import AiAsk
from .Aibn import Aibn
from .Aichat import Aichat
from .Ails import Ails
from .Aivvm import Aivvm
from .AItianhu import AItianhu
from .AItianhuSpace import AItianhuSpace
from .Bing import Bing
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptAi import ChatgptAi
from .ChatgptDemo import ChatgptDemo
from .ChatgptDuo import ChatgptDuo
from .ChatgptX import ChatgptX
from .Cromicle import Cromicle
from .DeepAi import DeepAi
from .FreeGpt import FreeGpt
from .GPTalk import GPTalk
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
from .H2o import H2o
from .Liaobots import Liaobots
from .Myshell import Myshell
from .Phind import Phind
from .Vercel import Vercel
from .Vitalentum import Vitalentum
from .Ylokh import Ylokh
from .You import You
from .Yqcloud import Yqcloud

from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
from .retry_provider import RetryProvider
from .deprecated import *
from .needs_auth import *
from .unfinished import *

__all__ = [
    'BaseProvider', 'AsyncProvider', 'AsyncGeneratorProvider', 'RetryProvider',
    'Acytoo', 'AiAsk', 'Aibn', 'Aichat', 'Ails', 'Aivvm', 'AiService',
    'AItianhu', 'AItianhuSpace', 'Aivvm', 'Bard', 'Bing', 'ChatBase',
    'ChatForAi', 'Chatgpt4Online', 'ChatgptAi', 'ChatgptDemo', 'ChatgptDuo',
    'ChatgptLogin', 'ChatgptX', 'Cromicle', 'CodeLinkAva', 'DeepAi', 'DfeHub',
    'EasyChat', 'Forefront', 'FreeGpt', 'GPTalk', 'GptForLove', 'GetGpt',
    'GptGo', 'GptGod', 'H2o', 'HuggingChat', 'Liaobots', 'Lockchat', 'Myshell',
    'Opchatgpts', 'Raycast', 'OpenaiChat', 'OpenAssistant', 'PerplexityAi',
    'Phind', 'Theb', 'Vercel', 'Vitalentum', 'Wewordle', 'Ylokh', 'You',
    'Yqcloud', 'Equing', 'FastGpt', 'Wuguokai', 'V50'
]
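A hedged sketch of how a provider registry module like this is usually consumed; the package-level `g4f.ChatCompletion.create` call and its arguments are assumptions about the g4f API of that period, not something defined in this file.

import g4f

# Any provider re-exported by the __init__ above (Bing is one of them) can be
# passed explicitly; treat the exact call signature as an assumption.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello"}],
    provider=g4f.Provider.Bing,
)
print(response)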
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/fused_bias_act.cpp
DELETED
@@ -1,23 +0,0 @@
// Copyright (c) SenseTime Research. All rights reserved.

#include <torch/extension.h>


torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
    int act, int grad, float alpha, float scale);

#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
    int act, int grad, float alpha, float scale) {
    CHECK_CUDA(input);
    CHECK_CUDA(bias);

    return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
}
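A minimal sketch of how a pybind11 extension like the one above is typically JIT-compiled and called from Python; the CUDA source name and the argument values are assumptions for illustration (the repo ships its own op wrapper), and `act=3` is the leaky-ReLU mode used by the usual StyleGAN2 kernels.

import torch
from torch.utils.cpp_extension import load

# Compiles the C++ binding together with a matching CUDA kernel file
# (hypothetical filename) into an importable module.
fused = load(
    name="fused_bias_act",
    sources=["fused_bias_act.cpp", "fused_bias_act_kernel.cu"],
    verbose=True,
)

x = torch.randn(8, 512, device="cuda")
b = torch.zeros(512, device="cuda")
empty = x.new_empty(0)  # `refer` is unused for the plain forward pass
y = fused.fused_bias_act(x, b, empty, 3, 0, 0.2, 2 ** 0.5)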
spaces/Anar0140/4.RealTime-MediaPipe-AI-From-Video-On-Any-Device/app.py
DELETED
@@ -1,59 +0,0 @@
import streamlit as st
st.markdown("""

# MediaPipe

### A cross-language SDK for real-time, 3D, camera-responsive AI on nearly any device

#### Vision
#### Natural Language
#### Audio

MediaPipe has fast and flexible AI/ML pipelines.

Examples with JavaScript links:

1. Image Classifier: https://mediapipe-studio.webapps.google.com/demo/image_classifier
2. Object Detector: https://mediapipe-studio.webapps.google.com/demo/object_detector
3. Text Classification: https://mediapipe-studio.webapps.google.com/demo/text_classifier
4. Gesture Recognizer: https://mediapipe-studio.webapps.google.com/demo/gesture_recognizer
5. Hand Landmark Detection: https://mediapipe-studio.webapps.google.com/demo/hand_landmarker
6. Audio Classifier: https://mediapipe-studio.webapps.google.com/demo/audio_classifier

Get started with just JavaScript!

Getting Started: https://google.github.io/mediapipe/getting_started/javascript.html

JavaScript solutions, ready to demo:
1. Face Mesh: https://codepen.io/mediapipe/full/KKgVaPJ
2. Face Detection: https://codepen.io/mediapipe/full/dyOzvZM
3. Hands: https://codepen.io/mediapipe/full/RwGWYJw
4. Face, Hands, Body: https://codepen.io/mediapipe/full/LYRRYEw
5. Objectron: https://codepen.io/mediapipe/full/BaWvzdY
6. Full Skeletal Pose: https://codepen.io/mediapipe/full/jOMbvxw
7. Self Segmentation From Background: https://codepen.io/mediapipe/full/wvJyQpq


Demonstration in action, with screenshots:

Self Segmentation From Background:


Full Skeletal Pose:


Hands - both in 3D projection, even hidden-surface vertices - Mahalo:


Holistic - Face, Hands, Body:


Face Detection:


Face Mesh in real time - 30 frames per second!


""")
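For reference, a hedged sketch of the same Hands solution via MediaPipe's Python solutions API rather than the JavaScript links above; it is not part of this Space, and the input image path is hypothetical (assumes `pip install mediapipe opencv-python`).

import cv2
import mediapipe as mp

mp_hands = mp.solutions.hands

image = cv2.imread("hand.jpg")  # hypothetical local image
with mp_hands.Hands(static_image_mode=True, max_num_hands=2) as hands:
    results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

if results.multi_hand_landmarks:
    # 21 landmarks per detected hand, each with normalized x/y/z coordinates
    print(len(results.multi_hand_landmarks[0].landmark))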
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
DELETED
@@ -1,748 +0,0 @@
|
|
1 |
-
# Copyright 2023 The InstructPix2Pix Authors and The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import inspect
|
16 |
-
import warnings
|
17 |
-
from typing import Callable, List, Optional, Union
|
18 |
-
|
19 |
-
import numpy as np
|
20 |
-
import PIL
|
21 |
-
import torch
|
22 |
-
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
23 |
-
|
24 |
-
from ...image_processor import VaeImageProcessor
|
25 |
-
from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
|
26 |
-
from ...models import AutoencoderKL, UNet2DConditionModel
|
27 |
-
from ...schedulers import KarrasDiffusionSchedulers
|
28 |
-
from ...utils import (
|
29 |
-
PIL_INTERPOLATION,
|
30 |
-
deprecate,
|
31 |
-
is_accelerate_available,
|
32 |
-
is_accelerate_version,
|
33 |
-
logging,
|
34 |
-
randn_tensor,
|
35 |
-
)
|
36 |
-
from ..pipeline_utils import DiffusionPipeline
|
37 |
-
from . import StableDiffusionPipelineOutput
|
38 |
-
from .safety_checker import StableDiffusionSafetyChecker
|
39 |
-
|
40 |
-
|
41 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
42 |
-
|
43 |
-
|
44 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
|
45 |
-
def preprocess(image):
|
46 |
-
warnings.warn(
|
47 |
-
"The preprocess method is deprecated and will be removed in a future version. Please"
|
48 |
-
" use VaeImageProcessor.preprocess instead",
|
49 |
-
FutureWarning,
|
50 |
-
)
|
51 |
-
if isinstance(image, torch.Tensor):
|
52 |
-
return image
|
53 |
-
elif isinstance(image, PIL.Image.Image):
|
54 |
-
image = [image]
|
55 |
-
|
56 |
-
if isinstance(image[0], PIL.Image.Image):
|
57 |
-
w, h = image[0].size
|
58 |
-
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
|
59 |
-
|
60 |
-
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
|
61 |
-
image = np.concatenate(image, axis=0)
|
62 |
-
image = np.array(image).astype(np.float32) / 255.0
|
63 |
-
image = image.transpose(0, 3, 1, 2)
|
64 |
-
image = 2.0 * image - 1.0
|
65 |
-
image = torch.from_numpy(image)
|
66 |
-
elif isinstance(image[0], torch.Tensor):
|
67 |
-
image = torch.cat(image, dim=0)
|
68 |
-
return image
|
69 |
-
|
70 |
-
|
71 |
-
class StableDiffusionInstructPix2PixPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
|
72 |
-
r"""
|
73 |
-
Pipeline for pixel-level image editing by following text instructions (based on Stable Diffusion).
|
74 |
-
|
75 |
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
76 |
-
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
77 |
-
|
78 |
-
The pipeline also inherits the following loading methods:
|
79 |
-
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
80 |
-
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
81 |
-
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
82 |
-
|
83 |
-
Args:
|
84 |
-
vae ([`AutoencoderKL`]):
|
85 |
-
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
86 |
-
text_encoder ([`~transformers.CLIPTextModel`]):
|
87 |
-
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
88 |
-
tokenizer ([`~transformers.CLIPTokenizer`]):
|
89 |
-
A `CLIPTokenizer` to tokenize text.
|
90 |
-
unet ([`UNet2DConditionModel`]):
|
91 |
-
A `UNet2DConditionModel` to denoise the encoded image latents.
|
92 |
-
scheduler ([`SchedulerMixin`]):
|
93 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
94 |
-
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
95 |
-
safety_checker ([`StableDiffusionSafetyChecker`]):
|
96 |
-
Classification module that estimates whether generated images could be considered offensive or harmful.
|
97 |
-
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
|
98 |
-
about a model's potential harms.
|
99 |
-
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
100 |
-
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
101 |
-
"""
|
102 |
-
_optional_components = ["safety_checker", "feature_extractor"]
|
103 |
-
|
104 |
-
def __init__(
|
105 |
-
self,
|
106 |
-
vae: AutoencoderKL,
|
107 |
-
text_encoder: CLIPTextModel,
|
108 |
-
tokenizer: CLIPTokenizer,
|
109 |
-
unet: UNet2DConditionModel,
|
110 |
-
scheduler: KarrasDiffusionSchedulers,
|
111 |
-
safety_checker: StableDiffusionSafetyChecker,
|
112 |
-
feature_extractor: CLIPImageProcessor,
|
113 |
-
requires_safety_checker: bool = True,
|
114 |
-
):
|
115 |
-
super().__init__()
|
116 |
-
|
117 |
-
if safety_checker is None and requires_safety_checker:
|
118 |
-
logger.warning(
|
119 |
-
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
120 |
-
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
121 |
-
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
122 |
-
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
123 |
-
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
124 |
-
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
125 |
-
)
|
126 |
-
|
127 |
-
if safety_checker is not None and feature_extractor is None:
|
128 |
-
raise ValueError(
|
129 |
-
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
130 |
-
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
131 |
-
)
|
132 |
-
|
133 |
-
self.register_modules(
|
134 |
-
vae=vae,
|
135 |
-
text_encoder=text_encoder,
|
136 |
-
tokenizer=tokenizer,
|
137 |
-
unet=unet,
|
138 |
-
scheduler=scheduler,
|
139 |
-
safety_checker=safety_checker,
|
140 |
-
feature_extractor=feature_extractor,
|
141 |
-
)
|
142 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
143 |
-
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
144 |
-
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
145 |
-
|
146 |
-
@torch.no_grad()
|
147 |
-
def __call__(
|
148 |
-
self,
|
149 |
-
prompt: Union[str, List[str]] = None,
|
150 |
-
image: Union[
|
151 |
-
torch.FloatTensor,
|
152 |
-
PIL.Image.Image,
|
153 |
-
np.ndarray,
|
154 |
-
List[torch.FloatTensor],
|
155 |
-
List[PIL.Image.Image],
|
156 |
-
List[np.ndarray],
|
157 |
-
] = None,
|
158 |
-
num_inference_steps: int = 100,
|
159 |
-
guidance_scale: float = 7.5,
|
160 |
-
image_guidance_scale: float = 1.5,
|
161 |
-
negative_prompt: Optional[Union[str, List[str]]] = None,
|
162 |
-
num_images_per_prompt: Optional[int] = 1,
|
163 |
-
eta: float = 0.0,
|
164 |
-
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
165 |
-
latents: Optional[torch.FloatTensor] = None,
|
166 |
-
prompt_embeds: Optional[torch.FloatTensor] = None,
|
167 |
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
168 |
-
output_type: Optional[str] = "pil",
|
169 |
-
return_dict: bool = True,
|
170 |
-
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
171 |
-
callback_steps: int = 1,
|
172 |
-
):
|
173 |
-
r"""
|
174 |
-
The call function to the pipeline for generation.
|
175 |
-
|
176 |
-
Args:
|
177 |
-
prompt (`str` or `List[str]`, *optional*):
|
178 |
-
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
179 |
-
image (`torch.FloatTensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
|
180 |
-
`Image` or tensor representing an image batch to be repainted according to `prompt`. Can also accept
|
181 |
-
image latents as `image`, but if passing latents directly it is not encoded again.
|
182 |
-
num_inference_steps (`int`, *optional*, defaults to 100):
|
183 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
184 |
-
expense of slower inference.
|
185 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
186 |
-
A higher guidance scale value encourages the model to generate images closely linked to the text
|
187 |
-
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
188 |
-
image_guidance_scale (`float`, *optional*, defaults to 1.5):
|
189 |
-
Push the generated image towards the inital `image`. Image guidance scale is enabled by setting
|
190 |
-
`image_guidance_scale > 1`. Higher image guidance scale encourages generated images that are closely
|
191 |
-
linked to the source `image`, usually at the expense of lower image quality. This pipeline requires a
|
192 |
-
value of at least `1`.
|
193 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
194 |
-
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
195 |
-
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
196 |
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
197 |
-
The number of images to generate per prompt.
|
198 |
-
eta (`float`, *optional*, defaults to 0.0):
|
199 |
-
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
200 |
-
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
201 |
-
generator (`torch.Generator`, *optional*):
|
202 |
-
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
203 |
-
generation deterministic.
|
204 |
-
latents (`torch.FloatTensor`, *optional*):
|
205 |
-
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
206 |
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
207 |
-
tensor is generated by sampling using the supplied random `generator`.
|
208 |
-
prompt_embeds (`torch.FloatTensor`, *optional*):
|
209 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
210 |
-
provided, text embeddings are generated from the `prompt` input argument.
|
211 |
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
212 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
213 |
-
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
214 |
-
output_type (`str`, *optional*, defaults to `"pil"`):
|
215 |
-
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
216 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
217 |
-
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
218 |
-
plain tuple.
|
219 |
-
callback (`Callable`, *optional*):
|
220 |
-
A function that calls every `callback_steps` steps during inference. The function is called with the
|
221 |
-
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
222 |
-
callback_steps (`int`, *optional*, defaults to 1):
|
223 |
-
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
224 |
-
every step.
|
225 |
-
|
226 |
-
Examples:
|
227 |
-
|
228 |
-
```py
|
229 |
-
>>> import PIL
|
230 |
-
>>> import requests
|
231 |
-
>>> import torch
|
232 |
-
>>> from io import BytesIO
|
233 |
-
|
234 |
-
>>> from diffusers import StableDiffusionInstructPix2PixPipeline
|
235 |
-
|
236 |
-
|
237 |
-
>>> def download_image(url):
|
238 |
-
... response = requests.get(url)
|
239 |
-
... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
240 |
-
|
241 |
-
|
242 |
-
>>> img_url = "https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png"
|
243 |
-
|
244 |
-
>>> image = download_image(img_url).resize((512, 512))
|
245 |
-
|
246 |
-
>>> pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
|
247 |
-
... "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
|
248 |
-
... )
|
249 |
-
>>> pipe = pipe.to("cuda")
|
250 |
-
|
251 |
-
>>> prompt = "make the mountains snowy"
|
252 |
-
>>> image = pipe(prompt=prompt, image=image).images[0]
|
253 |
-
```
|
254 |
-
|
255 |
-
Returns:
|
256 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
257 |
-
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
258 |
-
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
259 |
-
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
260 |
-
"not-safe-for-work" (nsfw) content.
|
261 |
-
"""
|
262 |
-
# 0. Check inputs
|
263 |
-
self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
|
264 |
-
|
265 |
-
if image is None:
|
266 |
-
raise ValueError("`image` input cannot be undefined.")
|
267 |
-
|
268 |
-
# 1. Define call parameters
|
269 |
-
if prompt is not None and isinstance(prompt, str):
|
270 |
-
batch_size = 1
|
271 |
-
elif prompt is not None and isinstance(prompt, list):
|
272 |
-
batch_size = len(prompt)
|
273 |
-
else:
|
274 |
-
batch_size = prompt_embeds.shape[0]
|
275 |
-
|
276 |
-
device = self._execution_device
|
277 |
-
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
278 |
-
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
279 |
-
# corresponds to doing no classifier free guidance.
|
280 |
-
do_classifier_free_guidance = guidance_scale > 1.0 and image_guidance_scale >= 1.0
|
281 |
-
# check if scheduler is in sigmas space
|
282 |
-
scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas")
|
283 |
-
|
284 |
-
# 2. Encode input prompt
|
285 |
-
prompt_embeds = self._encode_prompt(
|
286 |
-
prompt,
|
287 |
-
device,
|
288 |
-
num_images_per_prompt,
|
289 |
-
do_classifier_free_guidance,
|
290 |
-
negative_prompt,
|
291 |
-
prompt_embeds=prompt_embeds,
|
292 |
-
negative_prompt_embeds=negative_prompt_embeds,
|
293 |
-
)
|
294 |
-
|
295 |
-
# 3. Preprocess image
|
296 |
-
image = self.image_processor.preprocess(image)
|
297 |
-
|
298 |
-
# 4. set timesteps
|
299 |
-
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
300 |
-
timesteps = self.scheduler.timesteps
|
301 |
-
|
302 |
-
# 5. Prepare Image latents
|
303 |
-
image_latents = self.prepare_image_latents(
|
304 |
-
image,
|
305 |
-
batch_size,
|
306 |
-
num_images_per_prompt,
|
307 |
-
prompt_embeds.dtype,
|
308 |
-
device,
|
309 |
-
do_classifier_free_guidance,
|
310 |
-
generator,
|
311 |
-
)
|
312 |
-
|
313 |
-
height, width = image_latents.shape[-2:]
|
314 |
-
height = height * self.vae_scale_factor
|
315 |
-
width = width * self.vae_scale_factor
|
316 |
-
|
317 |
-
# 6. Prepare latent variables
|
318 |
-
num_channels_latents = self.vae.config.latent_channels
|
319 |
-
latents = self.prepare_latents(
|
320 |
-
batch_size * num_images_per_prompt,
|
321 |
-
num_channels_latents,
|
322 |
-
height,
|
323 |
-
width,
|
324 |
-
prompt_embeds.dtype,
|
325 |
-
device,
|
326 |
-
generator,
|
327 |
-
latents,
|
328 |
-
)
|
329 |
-
|
330 |
-
# 7. Check that shapes of latents and image match the UNet channels
|
331 |
-
num_channels_image = image_latents.shape[1]
|
332 |
-
if num_channels_latents + num_channels_image != self.unet.config.in_channels:
|
333 |
-
raise ValueError(
|
334 |
-
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
335 |
-
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
336 |
-
f" `num_channels_image`: {num_channels_image} "
|
337 |
-
f" = {num_channels_latents+num_channels_image}. Please verify the config of"
|
338 |
-
" `pipeline.unet` or your `image` input."
|
339 |
-
)
|
340 |
-
|
341 |
-
# 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
342 |
-
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
343 |
-
|
344 |
-
# 9. Denoising loop
|
345 |
-
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
346 |
-
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
347 |
-
for i, t in enumerate(timesteps):
|
348 |
-
# Expand the latents if we are doing classifier free guidance.
|
349 |
-
# The latents are expanded 3 times because for pix2pix the guidance\
|
350 |
-
# is applied for both the text and the input image.
|
351 |
-
latent_model_input = torch.cat([latents] * 3) if do_classifier_free_guidance else latents
|
352 |
-
|
353 |
-
# concat latents, image_latents in the channel dimension
|
354 |
-
scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
355 |
-
scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1)
|
356 |
-
|
357 |
-
# predict the noise residual
|
358 |
-
noise_pred = self.unet(
|
359 |
-
                    scaled_latent_model_input, t, encoder_hidden_states=prompt_embeds, return_dict=False
                )[0]

                # Hack:
                # For karras style schedulers the model does classifer free guidance using the
                # predicted_original_sample instead of the noise_pred. So we need to compute the
                # predicted_original_sample here if we are using a karras style scheduler.
                if scheduler_is_in_sigma_space:
                    step_index = (self.scheduler.timesteps == t).nonzero()[0].item()
                    sigma = self.scheduler.sigmas[step_index]
                    noise_pred = latent_model_input - sigma * noise_pred

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3)
                    noise_pred = (
                        noise_pred_uncond
                        + guidance_scale * (noise_pred_text - noise_pred_image)
                        + image_guidance_scale * (noise_pred_image - noise_pred_uncond)
                    )

                # Hack:
                # For karras style schedulers the model does classifer free guidance using the
                # predicted_original_sample instead of the noise_pred. But the scheduler.step function
                # expects the noise_pred and computes the predicted_original_sample internally. So we
                # need to overwrite the noise_pred here such that the value of the computed
                # predicted_original_sample is correct.
                if scheduler_is_in_sigma_space:
                    noise_pred = (noise_pred - latents) / (-sigma)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        if not output_type == "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
        time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
        Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
        iterative execution of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
        """
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: procecss multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: procecss multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            # pix2pix has two negative embeddings, and unlike in other pipelines latents are ordered [prompt_embeds, negative_prompt_embeds, negative_prompt_embeds]
            prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds])

        return prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
    def decode_latents(self, latents):
        warnings.warn(
            "The decode_latents method is deprecated and will be removed in a future version. Please"
            " use VaeImageProcessor instead",
            FutureWarning,
        )
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def check_inputs(
        self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def prepare_image_latents(
        self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None
    ):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            image_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            if isinstance(generator, list):
                image_latents = [self.vae.encode(image[i : i + 1]).latent_dist.mode() for i in range(batch_size)]
                image_latents = torch.cat(image_latents, dim=0)
            else:
                image_latents = self.vae.encode(image).latent_dist.mode()

        if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
            # expand image_latents for batch_size
            deprecation_message = (
                f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial"
                " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
                " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
                " your script to pass as many initial images as text prompts to suppress this warning."
            )
            deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
            additional_image_per_prompt = batch_size // image_latents.shape[0]
            image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            image_latents = torch.cat([image_latents], dim=0)

        if do_classifier_free_guidance:
            uncond_image_latents = torch.zeros_like(image_latents)
            image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0)

        return image_latents
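The three-way `noise_pred.chunk(3)` above is what exposes the two guidance knobs of this pipeline. A minimal usage sketch, not part of the deleted file; the checkpoint name and input filename are illustrative:

```python
import PIL.Image
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

# load an instruct-pix2pix checkpoint (name is illustrative)
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

source = PIL.Image.open("input.png").convert("RGB")  # hypothetical input image
edited = pipe(
    "make the sky stormy",      # edit instruction (text conditioning)
    image=source,               # source image (image conditioning)
    guidance_scale=7.5,         # weights the (text - image) direction from the chunk(3) combination
    image_guidance_scale=1.5,   # weights the (image - uncond) direction
    num_inference_steps=20,
).images[0]
edited.save("edited.png")
```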
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_unclip/test_stable_unclip.py
DELETED
@@ -1,241 +0,0 @@
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # The class embeddings are the noise augmented image embeddings.
            # I.e. the image embeddings concated with the noised embeddings of the same dimension
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass
    # because UnCLIP GPU undeterminism requires a looser check.
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    # Overriding PipelineTesterMixin::test_inference_batch_single_identical
    # because UnCLIP undeterminism requires a looser check.
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
spaces/Andy1621/uniformer_image_detection/configs/foveabox/README.md
DELETED
@@ -1,41 +0,0 @@
# FoveaBox: Beyond Anchor-based Object Detector

[ALGORITHM]

FoveaBox is an accurate, flexible and completely anchor-free object detection system for object detection framework, as presented in our paper [https://arxiv.org/abs/1904.03797](https://arxiv.org/abs/1904.03797):
Different from previous anchor-based methods, FoveaBox directly learns the object existing possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existing possibility, and (b) producing category-agnostic bounding box for each position that potentially contains an object.

## Main Results

### Results on R50/101-FPN

| Backbone  | Style   | align | ms-train| Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
|:---------:|:-------:|:-------:|:-------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
| R-50      | pytorch | N       | N       | 1x      | 5.6      | 24.1           | 36.5   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219-ee4d5303.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219_223025.log.json) |
| R-50      | pytorch | N       | N       | 2x      | 5.6      | -              | 37.2   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203-2df792b1.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203_112043.log.json) |
| R-50      | pytorch | Y       | N       | 2x      | 8.1      | 19.4           | 37.9   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203_134252.log.json) |
| R-50      | pytorch | Y       | Y       | 2x      | 8.1      | 18.3           | 40.4   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205-85ce26cb.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205_112557.log.json) |
| R-101     | pytorch | N       | N       | 1x      | 9.2      | 17.4           | 38.6   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219-05e38f1c.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219_011740.log.json) |
| R-101     | pytorch | N       | N       | 2x      | 11.7     | -              | 40.0   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208-02320ea4.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208_202059.log.json) |
| R-101     | pytorch | Y       | N       | 2x      | 11.7     | 14.7           | 40.0   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208-c39a027a.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208_203337.log.json) |
| R-101     | pytorch | Y       | Y       | 2x      | 11.7     | 14.7           | 42.0   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208-649c5eb6.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208_202124.log.json) |

[1] *1x and 2x mean the model is trained for 12 and 24 epochs, respectively.* \
[2] *Align means utilizing deformable convolution to align the cls branch.* \
[3] *All results are obtained with a single model and without any test time data augmentation.*\
[4] *We use 4 GPUs for training.*

Any pull requests or issues are welcome.

## Citations

Please consider citing our paper in your publications if the project helps your research. BibTeX reference is as follows.

```latex
@article{kong2019foveabox,
  title={FoveaBox: Beyond Anchor-based Object Detector},
  author={Kong, Tao and Sun, Fuchun and Liu, Huaping and Jiang, Yuning and Shi, Jianbo},
  journal={arXiv preprint arXiv:1904.03797},
  year={2019}
}
```
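For context, a minimal inference sketch using one of the config/checkpoint pairs from the table above, assuming MMDetection v2.x is installed; the input image path is hypothetical:

```python
from mmdet.apis import inference_detector, init_detector

config_file = "configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py"
checkpoint_file = (
    "http://download.openmmlab.com/mmdetection/v2.0/foveabox/"
    "fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219-ee4d5303.pth"
)

# build the FoveaBox detector from the config/checkpoint pair listed in the results table
model = init_detector(config_file, checkpoint_file, device="cuda:0")
result = inference_detector(model, "demo.jpg")  # hypothetical test image
```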
spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py
DELETED
@@ -1,10 +0,0 @@
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py'
# model settings
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w18',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(18, 36)),
            stage3=dict(num_channels=(18, 36, 72)),
            stage4=dict(num_channels=(18, 36, 72, 144)))),
    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
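A small sketch, assuming mmcv is available, of how the `_base_` inheritance above resolves: the w32 base config is loaded first and the dictionaries here are merged on top of it.

```python
from mmcv import Config

# loading the child config transparently merges the _base_ w32 config with the overrides above
cfg = Config.fromfile("configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py")
print(cfg.model.backbone.extra.stage2.num_channels)  # (18, 36), overriding the wider base values
print(cfg.model.neck.in_channels)                    # [18, 36, 72, 144]
```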
spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_160k_ade20k.py
DELETED
@@ -1,6 +0,0 @@
_base_ = [
    '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/datasets/README.md
DELETED
@@ -1,27 +0,0 @@
# Downloading datasets

This directory includes instructions and scripts for downloading ImageNet and LSUN bedrooms for use in this codebase.

## Class-conditional ImageNet

For our class-conditional models, we use the official ILSVRC2012 dataset with manual center cropping and downsampling. To obtain this dataset, navigate to [this page on image-net.org](http://www.image-net.org/challenges/LSVRC/2012/downloads) and sign in (or create an account if you do not already have one). Then click on the link reading "Training images (Task 1 & 2)". This is a 138GB tar file containing 1000 sub-tar files, one per class.

Once the file is downloaded, extract it and look inside. You should see 1000 `.tar` files. You need to extract each of these, which may be impractical to do by hand on your operating system. To automate the process on a Unix-based system, you can `cd` into the directory and run this short shell script:

```
for file in *.tar; do tar xf "$file"; rm "$file"; done
```

This will extract and remove each tar file in turn.

Once all of the images have been extracted, the resulting directory should be usable as a data directory (the `--data_dir` argument for the training script). The filenames should all start with WNID (class ids) followed by underscores, like `n01440764_2708.JPEG`. Conveniently (but not by accident) this is how the automated data-loader expects to discover class labels.

## LSUN bedroom

To download and pre-process LSUN bedroom, clone [fyu/lsun](https://github.com/fyu/lsun) on GitHub and run their download script `python3 download.py bedroom`. The result will be an "lmdb" database named like `bedroom_train_lmdb`. You can pass this to our [lsun_bedroom.py](lsun_bedroom.py) script like so:

```
python lsun_bedroom.py bedroom_train_lmdb lsun_train_output_dir
```

This creates a directory called `lsun_train_output_dir`. This directory can be passed to the training scripts via the `--data_dir` argument.
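As a small illustration of the filename convention described above (WNID prefix followed by an underscore), class labels can be recovered directly from the extracted directory; the directory path here is hypothetical:

```python
import os

data_dir = "path/to/imagenet_train"  # hypothetical --data_dir
wnids = [
    name.split("_")[0]               # e.g. "n01440764" from "n01440764_2708.JPEG"
    for name in os.listdir(data_dir)
    if name.endswith(".JPEG")
]
print(f"{len(wnids)} images across {len(set(wnids))} classes")
```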
spaces/AnthonyTruchetPoC/persistent-docker/scripts/run-all-precommit-checks.sh
DELETED
@@ -1,2 +0,0 @@
#!/usr/bin/env sh
poetry run pre-commit run --all-files --hook-stage=manual
spaces/Araby/BRATArA/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: BRATArA
emoji: 🏃
colorFrom: purple
colorTo: red
sdk: streamlit
sdk_version: 1.27.2
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/cli.py
DELETED
@@ -1,199 +0,0 @@
import json
import os
import shlex
import sys
from contextlib import contextmanager
from subprocess import Popen
from typing import Any, Dict, IO, Iterator, List

try:
    import click
except ImportError:
    sys.stderr.write('It seems python-dotenv is not installed with cli option. \n'
                     'Run pip install "python-dotenv[cli]" to fix this.')
    sys.exit(1)

from .main import dotenv_values, set_key, unset_key
from .version import __version__


def enumerate_env():
    """
    Return a path for the ${pwd}/.env file.

    If pwd does not exist, return None.
    """
    try:
        cwd = os.getcwd()
    except FileNotFoundError:
        return None
    path = os.path.join(cwd, '.env')
    return path


@click.group()
@click.option('-f', '--file', default=enumerate_env(),
              type=click.Path(file_okay=True),
              help="Location of the .env file, defaults to .env file in current working directory.")
@click.option('-q', '--quote', default='always',
              type=click.Choice(['always', 'never', 'auto']),
              help="Whether to quote or not the variable values. Default mode is always. This does not affect parsing.")
@click.option('-e', '--export', default=False,
              type=click.BOOL,
              help="Whether to write the dot file as an executable bash script.")
@click.version_option(version=__version__)
@click.pass_context
def cli(ctx: click.Context, file: Any, quote: Any, export: Any) -> None:
    """This script is used to set, get or unset values from a .env file."""
    ctx.obj = {'QUOTE': quote, 'EXPORT': export, 'FILE': file}


@contextmanager
def stream_file(path: os.PathLike) -> Iterator[IO[str]]:
    """
    Open a file and yield the corresponding (decoded) stream.

    Exits with error code 2 if the file cannot be opened.
    """

    try:
        with open(path) as stream:
            yield stream
    except OSError as exc:
        print(f"Error opening env file: {exc}", file=sys.stderr)
        exit(2)


@cli.command()
@click.pass_context
@click.option('--format', default='simple',
              type=click.Choice(['simple', 'json', 'shell', 'export']),
              help="The format in which to display the list. Default format is simple, "
                   "which displays name=value without quotes.")
def list(ctx: click.Context, format: bool) -> None:
    """Display all the stored key/value."""
    file = ctx.obj['FILE']

    with stream_file(file) as stream:
        values = dotenv_values(stream=stream)

    if format == 'json':
        click.echo(json.dumps(values, indent=2, sort_keys=True))
    else:
        prefix = 'export ' if format == 'export' else ''
        for k in sorted(values):
            v = values[k]
            if v is not None:
                if format in ('export', 'shell'):
                    v = shlex.quote(v)
                click.echo(f'{prefix}{k}={v}')


@cli.command()
@click.pass_context
@click.argument('key', required=True)
@click.argument('value', required=True)
def set(ctx: click.Context, key: Any, value: Any) -> None:
    """Store the given key/value."""
    file = ctx.obj['FILE']
    quote = ctx.obj['QUOTE']
    export = ctx.obj['EXPORT']
    success, key, value = set_key(file, key, value, quote, export)
    if success:
        click.echo(f'{key}={value}')
    else:
        exit(1)


@cli.command()
@click.pass_context
@click.argument('key', required=True)
def get(ctx: click.Context, key: Any) -> None:
    """Retrieve the value for the given key."""
    file = ctx.obj['FILE']

    with stream_file(file) as stream:
        values = dotenv_values(stream=stream)

    stored_value = values.get(key)
    if stored_value:
        click.echo(stored_value)
    else:
        exit(1)


@cli.command()
@click.pass_context
@click.argument('key', required=True)
def unset(ctx: click.Context, key: Any) -> None:
    """Removes the given key."""
    file = ctx.obj['FILE']
    quote = ctx.obj['QUOTE']
    success, key = unset_key(file, key, quote)
    if success:
        click.echo(f"Successfully removed {key}")
    else:
        exit(1)


@cli.command(context_settings={'ignore_unknown_options': True})
@click.pass_context
@click.option(
    "--override/--no-override",
    default=True,
    help="Override variables from the environment file with those from the .env file.",
)
@click.argument('commandline', nargs=-1, type=click.UNPROCESSED)
def run(ctx: click.Context, override: bool, commandline: List[str]) -> None:
    """Run command with environment variables present."""
    file = ctx.obj['FILE']
    if not os.path.isfile(file):
        raise click.BadParameter(
            f'Invalid value for \'-f\' "{file}" does not exist.',
            ctx=ctx
        )
    dotenv_as_dict = {
        k: v
        for (k, v) in dotenv_values(file).items()
        if v is not None and (override or k not in os.environ)
    }

    if not commandline:
        click.echo('No command given.')
        exit(1)
    ret = run_command(commandline, dotenv_as_dict)
    exit(ret)


def run_command(command: List[str], env: Dict[str, str]) -> int:
    """Run command in sub process.

    Runs the command in a sub process with the variables from `env`
    added in the current environment variables.

    Parameters
    ----------
    command: List[str]
        The command and it's parameters
    env: Dict
        The additional environment variables

    Returns
    -------
    int
        The return code of the command

    """
    # copy the current environment variables and add the vales from
    # `env`
    cmd_env = os.environ.copy()
    cmd_env.update(env)

    p = Popen(command,
              universal_newlines=True,
              bufsize=0,
              shell=False,
              env=cmd_env)
    _, _ = p.communicate()

    return p.returncode
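The CLI commands above are thin wrappers over the python-dotenv library functions imported from `.main`; a rough equivalence sketch:

```python
from dotenv import dotenv_values, set_key, unset_key

set_key(".env", "API_KEY", "secret")   # roughly what `dotenv set API_KEY secret` does
print(dotenv_values(".env"))           # roughly what `dotenv list` prints, returned as a dict
unset_key(".env", "API_KEY")           # roughly what `dotenv unset API_KEY` does
```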
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/hash.py
DELETED
@@ -1,59 +0,0 @@
import hashlib
import logging
import sys
from optparse import Values
from typing import List

from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES
from pip._internal.utils.misc import read_chunks, write_output

logger = logging.getLogger(__name__)


class HashCommand(Command):
    """
    Compute a hash of a local package archive.

    These can be used with --hash in a requirements file to do repeatable
    installs.
    """

    usage = "%prog [options] <file> ..."
    ignore_require_venv = True

    def add_options(self) -> None:
        self.cmd_opts.add_option(
            "-a",
            "--algorithm",
            dest="algorithm",
            choices=STRONG_HASHES,
            action="store",
            default=FAVORITE_HASH,
            help="The hash algorithm to use: one of {}".format(
                ", ".join(STRONG_HASHES)
            ),
        )
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options: Values, args: List[str]) -> int:
        if not args:
            self.parser.print_usage(sys.stderr)
            return ERROR

        algorithm = options.algorithm
        for path in args:
            write_output(
                "%s:\n--hash=%s:%s", path, algorithm, _hash_of_file(path, algorithm)
            )
        return SUCCESS


def _hash_of_file(path: str, algorithm: str) -> str:
    """Return the hash digest of a file."""
    with open(path, "rb") as archive:
        hash = hashlib.new(algorithm)
        for chunk in read_chunks(archive):
            hash.update(chunk)
    return hash.hexdigest()
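A short sketch of the digest this command produces and the `--hash=<algorithm>:<digest>` format that `run` above writes out; the archive name is hypothetical:

```python
import hashlib

def sha256_of_file(path: str, chunk_size: int = 8192) -> str:
    # same chunked hashing idea as _hash_of_file above, with a fixed algorithm
    digest = hashlib.sha256()
    with open(path, "rb") as archive:
        for chunk in iter(lambda: archive.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(f"--hash=sha256:{sha256_of_file('example-1.0-py3-none-any.whl')}")  # hypothetical archive
```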
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/containers.py
DELETED
@@ -1,167 +0,0 @@
from itertools import zip_longest
from typing import (
    Iterator,
    Iterable,
    List,
    Optional,
    Union,
    overload,
    TypeVar,
    TYPE_CHECKING,
)

if TYPE_CHECKING:
    from .console import (
        Console,
        ConsoleOptions,
        JustifyMethod,
        OverflowMethod,
        RenderResult,
        RenderableType,
    )
    from .text import Text

from .cells import cell_len
from .measure import Measurement

T = TypeVar("T")


class Renderables:
    """A list subclass which renders its contents to the console."""

    def __init__(
        self, renderables: Optional[Iterable["RenderableType"]] = None
    ) -> None:
        self._renderables: List["RenderableType"] = (
            list(renderables) if renderables is not None else []
        )

    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "RenderResult":
        """Console render method to insert line-breaks."""
        yield from self._renderables

    def __rich_measure__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "Measurement":
        dimensions = [
            Measurement.get(console, options, renderable)
            for renderable in self._renderables
        ]
        if not dimensions:
            return Measurement(1, 1)
        _min = max(dimension.minimum for dimension in dimensions)
        _max = max(dimension.maximum for dimension in dimensions)
        return Measurement(_min, _max)

    def append(self, renderable: "RenderableType") -> None:
        self._renderables.append(renderable)

    def __iter__(self) -> Iterable["RenderableType"]:
        return iter(self._renderables)


class Lines:
    """A list subclass which can render to the console."""

    def __init__(self, lines: Iterable["Text"] = ()) -> None:
        self._lines: List["Text"] = list(lines)

    def __repr__(self) -> str:
        return f"Lines({self._lines!r})"

    def __iter__(self) -> Iterator["Text"]:
        return iter(self._lines)

    @overload
    def __getitem__(self, index: int) -> "Text":
        ...

    @overload
    def __getitem__(self, index: slice) -> List["Text"]:
        ...

    def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]:
        return self._lines[index]

    def __setitem__(self, index: int, value: "Text") -> "Lines":
        self._lines[index] = value
        return self

    def __len__(self) -> int:
        return self._lines.__len__()

    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "RenderResult":
        """Console render method to insert line-breaks."""
        yield from self._lines

    def append(self, line: "Text") -> None:
        self._lines.append(line)

    def extend(self, lines: Iterable["Text"]) -> None:
        self._lines.extend(lines)

    def pop(self, index: int = -1) -> "Text":
        return self._lines.pop(index)

    def justify(
        self,
        console: "Console",
        width: int,
        justify: "JustifyMethod" = "left",
        overflow: "OverflowMethod" = "fold",
    ) -> None:
        """Justify and overflow text to a given width.

        Args:
            console (Console): Console instance.
            width (int): Number of characters per line.
            justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left".
            overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold".

        """
        from .text import Text

        if justify == "left":
            for line in self._lines:
                line.truncate(width, overflow=overflow, pad=True)
        elif justify == "center":
            for line in self._lines:
                line.rstrip()
                line.truncate(width, overflow=overflow)
                line.pad_left((width - cell_len(line.plain)) // 2)
                line.pad_right(width - cell_len(line.plain))
        elif justify == "right":
            for line in self._lines:
                line.rstrip()
                line.truncate(width, overflow=overflow)
                line.pad_left(width - cell_len(line.plain))
        elif justify == "full":
            for line_index, line in enumerate(self._lines):
                if line_index == len(self._lines) - 1:
                    break
                words = line.split(" ")
                words_size = sum(cell_len(word.plain) for word in words)
                num_spaces = len(words) - 1
                spaces = [1 for _ in range(num_spaces)]
                index = 0
                if spaces:
                    while words_size + num_spaces < width:
                        spaces[len(spaces) - index - 1] += 1
                        num_spaces += 1
                        index = (index + 1) % len(spaces)
                tokens: List[Text] = []
                for index, (word, next_word) in enumerate(
                    zip_longest(words, words[1:])
                ):
                    tokens.append(word)
                    if index < len(spaces):
                        style = word.get_style_at_offset(console, -1)
                        next_style = next_word.get_style_at_offset(console, 0)
                        space_style = style if style == next_style else line.style
                        tokens.append(Text(" " * spaces[index], style=space_style))
                self[line_index] = Text("").join(tokens)
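A quick sketch exercising the `Lines.justify` logic shown above; note this is an internal Rich class, so the import path may differ between versions, and the strings and width are arbitrary:

```python
from rich.console import Console
from rich.containers import Lines
from rich.text import Text

console = Console()
lines = Lines([Text("full justification pads the gaps"), Text("last line is left untouched")])
lines.justify(console, width=40, justify="full")  # distributes extra spaces between words
for line in lines:
    console.print(line)
```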
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/functools.py
DELETED
@@ -1,525 +0,0 @@
import functools
import time
import inspect
import collections
import types
import itertools

import setuptools.extern.more_itertools

from typing import Callable, TypeVar


CallableT = TypeVar("CallableT", bound=Callable[..., object])


def compose(*funcs):
    """
    Compose any number of unary functions into a single unary function.

    >>> import textwrap
    >>> expected = str.strip(textwrap.dedent(compose.__doc__))
    >>> strip_and_dedent = compose(str.strip, textwrap.dedent)
    >>> strip_and_dedent(compose.__doc__) == expected
    True

    Compose also allows the innermost function to take arbitrary arguments.

    >>> round_three = lambda x: round(x, ndigits=3)
    >>> f = compose(round_three, int.__truediv__)
    >>> [f(3*x, x+1) for x in range(1,10)]
    [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
    """

    def compose_two(f1, f2):
        return lambda *args, **kwargs: f1(f2(*args, **kwargs))

    return functools.reduce(compose_two, funcs)


def method_caller(method_name, *args, **kwargs):
    """
    Return a function that will call a named method on the
    target object with optional positional and keyword
    arguments.

    >>> lower = method_caller('lower')
    >>> lower('MyString')
    'mystring'
    """

    def call_method(target):
        func = getattr(target, method_name)
        return func(*args, **kwargs)

    return call_method


def once(func):
    """
    Decorate func so it's only ever called the first time.

    This decorator can ensure that an expensive or non-idempotent function
    will not be expensive on subsequent calls and is idempotent.

    >>> add_three = once(lambda a: a+3)
    >>> add_three(3)
    6
    >>> add_three(9)
    6
    >>> add_three('12')
    6

    To reset the stored value, simply clear the property ``saved_result``.

    >>> del add_three.saved_result
    >>> add_three(9)
    12
    >>> add_three(8)
    12

    Or invoke 'reset()' on it.

    >>> add_three.reset()
    >>> add_three(-3)
    0
    >>> add_three(0)
    0
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not hasattr(wrapper, 'saved_result'):
            wrapper.saved_result = func(*args, **kwargs)
        return wrapper.saved_result

    wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
    return wrapper


def method_cache(
    method: CallableT,
    cache_wrapper: Callable[
        [CallableT], CallableT
    ] = functools.lru_cache(),  # type: ignore[assignment]
) -> CallableT:
    """
    Wrap lru_cache to support storing the cache data in the object instances.

    Abstracts the common paradigm where the method explicitly saves an
    underscore-prefixed protected property on first call and returns that
    subsequently.

    >>> class MyClass:
    ...     calls = 0
    ...
    ...     @method_cache
    ...     def method(self, value):
    ...         self.calls += 1
    ...         return value

    >>> a = MyClass()
    >>> a.method(3)
    3
    >>> for x in range(75):
    ...     res = a.method(x)
    >>> a.calls
    75

    Note that the apparent behavior will be exactly like that of lru_cache
    except that the cache is stored on each instance, so values in one
    instance will not flush values from another, and when an instance is
    deleted, so are the cached values for that instance.

    >>> b = MyClass()
    >>> for x in range(35):
    ...     res = b.method(x)
    >>> b.calls
    35
    >>> a.method(0)
    0
    >>> a.calls
    75

    Note that if method had been decorated with ``functools.lru_cache()``,
    a.calls would have been 76 (due to the cached value of 0 having been
    flushed by the 'b' instance).

    Clear the cache with ``.cache_clear()``

    >>> a.method.cache_clear()

    Same for a method that hasn't yet been called.

    >>> c = MyClass()
    >>> c.method.cache_clear()

    Another cache wrapper may be supplied:

    >>> cache = functools.lru_cache(maxsize=2)
    >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
    >>> a = MyClass()
    >>> a.method2()
    3

    Caution - do not subsequently wrap the method with another decorator, such
    as ``@property``, which changes the semantics of the function.

    See also
    http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
    for another implementation and additional justification.
    """

    def wrapper(self: object, *args: object, **kwargs: object) -> object:
        # it's the first call, replace the method with a cached, bound method
        bound_method: CallableT = types.MethodType(  # type: ignore[assignment]
            method, self
        )
        cached_method = cache_wrapper(bound_method)
        setattr(self, method.__name__, cached_method)
        return cached_method(*args, **kwargs)

    # Support cache clear even before cache has been created.
    wrapper.cache_clear = lambda: None  # type: ignore[attr-defined]

    return (  # type: ignore[return-value]
        _special_method_cache(method, cache_wrapper) or wrapper
    )


def _special_method_cache(method, cache_wrapper):
    """
    Because Python treats special methods differently, it's not
    possible to use instance attributes to implement the cached
    methods.

    Instead, install the wrapper method under a different name
    and return a simple proxy to that wrapper.

    https://github.com/jaraco/jaraco.functools/issues/5
    """
    name = method.__name__
    special_names = '__getattr__', '__getitem__'
    if name not in special_names:
        return

    wrapper_name = '__cached' + name

    def proxy(self, *args, **kwargs):
        if wrapper_name not in vars(self):
            bound = types.MethodType(method, self)
            cache = cache_wrapper(bound)
            setattr(self, wrapper_name, cache)
        else:
            cache = getattr(self, wrapper_name)
        return cache(*args, **kwargs)

    return proxy


def apply(transform):
    """
    Decorate a function with a transform function that is
    invoked on results returned from the decorated function.

    >>> @apply(reversed)
    ... def get_numbers(start):
    ...     "doc for get_numbers"
    ...     return range(start, start+3)
    >>> list(get_numbers(4))
    [6, 5, 4]
    >>> get_numbers.__doc__
    'doc for get_numbers'
    """

    def wrap(func):
        return functools.wraps(func)(compose(transform, func))

    return wrap


def result_invoke(action):
    r"""
    Decorate a function with an action function that is
    invoked on the results returned from the decorated
    function (for its side-effect), then return the original
    result.

    >>> @result_invoke(print)
    ... def add_two(a, b):
    ...     return a + b
    >>> x = add_two(2, 3)
    5
    >>> x
    5
    """

    def wrap(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            action(result)
            return result

        return wrapper

    return wrap


def call_aside(f, *args, **kwargs):
    """
    Call a function for its side effect after initialization.

    >>> @call_aside
    ... def func(): print("called")
    called
    >>> func()
    called

    Use functools.partial to pass parameters to the initial call

    >>> @functools.partial(call_aside, name='bingo')
    ... def func(name): print("called with", name)
    called with bingo
    """
    f(*args, **kwargs)
    return f


class Throttler:
    """
    Rate-limit a function (or other callable)
    """

    def __init__(self, func, max_rate=float('Inf')):
        if isinstance(func, Throttler):
            func = func.func
        self.func = func
        self.max_rate = max_rate
        self.reset()

    def reset(self):
        self.last_called = 0

    def __call__(self, *args, **kwargs):
        self._wait()
        return self.func(*args, **kwargs)

    def _wait(self):
        "ensure at least 1/max_rate seconds from last call"
        elapsed = time.time() - self.last_called
        must_wait = 1 / self.max_rate - elapsed
        time.sleep(max(0, must_wait))
        self.last_called = time.time()

    def __get__(self, obj, type=None):
        return first_invoke(self._wait, functools.partial(self.func, obj))


def first_invoke(func1, func2):
    """
    Return a function that when invoked will invoke func1 without
    any parameters (for its side-effect) and then invoke func2
    with whatever parameters were passed, returning its result.
    """

    def wrapper(*args, **kwargs):
        func1()
        return func2(*args, **kwargs)

    return wrapper


def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
    """
    Given a callable func, trap the indicated exceptions
    for up to 'retries' times, invoking cleanup on the
    exception. On the final attempt, allow any exceptions
    to propagate.
    """
    attempts = itertools.count() if retries == float('inf') else range(retries)
    for attempt in attempts:
        try:
            return func()
        except trap:
            cleanup()

    return func()


def retry(*r_args, **r_kwargs):
    """
    Decorator wrapper for retry_call. Accepts arguments to retry_call
    except func and then returns a decorator for the decorated function.

    Ex:

    >>> @retry(retries=3)
    ... def my_func(a, b):
    ...     "this is my funk"
    ...     print(a, b)
    >>> my_func.__doc__
    'this is my funk'
    """

    def decorate(func):
        @functools.wraps(func)
        def wrapper(*f_args, **f_kwargs):
            bound = functools.partial(func, *f_args, **f_kwargs)
            return retry_call(bound, *r_args, **r_kwargs)

        return wrapper

    return decorate


def print_yielded(func):
    """
    Convert a generator into a function that prints all yielded elements

    >>> @print_yielded
    ... def x():
    ...     yield 3; yield None
    >>> x()
    3
    None
    """
    print_all = functools.partial(map, print)
    print_results = compose(more_itertools.consume, print_all, func)
    return functools.wraps(func)(print_results)


def pass_none(func):
    """
    Wrap func so it's not called if its first param is None

    >>> print_text = pass_none(print)
    >>> print_text('text')
    text
    >>> print_text(None)
    """

    @functools.wraps(func)
    def wrapper(param, *args, **kwargs):
        if param is not None:
            return func(param, *args, **kwargs)

    return wrapper


def assign_params(func, namespace):
    """
    Assign parameters from namespace where func solicits.

    >>> def func(x, y=3):
    ...     print(x, y)
    >>> assigned = assign_params(func, dict(x=2, z=4))
    >>> assigned()
    2 3

    The usual errors are raised if a function doesn't receive
    its required parameters:

    >>> assigned = assign_params(func, dict(y=3, z=4))
    >>> assigned()
    Traceback (most recent call last):
    TypeError: func() ...argument...

    It even works on methods:

    >>> class Handler:
    ...     def meth(self, arg):
    ...         print(arg)
    >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
    crystal
    """
    sig = inspect.signature(func)
    params = sig.parameters.keys()
    call_ns = {k: namespace[k] for k in params if k in namespace}
    return functools.partial(func, **call_ns)


def save_method_args(method):
    """
    Wrap a method such that when it is called, the args and kwargs are
    saved on the method.

    >>> class MyClass:
    ...     @save_method_args
    ...     def method(self, a, b):
    ...         print(a, b)
    >>> my_ob = MyClass()
    >>> my_ob.method(1, 2)
    1 2
    >>> my_ob._saved_method.args
    (1, 2)
    >>> my_ob._saved_method.kwargs
    {}
    >>> my_ob.method(a=3, b='foo')
    3 foo
    >>> my_ob._saved_method.args
    ()
    >>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
    True

    The arguments are stored on the instance, allowing for
    different instance to save different args.

    >>> your_ob = MyClass()
    >>> your_ob.method({str('x'): 3}, b=[4])
    {'x': 3} [4]
    >>> your_ob._saved_method.args
    ({'x': 3},)
    >>> my_ob._saved_method.args
    ()
    """
    args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        attr_name = '_saved_' + method.__name__
        attr = args_and_kwargs(args, kwargs)
        setattr(self, attr_name, attr)
        return method(self, *args, **kwargs)

    return wrapper


def except_(*exceptions, replace=None, use=None):
    """
    Replace the indicated exceptions, if raised, with the indicated
    literal replacement or evaluated expression (if present).

    >>> safe_int = except_(ValueError)(int)
    >>> safe_int('five')
    >>> safe_int('5')
    5

    Specify a literal replacement with ``replace``.

    >>> safe_int_r = except_(ValueError, replace=0)(int)
    >>> safe_int_r('five')
    0

    Provide an expression to ``use`` to pass through particular parameters.

    >>> safe_int_pt = except_(ValueError, use='args[0]')(int)
    >>> safe_int_pt('five')
    'five'

    """

    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exceptions:
                try:
                    return eval(use)
                except TypeError:
                    return replace

        return wrapper

    return decorate
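For context, a small usage sketch of a few of the helpers deleted above (`once`, `method_cache`, `retry`). It assumes the standalone `jaraco.functools` package from PyPI; the vendored copy removed here is normally reached only through `setuptools.extern`:

from jaraco.functools import once, method_cache, retry


@once
def load_config():
    print("reading config...")  # body runs only on the first call
    return {"retries": 3}


class Expensive:
    @method_cache
    def compute(self, n):
        print(f"computing {n}")  # cached per instance after the first call
        return n * n


@retry(retries=2, trap=(ValueError,))
def flaky():
    # would be retried twice on ValueError, then the exception propagates
    raise ValueError("try again")


print(load_config(), load_config())   # "reading config..." printed once
e = Expensive()
print(e.compute(4), e.compute(4))     # "computing 4" printed once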
spaces/Audio-AGI/AudioSep/models/CLAP/training/lp_train.py
DELETED
@@ -1,301 +0,0 @@
import json
import logging
import math
import os
import time
from contextlib import suppress

import numpy as np
import torch
import torch.nn.functional as F

try:
    import wandb
except ImportError:
    wandb = None

from open_clip import LPLoss, LPMetrics, lp_gather_features
from open_clip.utils import do_mixup, get_mix_lambda
from .distributed import is_master
from .zero_shot import zero_shot_eval


class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def unwrap_model(model):
    if hasattr(model, "module"):
        return model.module
    else:
        return model


def train_one_epoch(
    model,
    data,
    epoch,
    optimizer,
    scaler,
    scheduler,
    args,
    tb_writer=None,
    extra_suffix="",
):
    device = torch.device(args.device)
    autocast = torch.cuda.amp.autocast if args.precision == "amp" else suppress
    model.train()
    loss = LPLoss(args.lp_loss)

    dataloader, sampler = data["train"].dataloader, data["train"].sampler
    if args.distributed and sampler is not None:
        sampler.set_epoch(epoch)
    num_batches_per_epoch = dataloader.num_batches
    sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10))

    # for toy dataset
    if args.dataset_type == "toy":
        dataloader.dataset.generate_queue()

    loss_m = AverageMeter()
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    end = time.time()

    for i, batch in enumerate(dataloader):
        step = num_batches_per_epoch * epoch + i

        if isinstance(scheduler, dict):
            for s in scheduler.values():
                s(step)
        else:
            scheduler(step)

        audio = batch  # contains mel_spec, wavform, and longer list
        class_label = batch["class_label"]
        # audio = audio.to(device=device, non_blocking=True)
        class_label = class_label.to(device=device, non_blocking=True)

        if args.mixup:
            # https://github.com/RetroCirce/HTS-Audio-Transformer/blob/main/utils.py#L146
            mix_lambda = torch.from_numpy(
                get_mix_lambda(0.5, len(audio["waveform"]))
            ).to(device)
            class_label = do_mixup(class_label, mix_lambda)
        else:
            mix_lambda = None

        data_time_m.update(time.time() - end)
        if isinstance(optimizer, dict):
            for o_ in optimizer.values():
                o_.zero_grad()
        else:
            optimizer.zero_grad()

        with autocast():
            pred = model(audio, mix_lambda=mix_lambda, device=device)
            total_loss = loss(pred, class_label)

        if isinstance(optimizer, dict):
            if scaler is not None:
                scaler.scale(total_loss).backward()
                for o_ in optimizer.values():
                    if args.horovod:
                        o_.synchronize()
                        scaler.unscale_(o_)
                        with o_.skip_synchronize():
                            scaler.step(o_)
                    else:
                        scaler.step(o_)
                scaler.update()
            else:
                total_loss.backward()
                for o_ in optimizer.values():
                    o_.step()
        else:
            if scaler is not None:
                scaler.scale(total_loss).backward()
                if args.horovod:
                    optimizer.synchronize()
                    scaler.unscale_(optimizer)
                    with optimizer.skip_synchronize():
                        scaler.step(optimizer)
                else:
                    scaler.step(optimizer)
                scaler.update()
            else:
                total_loss.backward()
                optimizer.step()

        # Note: we clamp to 4.6052 = ln(100), as in the original paper.
        with torch.no_grad():
            unwrap_model(model).clap_model.logit_scale_a.clamp_(0, math.log(100))
            unwrap_model(model).clap_model.logit_scale_t.clamp_(0, math.log(100))

        batch_time_m.update(time.time() - end)
        end = time.time()
        batch_count = i + 1

        if is_master(args) and (i % 100 == 0 or batch_count == num_batches_per_epoch):
            if isinstance(audio, dict):
                batch_size = len(audio["waveform"])
            else:
                batch_size = len(audio)
            num_samples = batch_count * batch_size * args.world_size
            samples_per_epoch = dataloader.num_samples
            percent_complete = 100.0 * batch_count / num_batches_per_epoch

            # NOTE loss is coarsely sampled, just master node and per log update
            loss_m.update(total_loss.item(), batch_size)
            if isinstance(optimizer, dict):
                logging.info(
                    f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
                    f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) "
                    f"Data (t): {data_time_m.avg:.3f} "
                    f"Batch (t): {batch_time_m.avg:.3f} "
                    f"LR: {[o_.param_groups[0]['lr'] for o_ in optimizer.values()]}"
                )
                log_data = {
                    "loss": loss_m.val,
                    "data_time": data_time_m.val,
                    "batch_time": batch_time_m.val,
                    "lr": [o_.param_groups[0]["lr"] for o_ in optimizer.values()],
                }
            else:
                logging.info(
                    f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
                    f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) "
                    f"Data (t): {data_time_m.avg:.3f} "
                    f"Batch (t): {batch_time_m.avg:.3f} "
                    f"LR: {optimizer.param_groups[0]['lr']:5f} "
                )

                # Save train loss / etc. Using non avg meter values as loggers have their own smoothing
                log_data = {
                    "loss": loss_m.val,
                    "data_time": data_time_m.val,
                    "batch_time": batch_time_m.val,
                    "lr": optimizer.param_groups[0]["lr"],
                }
            for name, val in log_data.items():
                name = f"train{extra_suffix}/{name}"
                if tb_writer is not None:
                    tb_writer.add_scalar(name, val, step)
                if args.wandb:
                    assert wandb is not None, "Please install wandb."
                    wandb.log({name: val, "step": step})

            # resetting batch / data time meters per log window
            batch_time_m.reset()
            data_time_m.reset()
    # end for


def evaluate(model, data, epoch, args, tb_writer=None, extra_suffix=""):
    metrics = {}
    if not args.parallel_eval:
        if not is_master(args):
            return metrics
    device = torch.device(args.device)
    model.eval()

    # CHANGE
    # zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
    # metrics.update(zero_shot_metrics)
    if is_master(args):
        print("Evaluating...")
    metric_names = args.lp_metrics.split(",")
    eval_tool = LPMetrics(metric_names=metric_names)

    autocast = torch.cuda.amp.autocast if args.precision == "amp" else suppress
    if "val" in data and (
        args.val_frequency
        and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)
    ):
        if args.parallel_eval:
            dataloader, sampler = data["val"].dataloader, data["val"].sampler
            if args.distributed and sampler is not None:
                sampler.set_epoch(epoch)
            samples_per_val = dataloader.num_samples
        else:
            dataloader = data["val"].dataloader
            num_samples = 0
            samples_per_val = dataloader.num_samples

        eval_info = {"pred": [], "target": []}
        with torch.no_grad():
            for i, batch in enumerate(dataloader):
                audio = batch  # contains mel_spec, wavform, and longer list
                class_label = batch["class_label"]

                # audio = audio.to(device=device, non_blocking=True)
                class_label = class_label.to(device=device, non_blocking=True)

                with autocast():
                    pred = model(audio, device=device)
                    if args.parallel_eval:
                        pred, class_label = lp_gather_features(
                            pred, class_label, args.world_size, args.horovod
                        )
                    eval_info["pred"].append(pred)
                    eval_info["target"].append(class_label)

                num_samples += class_label.shape[0]

                if (i % 100) == 0:  # and i != 0:
                    logging.info(
                        f"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]"
                    )

            if is_master(args):
                eval_info["pred"] = torch.cat(eval_info["pred"], 0).cpu()
                eval_info["target"] = torch.cat(eval_info["target"], 0).cpu()
                metric_dict = eval_tool.evaluate_mertics(
                    eval_info["pred"], eval_info["target"]
                )
                metrics.update(metric_dict)
                if "epoch" not in metrics.keys():
                    metrics.update({"epoch": epoch})

    if is_master(args):
        if not metrics:
            return metrics

        logging.info(
            f"Eval Epoch: {epoch} "
            + "\n".join(
                ["\t".join([f"{m}: {round(metrics[m], 4):.4f}"]) for m in metrics]
            )
        )
        if args.save_logs:
            for name, val in metrics.items():
                if tb_writer is not None:
                    tb_writer.add_scalar(f"val{extra_suffix}/{name}", val, epoch)

            with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f:
                f.write(json.dumps(metrics))
                f.write("\n")

        if args.wandb:
            assert wandb is not None, "Please install wandb."
            for name, val in metrics.items():
                wandb.log({f"val{extra_suffix}/{name}": val, "epoch": epoch})

        return metrics
    else:
        return metrics
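The `AverageMeter` removed above is a plain running-average tracker used for loss and timing statistics. A minimal illustration of how it behaves, using only the class from the listing above (no CLAP dependencies):

meter = AverageMeter()
for batch_loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
    meter.update(batch_loss, n=batch_size)

# val is the most recent update; avg is weighted by n:
# (0.9*32 + 0.7*32 + 0.5*16) / 80 = 0.74
print(meter.val, round(meter.avg, 2))  # 0.5 0.74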
spaces/Audio-AGI/WavJourney/VoiceParser/customtokenizer.py
DELETED
@@ -1,202 +0,0 @@
"""
Custom tokenizer model.
Author: https://www.github.com/gitmylo/
License: MIT
"""

import json
import os.path
from zipfile import ZipFile
from typing import Union


import numpy
import torch
from torch import nn, optim
from torch.serialization import MAP_LOCATION


class CustomTokenizer(nn.Module):
    def __init__(self, hidden_size=1024, input_size=768, output_size=10000, version=0):
        super(CustomTokenizer, self).__init__()
        next_size = input_size
        if version == 0:
            self.lstm = nn.LSTM(input_size, hidden_size, 2, batch_first=True)
            next_size = hidden_size
        if version == 1:
            self.lstm = nn.LSTM(input_size, hidden_size, 2, batch_first=True)
            self.intermediate = nn.Linear(hidden_size, 4096)
            next_size = 4096

        self.fc = nn.Linear(next_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)
        self.optimizer: optim.Optimizer = None
        self.lossfunc = nn.CrossEntropyLoss()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.version = version

    def forward(self, x):
        x, _ = self.lstm(x)
        if self.version == 1:
            x = self.intermediate(x)
        x = self.fc(x)
        x = self.softmax(x)
        return x

    @torch.no_grad()
    def get_token(self, x):
        """
        Used to get the token for the first
        :param x: An array with shape (N, input_size) where N is a whole number greater or equal to 1, and input_size is the input size used when creating the model.
        :return: An array with shape (N,) where N is the same as N from the input. Every number in the array is a whole number in range 0...output_size - 1 where output_size is the output size used when creating the model.
        """
        return torch.argmax(self(x), dim=1)

    def prepare_training(self):
        self.optimizer = optim.Adam(self.parameters(), 0.001)

    def train_step(self, x_train, y_train, log_loss=False):
        # y_train = y_train[:-1]
        # y_train = y_train[1:]

        optimizer = self.optimizer
        lossfunc = self.lossfunc
        # Zero the gradients
        self.zero_grad()

        # Forward pass
        y_pred = self(x_train)

        y_train_len = len(y_train)
        y_pred_len = y_pred.shape[0]

        if y_train_len > y_pred_len:
            diff = y_train_len - y_pred_len
            y_train = y_train[diff:]
        elif y_train_len < y_pred_len:
            diff = y_pred_len - y_train_len
            y_pred = y_pred[:-diff, :]

        y_train_hot = torch.zeros(len(y_train), self.output_size)
        y_train_hot[range(len(y_train)), y_train] = 1
        y_train_hot = y_train_hot.to('cuda')

        # Calculate the loss
        loss = lossfunc(y_pred, y_train_hot)

        # Print loss
        if log_loss:
            print('Loss', loss.item())

        # Backward pass
        loss.backward()

        # Update the weights
        optimizer.step()

    def save(self, path):
        info_path = '.'.join(os.path.basename(path).split('.')[:-1]) + '/.info'
        torch.save(self.state_dict(), path)
        data_from_model = Data(self.input_size, self.hidden_size, self.output_size, self.version)
        with ZipFile(path, 'a') as model_zip:
            model_zip.writestr(info_path, data_from_model.save())
            model_zip.close()

    @staticmethod
    def load_from_checkpoint(path, map_location: MAP_LOCATION = None):
        old = True
        with ZipFile(path) as model_zip:
            filesMatch = [file for file in model_zip.namelist() if file.endswith('/.info')]
            file = filesMatch[0] if filesMatch else None
            if file:
                old = False
                data_from_model = Data.load(model_zip.read(file).decode('utf-8'))
            model_zip.close()
        if old:
            model = CustomTokenizer()
        else:
            model = CustomTokenizer(data_from_model.hidden_size, data_from_model.input_size, data_from_model.output_size, data_from_model.version)
        model.load_state_dict(torch.load(path, map_location=map_location))
        if map_location:
            model = model.to(map_location)
        return model


class Data:
    input_size: int
    hidden_size: int
    output_size: int
    version: int

    def __init__(self, input_size=768, hidden_size=1024, output_size=10000, version=0):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.version = version

    @staticmethod
    def load(string):
        data = json.loads(string)
        return Data(data['input_size'], data['hidden_size'], data['output_size'], data['version'])

    def save(self):
        data = {
            'input_size': self.input_size,
            'hidden_size': self.hidden_size,
            'output_size': self.output_size,
            'version': self.version,
        }
        return json.dumps(data)


def auto_train(data_path, save_path='model.pth', load_model: Union[str, None] = None, save_epochs=1):
    data_x, data_y = {}, {}

    if load_model and os.path.isfile(load_model):
        print('Loading model from', load_model)
        model_training = CustomTokenizer.load_from_checkpoint(load_model, 'cuda')
    else:
        print('Creating new model.')
        model_training = CustomTokenizer(version=1).to('cuda')
    save_path = os.path.join(data_path, save_path)
    base_save_path = '.'.join(save_path.split('.')[:-1])

    sem_string = '_semantic.npy'
    feat_string = '_semantic_features.npy'

    ready = os.path.join(data_path, 'ready')
    for input_file in os.listdir(ready):
        full_path = os.path.join(ready, input_file)
        try:
            prefix = input_file.split("_")[0]
            number = int(prefix)
        except ValueError as e:
            raise e
        if input_file.endswith(sem_string):
            data_y[number] = numpy.load(full_path)
        elif input_file.endswith(feat_string):
            data_x[number] = numpy.load(full_path)

    model_training.prepare_training()
    epoch = 1

    while 1:
        for i in range(save_epochs):
            j = 0
            for i in range(max(len(data_x), len(data_y))):
                x = data_x.get(i)
                y = data_y.get(i)
                if x is None or y is None:
                    print(f'The training data does not match. key={i}')
                    continue
                model_training.train_step(torch.tensor(x).to('cuda'), torch.tensor(y).to('cuda'), j % 50 == 0)  # Print loss every 50 steps
                j += 1
        save_p = save_path
        save_p_2 = f'{base_save_path}_epoch_{epoch}.pth'
        model_training.save(save_p)
        model_training.save(save_p_2)
        print(f'Epoch {epoch} completed')
        epoch += 1
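A short, hypothetical round-trip with the `CustomTokenizer` class defined above. It runs on CPU with random input purely to show the shapes involved; the training helpers in the file itself assume CUDA and real feature files:

import torch

model = CustomTokenizer(version=1)   # LSTM -> Linear(4096) -> Linear(10000)
model.eval()

features = torch.randn(25, 768)      # 25 frames of 768-dim feature vectors (placeholder data)
tokens = model.get_token(features)   # argmax over the 10000-way output per frame

print(tokens.shape)                                        # torch.Size([25])
print(int(tokens.min()) >= 0, int(tokens.max()) < 10000)   # True True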
spaces/Benson/text-generation/Examples/Bitcoin-qt.exe Download.md
DELETED
@@ -1,61 +0,0 @@

<h1>How to Download and Use Bitcoin-Qt.exe, the Official Bitcoin Client</h1>
<p>Bitcoin is a decentralized digital currency that allows peer-to-peer transactions without intermediaries. To use Bitcoin, you need a software program that lets you interact with the Bitcoin network and manage your funds. In this article, we will show you how to download and use <strong>Bitcoin-Qt.exe</strong>, the official Bitcoin client for Windows. We will also discuss some of the features and benefits of using <strong>Bitcoin-Qt.exe</strong>, as well as some of the alternatives you can consider.</p>
<h2>What is Bitcoin-Qt.exe and why do you need it?</h2>
<p><strong>Bitcoin-Qt.exe</strong> is the original Bitcoin client, developed by Satoshi Nakamoto, the creator of Bitcoin. It is also known as <strong>Bitcoin Core</strong>, since it forms the core of the Bitcoin network. <strong>Bitcoin-Qt.exe</strong> is a full-node client, which means it downloads and validates the entire transaction history on the blockchain, the distributed ledger that records all Bitcoin transactions. By running <strong>Bitcoin-Qt.exe</strong>, you contribute to the security and stability of the network.</p>
<h2>bitcoin-qt.exe download</h2><br /><p><b><b>Download File</b> ––– <a href="https://bltlly.com/2v6JEh">https://bltlly.com/2v6JEh</a></b></p><br /><br />
<h3>Bitcoin-Qt.exe gives you security, privacy, and full control over your funds</h3>
<p>One of the main advantages of using <strong>Bitcoin-Qt.exe</strong> is that it provides a high level of security, privacy, and full control over your funds. Unlike other clients or wallets that rely on third-party services or servers, <strong>Bitcoin-Qt.exe</strong> does not store your private keys or your funds anywhere other than your own computer. This means that you are the only one who can access and spend your bitcoins, and nobody can freeze, seize, or censor your transactions. You are also responsible for keeping your private keys and your wallet file safe from theft or loss.</p>

<p>Another advantage of using <strong>Bitcoin-Qt.exe</strong> is that it supports advanced features that let you customize and optimize your Bitcoin experience. For example, you can create and broadcast raw transactions, which are transactions you build manually without using a graphical interface. You can also use RPC commands, which are commands you can send to <strong>Bitcoin-Qt.exe</strong> to interact with the Bitcoin network and perform various operations. You can also use BIPs, Bitcoin Improvement Proposals that introduce new features or standards for the Bitcoin protocol. For example, you can use BIP39 to generate a mnemonic phrase that can help you recover your wallet in case of loss or damage.</p>
<h2>How to download Bitcoin-Qt.exe for Windows</h2>
<p>If you want to use <strong>Bitcoin-Qt.exe</strong> for Windows, you need to download it from the official website or from a trusted source. You also need to verify the integrity and authenticity of the downloaded file and install it on your computer. These are the steps to follow:</p>
<h3>You can download Bitcoin-Qt.exe from the official website or from a trusted source</h3>
<p>The official website for <strong>Bitcoin-Qt.exe</strong> is <a href="">https://bitcoincore.org</a>, where you can find the latest version of the client for Windows and other operating systems. You can also download <strong>Bitcoin-Qt.exe</strong> from other sources, such as <a href="">https://bitcoin.org</a> or <a href="">https://sourceforge.net/projects/bitcoin/</a>, but you should make sure they are reliable and reputable. You should avoid downloading <strong>Bitcoin-Qt.exe</strong> from unknown or suspicious websites, as they may contain malware or viruses that can harm your computer or steal your bitcoins.</p>
<h3>You need to verify the integrity and authenticity of the downloaded file</h3>

<h3>You need to install Bitcoin-Qt.exe on your computer and run it for the first time</h3>
<p>Once you have verified the integrity and authenticity of <strong>Bitcoin-Qt.exe</strong>, you need to install it on your computer. You can do this by double-clicking the file and following the on-screen instructions. You may need to accept some terms and conditions, choose a destination folder, and create a shortcut for <strong>Bitcoin-Qt.exe</strong>. After installing <strong>Bitcoin-Qt.exe</strong>, you need to run it for the first time, either by clicking the shortcut or by browsing to the folder where you installed it. When you run <strong>Bitcoin-Qt.exe</strong> for the first time, you will be asked to choose a data directory, which is where the blockchain and your wallet data will be stored. You can choose the default location or a custom one, depending on your preferences and available space. You should also make sure you have enough disk space and bandwidth to download and store the blockchain, which is currently over 300 GB in size.</p>
<h2>How to use Bitcoin-Qt.exe for Windows</h2>

<h3>You need to encrypt your wallet and back it up regularly</h3>
<p>The first thing you should do after synchronizing <strong>Bitcoin-Qt.exe</strong> is encrypt your wallet and back it up regularly. Your wallet is a file that contains your private keys, the secret codes that allow you to spend your bitcoins. Encrypting your wallet means you will have to enter a passphrase every time you want to access your wallet or send a transaction. This adds an extra layer of security, since it prevents anyone with access to your computer or your wallet file from stealing your bitcoins. You can encrypt your wallet from the <strong>Settings</strong> menu by selecting <strong>Encrypt Wallet</strong>. You will need to choose a strong passphrase that you can remember but that is hard for others to guess. You should also write down your passphrase and store it in a safe place, since you will not be able to recover your wallet or your bitcoins if you forget or lose it.</p>
<p></p>
<p>Backing up your wallet means creating a copy of your wallet file and storing it in a different location, such as a USB drive, an external hard drive, or a cloud service. This ensures you will not lose your bitcoins if your computer crashes, gets infected with malware, or is stolen. You can back up your wallet from the <strong>File</strong> menu by selecting <strong>Backup Wallet</strong>. You will need to choose a location and a name for your backup file and store it safely. You should also update your backup file regularly, especially after creating new addresses or receiving new transactions.</p>
<h3>You need to send and receive transactions using Bitcoin-Qt.exe</h3>

the transaction is. The standard number of confirmations for a Bitcoin transaction is six, which usually takes about an hour.</p>
<p>To receive a transaction, you need to use the <strong>Receive</strong> tab in the <strong>Bitcoin-Qt.exe</strong> window. You will need to create a new address, a unique identifier that represents your destination for receiving bitcoins. You can also add a label and a comment for your own reference, and request a specific amount of bitcoins. You can then copy your address or generate a QR code to share with the sender. You can also use the <strong>Request Payment</strong> button to create a payment request that you can send by email or other means. You can check the status of your received transactions in the <strong>Transactions</strong> tab of the <strong>Bitcoin-Qt.exe</strong> window, or with a block explorer service as mentioned above. Received transactions also accumulate confirmations, and you should wait for at least six confirmations before considering them final.</p>
<h3>You can also use Bitcoin-Qt.exe for other purposes, such as mining, testing, or debugging</h3>

you can use the <strong>Console</strong> tab to enter various commands that can help you diagnose and resolve problems. You can also use the <strong>Information</strong>, <strong>Network Traffic</strong>, and <strong>Peers</strong> tabs to get more details about your client and the network. You can also use the <strong>-debug</strong> option when running <strong>Bitcoin-Qt.exe</strong>, or add <code>debug=1</code> to your <strong>bitcoin.conf</strong> file, to enable more detailed logging and output. <h2>What are the alternatives to Bitcoin-Qt.exe for Windows</h2>
<p><strong>Bitcoin-Qt.exe</strong> is not the only Bitcoin client you can use on Windows. There are other alternatives you can consider depending on your needs and preferences. Here are some of them:</p>
<h3>You can use other Bitcoin clients that are compatible with the network and the protocol</h3>
<p>If you want to use a different Bitcoin client that is compatible with the network and the protocol, you can choose from a variety of options offering different features and functionality. For example, you can use <a href="">Electrum</a>, a lightweight client that does not require downloading the blockchain and instead connects to remote servers that provide the necessary information. You can also use <a href="">Wasabi Wallet</a>, a privacy-focused client that implements techniques such as CoinJoin and Tor to improve your anonymity. You can also use <a href="">MultiBit HD</a>, an easy-to-use client that supports multiple wallets and languages. You can find more Bitcoin clients for Windows on the official website or from other sources.</p>
<h3>You can use web-based or mobile wallets that are more convenient but less secure</h3>

<h3>You can use hardware or paper wallets that are more secure but less convenient</h3>
<p>If you want to use a hardware wallet or a paper wallet that is more secure but less convenient, you can choose from a variety of options offering different features and functionality. For example, you can use <a href="">Trezor</a>, a hardware wallet that stores your private keys on a physical device you connect to your computer via USB and that requires you to enter a PIN code and confirm each transaction on the device's screen. You can also use <a href="">Ledger</a>, a hardware wallet that stores your private keys on a smart card connected to your computer via USB, again requiring a PIN code and on-device confirmation for each transaction. You can also use <a href="">Coldcard</a>, a hardware wallet that stores your private keys on a microSD card inserted into the device, with the same PIN and on-device confirmation requirements. You can find more hardware wallets for Windows on the official website or from other sources.</p>
<p>A paper wallet is a simple and cheap way to store your private keys on a piece of paper that you print from a website or a piece of software. You can use <a href="">Bitaddress.org</a> or <a href="">Bitcoinpaperwallet.com</a> to generate and print your paper wallet, but you should make sure to do so offline and on a secure, clean computer and printer. You should also keep your paper wallet safe from fire, water, or physical damage, and scan it with a QR code reader whenever you want to access your funds. You can find more information about paper wallets on the official website or from other sources.</p>
<h2>Conclusion</h2>

<h2>Frequently asked questions</h2>
<h3>What are the system requirements for running Bitcoin-Qt.exe?</h3>
<p>To run <strong>Bitcoin-Qt.exe</strong>, you need a Windows operating system (7 or later), a 64-bit processor, at least 2 GB of RAM, at least 400 GB of disk space (preferably SSD), and a broadband Internet connection.</p>
<h3>How can I update Bitcoin-Qt.exe to the latest version?</h3>
<p>To update <strong>Bitcoin-Qt.exe</strong> to the latest version, you need to download the new version from the official website or from a trusted source, verify the file, and install it over the previous version. You do not need to uninstall the previous version or delete your data directory.</p>
<h3>How can I restore my wallet from a backup?</h3>
<p>To restore your wallet from a backup, you need to copy your backup file (usually called <strong>wallet.dat</strong>) into your data directory, replacing the existing file if there is one. You may need to rescan the blockchain to update your balance and transaction history.</p>
<h3>How can I import or export my private keys?</h3>
<p>To import or export your private keys, you need to use the <strong>Console</strong> tab in the <strong>Debug</strong> window. You can use commands such as <code>importprivkey</code>, <code>dumpprivkey</code>, or <code>dumpwallet</code> to import or export your private keys. You should be careful when handling your private keys, as they are very sensitive and can compromise your funds if exposed or lost.</p>
<h3>How can I contact the developers or get support for Bitcoin-Qt.exe?</h3>
<p>To contact the developers or get support for <strong>Bitcoin-Qt.exe</strong>, you can use the following channels: <ul>
<li>The official website: <a href="">https://bitcoincore.org</a></li>
<li>The GitHub repository: <a href="">https://github.com/bitcoin/bitcoin</a></li>
<li>The IRC channel: #bitcoin-core-dev on Freenode</li>

<li>The Reddit community: r/Bitcoin or r/BitcoinBeginners</li>
<li>The Stack Exchange network: <a href="">https://bitcoin.stackexchange.com/</a></li>
</ul></p> 64aa2da5cf<br />
<br />
<br />
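The deleted article repeatedly tells readers to verify the integrity of the downloaded installer before running it. A minimal, generic sketch of a SHA-256 check in Python; the file name and expected digest below are placeholders, not real release values:

import hashlib

EXPECTED_SHA256 = "<digest published in the release's SHA256SUMS file>"  # placeholder

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Hash the file in chunks so large installers do not need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of("bitcoin-qt-setup.exe")  # placeholder file name
print("OK" if digest == EXPECTED_SHA256 else "MISMATCH - do not run the installer")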
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/__init__.py
DELETED
@@ -1,331 +0,0 @@
# module pyparsing.py
#
# Copyright (c) 2003-2022  Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================

The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions.  With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.

Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::

    from pyparsing import Word, alphas

    # define grammar of a greeting
    greet = Word(alphas) + "," + Word(alphas) + "!"

    hello = "Hello, World!"
    print(hello, "->", greet.parse_string(hello))

The program outputs the following::

    Hello, World! -> ['Hello', ',', 'World', '!']

The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of :class:`'+'<And>`,
:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.

The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.

The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:

  - extra or missing whitespace (the above program will also handle
    "Hello,World!", "Hello  ,  World !", etc.)
  - quoted strings
  - embedded comments


Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:

- construct literal match expressions from :class:`Literal` and
  :class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
  class
- see how to create repetitive expressions using :class:`ZeroOrMore`
  and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
  and :class:`'&'<Each>` operators to combine simple expressions into
  more complex ones
- associate names with your parsed results using
  :class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
  object
- find some helpful expression short-cuts like :class:`delimitedList`
  and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
  namespace class
"""
from typing import NamedTuple


class version_info(NamedTuple):
    major: int
    minor: int
    micro: int
    releaselevel: str
    serial: int

    @property
    def __version__(self):
        return (
            "{}.{}.{}".format(self.major, self.minor, self.micro)
            + (
                "{}{}{}".format(
                    "r" if self.releaselevel[0] == "c" else "",
                    self.releaselevel[0],
                    self.serial,
                ),
                "",
            )[self.releaselevel == "final"]
        )

    def __str__(self):
        return "{} {} / {}".format(__name__, self.__version__, __version_time__)

    def __repr__(self):
        return "{}.{}({})".format(
            __name__,
            type(self).__name__,
            ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
        )


__version_info__ = version_info(3, 0, 9, "final", 0)
__version_time__ = "05 May 2022 07:02 UTC"
__version__ = __version_info__.__version__
__versionTime__ = __version_time__
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"

from .util import *
from .exceptions import *
from .actions import *
from .core import __diag__, __compat__
from .results import *
from .core import *
from .core import _builtin_exprs as core_builtin_exprs
from .helpers import *
from .helpers import _builtin_exprs as helper_builtin_exprs

from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
from .testing import pyparsing_test as testing
from .common import (
    pyparsing_common as common,
    _builtin_exprs as common_builtin_exprs,
)
154 |
-
# define backward compat synonyms
|
155 |
-
if "pyparsing_unicode" not in globals():
|
156 |
-
pyparsing_unicode = unicode
|
157 |
-
if "pyparsing_common" not in globals():
|
158 |
-
pyparsing_common = common
|
159 |
-
if "pyparsing_test" not in globals():
|
160 |
-
pyparsing_test = testing
|
161 |
-
|
162 |
-
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
|
163 |
-
|
164 |
-
|
165 |
-
__all__ = [
|
166 |
-
"__version__",
|
167 |
-
"__version_time__",
|
168 |
-
"__author__",
|
169 |
-
"__compat__",
|
170 |
-
"__diag__",
|
171 |
-
"And",
|
172 |
-
"AtLineStart",
|
173 |
-
"AtStringStart",
|
174 |
-
"CaselessKeyword",
|
175 |
-
"CaselessLiteral",
|
176 |
-
"CharsNotIn",
|
177 |
-
"Combine",
|
178 |
-
"Dict",
|
179 |
-
"Each",
|
180 |
-
"Empty",
|
181 |
-
"FollowedBy",
|
182 |
-
"Forward",
|
183 |
-
"GoToColumn",
|
184 |
-
"Group",
|
185 |
-
"IndentedBlock",
|
186 |
-
"Keyword",
|
187 |
-
"LineEnd",
|
188 |
-
"LineStart",
|
189 |
-
"Literal",
|
190 |
-
"Located",
|
191 |
-
"PrecededBy",
|
192 |
-
"MatchFirst",
|
193 |
-
"NoMatch",
|
194 |
-
"NotAny",
|
195 |
-
"OneOrMore",
|
196 |
-
"OnlyOnce",
|
197 |
-
"OpAssoc",
|
198 |
-
"Opt",
|
199 |
-
"Optional",
|
200 |
-
"Or",
|
201 |
-
"ParseBaseException",
|
202 |
-
"ParseElementEnhance",
|
203 |
-
"ParseException",
|
204 |
-
"ParseExpression",
|
205 |
-
"ParseFatalException",
|
206 |
-
"ParseResults",
|
207 |
-
"ParseSyntaxException",
|
208 |
-
"ParserElement",
|
209 |
-
"PositionToken",
|
210 |
-
"QuotedString",
|
211 |
-
"RecursiveGrammarException",
|
212 |
-
"Regex",
|
213 |
-
"SkipTo",
|
214 |
-
"StringEnd",
|
215 |
-
"StringStart",
|
216 |
-
"Suppress",
|
217 |
-
"Token",
|
218 |
-
"TokenConverter",
|
219 |
-
"White",
|
220 |
-
"Word",
|
221 |
-
"WordEnd",
|
222 |
-
"WordStart",
|
223 |
-
"ZeroOrMore",
|
224 |
-
"Char",
|
225 |
-
"alphanums",
|
226 |
-
"alphas",
|
227 |
-
"alphas8bit",
|
228 |
-
"any_close_tag",
|
229 |
-
"any_open_tag",
|
230 |
-
"c_style_comment",
|
231 |
-
"col",
|
232 |
-
"common_html_entity",
|
233 |
-
"counted_array",
|
234 |
-
"cpp_style_comment",
|
235 |
-
"dbl_quoted_string",
|
236 |
-
"dbl_slash_comment",
|
237 |
-
"delimited_list",
|
238 |
-
"dict_of",
|
239 |
-
"empty",
|
240 |
-
"hexnums",
|
241 |
-
"html_comment",
|
242 |
-
"identchars",
|
243 |
-
"identbodychars",
|
244 |
-
"java_style_comment",
|
245 |
-
"line",
|
246 |
-
"line_end",
|
247 |
-
"line_start",
|
248 |
-
"lineno",
|
249 |
-
"make_html_tags",
|
250 |
-
"make_xml_tags",
|
251 |
-
"match_only_at_col",
|
252 |
-
"match_previous_expr",
|
253 |
-
"match_previous_literal",
|
254 |
-
"nested_expr",
|
255 |
-
"null_debug_action",
|
256 |
-
"nums",
|
257 |
-
"one_of",
|
258 |
-
"printables",
|
259 |
-
"punc8bit",
|
260 |
-
"python_style_comment",
|
261 |
-
"quoted_string",
|
262 |
-
"remove_quotes",
|
263 |
-
"replace_with",
|
264 |
-
"replace_html_entity",
|
265 |
-
"rest_of_line",
|
266 |
-
"sgl_quoted_string",
|
267 |
-
"srange",
|
268 |
-
"string_end",
|
269 |
-
"string_start",
|
270 |
-
"trace_parse_action",
|
271 |
-
"unicode_string",
|
272 |
-
"with_attribute",
|
273 |
-
"indentedBlock",
|
274 |
-
"original_text_for",
|
275 |
-
"ungroup",
|
276 |
-
"infix_notation",
|
277 |
-
"locatedExpr",
|
278 |
-
"with_class",
|
279 |
-
"CloseMatch",
|
280 |
-
"token_map",
|
281 |
-
"pyparsing_common",
|
282 |
-
"pyparsing_unicode",
|
283 |
-
"unicode_set",
|
284 |
-
"condition_as_parse_action",
|
285 |
-
"pyparsing_test",
|
286 |
-
# pre-PEP8 compatibility names
|
287 |
-
"__versionTime__",
|
288 |
-
"anyCloseTag",
|
289 |
-
"anyOpenTag",
|
290 |
-
"cStyleComment",
|
291 |
-
"commonHTMLEntity",
|
292 |
-
"countedArray",
|
293 |
-
"cppStyleComment",
|
294 |
-
"dblQuotedString",
|
295 |
-
"dblSlashComment",
|
296 |
-
"delimitedList",
|
297 |
-
"dictOf",
|
298 |
-
"htmlComment",
|
299 |
-
"javaStyleComment",
|
300 |
-
"lineEnd",
|
301 |
-
"lineStart",
|
302 |
-
"makeHTMLTags",
|
303 |
-
"makeXMLTags",
|
304 |
-
"matchOnlyAtCol",
|
305 |
-
"matchPreviousExpr",
|
306 |
-
"matchPreviousLiteral",
|
307 |
-
"nestedExpr",
|
308 |
-
"nullDebugAction",
|
309 |
-
"oneOf",
|
310 |
-
"opAssoc",
|
311 |
-
"pythonStyleComment",
|
312 |
-
"quotedString",
|
313 |
-
"removeQuotes",
|
314 |
-
"replaceHTMLEntity",
|
315 |
-
"replaceWith",
|
316 |
-
"restOfLine",
|
317 |
-
"sglQuotedString",
|
318 |
-
"stringEnd",
|
319 |
-
"stringStart",
|
320 |
-
"traceParseAction",
|
321 |
-
"unicodeString",
|
322 |
-
"withAttribute",
|
323 |
-
"indentedBlock",
|
324 |
-
"originalTextFor",
|
325 |
-
"infixNotation",
|
326 |
-
"locatedExpr",
|
327 |
-
"withClass",
|
328 |
-
"tokenMap",
|
329 |
-
"conditionAsParseAction",
|
330 |
-
"autoname_elements",
|
331 |
-
]
spaces/Boadiwaa/Recipes/README.md
DELETED
@@ -1,12 +0,0 @@
- ---
- title: Recipes
- emoji: 🏢
- colorFrom: pink
- colorTo: gray
- sdk: gradio
- sdk_version: 3.16.2
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/LIVE/pybind11/.github/ISSUE_TEMPLATE/question.md
DELETED
@@ -1,21 +0,0 @@
- ---
- name: Question
- about: File an issue about unexplained behavior
- title: "[QUESTION] "
- ---
-
- If you have a question, please check the following first:
-
- 1. Check if your question has already been answered in the [FAQ][] section.
- 2. Make sure you've read the [documentation][]. Your issue may be addressed there.
- 3. If those resources didn't help and you only have a short question (not a bug report), consider asking in the [Gitter chat room][]
- 4. Search the [issue tracker][], including the closed issues, to see if your question has already been asked/answered. +1 or comment if it has been asked but has no answer.
- 5. If you have a more complex question which is not answered in the previous items (or not suitable for chat), please fill in the details below.
- 6. Include a self-contained and minimal piece of code that illustrates your question. If that's not possible, try to make the description as clear as possible.
-
- [FAQ]: http://pybind11.readthedocs.io/en/latest/faq.html
- [documentation]: https://pybind11.readthedocs.io
- [issue tracker]: https://github.com/pybind/pybind11/issues
- [Gitter chat room]: https://gitter.im/pybind/Lobby
-
- *After reading, remove this checklist.*
spaces/CVPR/LIVE/pybind11/tests/test_stl_binders.cpp
DELETED
@@ -1,129 +0,0 @@
- [129 deleted lines: pybind11 tests/test_stl_binders.cpp (Sergey Lyskov, 2016, BSD-style license). Defines
-  the El class, the non-copyable E_nc class (issue #487) and the one_to_n/times_ten/times_hundred helpers,
-  then TEST_SUBMODULE(stl_binders, m) binds VectorInt/VectorEl/VectorVectorEl, the MapStringDouble and
-  UnorderedMapStringDouble variants (plus const-value versions), the non-copyable container bindings
-  VectorENC, DequeENC, MapENC, UmapENC, MapVecENC, MapMapENC and UmapUmapENC (issue #1885), and the
-  vector buffer tests (VectorUChar, VectorUndeclStruct, and the numpy-dependent VStruct/VectorStruct).]
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/inner_product.h
DELETED
@@ -1,22 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  * http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system has no special version of this algorithm
-
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/unique_by_key.h
DELETED
@@ -1,934 +0,0 @@
- [934 deleted lines: thrust/system/cuda/detail/unique_by_key.h (NVIDIA, 2016, BSD-style license), compiled
-  only under NVCC. Implements the CUDA backend for thrust::unique_by_key and unique_by_key_copy on top of
-  CUB: PtxPolicy tunings for sm30/sm35/sm52, a UniqueByKeyAgent that block-loads keys and values, flags
-  discontinuities, exclusive-scans the selection flags with a tile prefix callback and scatters the selected
-  keys/values, an InitAgent for tile state, a doit_step launcher that sizes and aliases temporary storage,
-  and the Thrust API entry points that dispatch to this implementation or fall back to the sequential
-  backend when CUDART is unavailable.]
spaces/CVPR/WALT/mmdet/core/export/pytorch2onnx.py
DELETED
@@ -1,154 +0,0 @@
- [154 deleted lines: mmdet/core/export/pytorch2onnx.py. Provides generate_inputs_and_wrap_model(), which
-  builds the detector, prepares an example input and wraps model.forward with return_loss=False before
-  registering mmcv's extra ONNX symbolics for opset 11; build_model_from_cfg(), which reads the mmcv
-  Config, applies cfg_options/custom_imports/cudnn_benchmark, builds the detector and loads the checkpoint
-  on CPU in eval mode; and preprocess_example_input(), which reads, resizes and optionally normalizes the
-  example image and returns the input tensor together with its img_meta dictionary.]
|
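For context, a minimal sketch (not part of the deleted file) of how these helpers are typically combined with torch.onnx.export; the config path, checkpoint path, and input shape below are placeholders, not values taken from this repository.

import torch

from mmdet.core.export import generate_inputs_and_wrap_model

input_config = {
    'input_shape': (1, 3, 800, 1216),   # assumed shape, adjust to the model
    'input_path': 'demo/demo.jpg',       # a real image is needed so NMS keeps boxes
    'normalize_cfg': {
        'mean': (123.675, 116.28, 103.53),
        'std': (58.395, 57.12, 57.375),
    },
}

# hypothetical config/checkpoint paths for illustration only
model, tensor_data = generate_inputs_and_wrap_model(
    'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
    'checkpoints/faster_rcnn_r50_fpn_1x_coco.pth',
    input_config)

# export the wrapped model; opset 11 matches register_extra_symbolics above
torch.onnx.export(
    model, tuple(tensor_data), 'model.onnx',
    opset_version=11, do_constant_folding=True)
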
spaces/CVPR/transfiner/configs/quick_schedules/README.md
DELETED
@@ -1,8 +0,0 @@
These are quick configs for performance or accuracy regression tracking purposes.

* `*instance_test.yaml`: can train on 2 GPUs. They are used to test whether the training can
  successfully finish. They are not expected to produce reasonable training results.
* `*inference_acc_test.yaml`: They should be run using `--eval-only`. They run inference using pre-trained models and verify
  the results are as expected.
* `*training_acc_test.yaml`: They should be trained on 8 GPUs. They finish in about an hour and verify the training accuracy
  is within the normal range.

spaces/CikeyQI/meme-api/meme_generator/memes/charpic/__init__.py
DELETED
@@ -1,38 +0,0 @@
from typing import List

from PIL import Image, ImageDraw
from pil_utils import BuildImage
from pil_utils.fonts import Font

from meme_generator import add_meme
from meme_generator.utils import make_jpg_or_gif


def charpic(images: List[BuildImage], texts, args):
    img = images[0]
    str_map = "@@$$&B88QMMGW##EE93SPPDOOU**==()+^,\"--''. "
    num = len(str_map)
    font = Font.find("Consolas").load_font(15)

    def make(img: BuildImage) -> BuildImage:
        img = img.convert("L").resize_width(150)
        img = img.resize((img.width, img.height // 2))
        lines = []
        for y in range(img.height):
            line = ""
            for x in range(img.width):
                gray = img.image.getpixel((x, y))
                line += str_map[int(num * gray / 256)]
            lines.append(line)
        text = "\n".join(lines)
        text_img = Image.new("RGB", (2000, 2000), "white")
        draw = ImageDraw.Draw(text_img)
        _, _, w, h = draw.multiline_textbbox((0, 0), text, font=font)
        draw.multiline_text((0, 0), text, font=font, fill="black")
        text_img = text_img.crop((0, 0, w, h))
        return BuildImage(text_img)

    return make_jpg_or_gif(img, make)


add_meme("charpic", charpic, min_images=1, max_images=1, keywords=["字符画"])
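For reference, a standalone sketch of the same grayscale-to-character mapping using plain Pillow only, so it runs without pil_utils or the meme_generator framework; the input file name and output width are placeholders.

from PIL import Image

STR_MAP = "@@$$&B88QMMGW##EE93SPPDOOU**==()+^,\"--''. "


def image_to_chars(path: str, width: int = 150) -> str:
    # convert to grayscale and halve the height, since characters are tall
    img = Image.open(path).convert("L")
    height = max(1, int(img.height * width / img.width) // 2)
    img = img.resize((width, height))
    lines = []
    for y in range(img.height):
        # map each pixel's brightness to a character from dark to light
        line = "".join(
            STR_MAP[int(len(STR_MAP) * img.getpixel((x, y)) / 256)]
            for x in range(img.width))
        lines.append(line)
    return "\n".join(lines)


print(image_to_chars("demo.jpg"))
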
spaces/Cletrason/dalle2-dreamweddingbooth/app.py
DELETED
@@ -1,3 +0,0 @@
import gradio as gr

gr.Interface.load("models/dalle2/dreamweddingbooth").launch()
spaces/CofAI/chat.b4/client/js/theme-toggler.js
DELETED
@@ -1,22 +0,0 @@
var switch_theme_toggler = document.getElementById("theme-toggler");

switch_theme_toggler.addEventListener("change", toggleTheme);

function setTheme(themeName) {
    localStorage.setItem("theme", themeName);
    document.documentElement.className = themeName;
}

function toggleTheme() {
    var currentTheme = localStorage.getItem("theme");
    var newTheme = currentTheme === "theme-dark" ? "theme-light" : "theme-dark";

    setTheme(newTheme);
    switch_theme_toggler.checked = newTheme === "theme-dark";
}

(function () {
    var currentTheme = localStorage.getItem("theme") || "theme-dark";
    setTheme(currentTheme);
    switch_theme_toggler.checked = currentTheme === "theme-dark";
})();
spaces/CofAI/chat.b4/g4f/Provider/Providers/hteyun.py
DELETED
@@ -1,34 +0,0 @@
import requests
import os
import json
from ...typing import sha256, Dict, get_type_hints

url = 'https://hteyun.com'
model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
supports_stream = True
needs_auth = False

def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json, text/plain, */*',
        'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4',
        'Origin': 'https://hteyun.com',
        'Referer': 'https://hteyun.com/chat/',
    }
    data = {
        'messages': messages,
        'model': model,
        'systemMessage': 'You are ChatGPT, a large language model trained by OpenAI. Follow the user\'s instructions carefully. Respond using russian language.',
        'temperature': 0.7,
        'presence_penalty': 0,
    }
    response = requests.post(url + '/api/chat-stream', json=data, headers=headers, stream=True)
    print(response.json())

    # Extract the text from the response
    return response.json()['text']


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
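For reference, a minimal sketch of the raw request this provider issued, written against requests only so it does not depend on the g4f package; the payload mirrors _create_completion above, and the third-party endpoint may no longer respond as shown.

import requests

resp = requests.post(
    'https://hteyun.com/api/chat-stream',
    headers={
        'Content-Type': 'application/json',
        'Origin': 'https://hteyun.com',
        'Referer': 'https://hteyun.com/chat/',
    },
    json={
        'messages': [{'role': 'user', 'content': 'Hello'}],
        'model': 'gpt-3.5-turbo',
        'systemMessage': 'You are ChatGPT, a large language model trained by OpenAI.',
        'temperature': 0.7,
        'presence_penalty': 0,
    })
print(resp.text)
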
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/FliImagePlugin.py
DELETED
@@ -1,171 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# FLI/FLC file handling.
#
# History:
# 95-09-01 fl Created
# 97-01-03 fl Fixed parser, setup decoder tile
# 98-07-15 fl Renamed offset attribute to avoid name clash
#
# Copyright (c) Secret Labs AB 1997-98.
# Copyright (c) Fredrik Lundh 1995-97.
#
# See the README file for information on usage and redistribution.
#

import os

from . import Image, ImageFile, ImagePalette
from ._binary import i16le as i16
from ._binary import i32le as i32
from ._binary import o8

#
# decoder


def _accept(prefix):
    return (
        len(prefix) >= 6
        and i16(prefix, 4) in [0xAF11, 0xAF12]
        and i16(prefix, 14) in [0, 3]  # flags
    )


##
# Image plugin for the FLI/FLC animation format. Use the <b>seek</b>
# method to load individual frames.


class FliImageFile(ImageFile.ImageFile):
    format = "FLI"
    format_description = "Autodesk FLI/FLC Animation"
    _close_exclusive_fp_after_loading = False

    def _open(self):
        # HEAD
        s = self.fp.read(128)
        if not (_accept(s) and s[20:22] == b"\x00\x00"):
            msg = "not an FLI/FLC file"
            raise SyntaxError(msg)

        # frames
        self.n_frames = i16(s, 6)
        self.is_animated = self.n_frames > 1

        # image characteristics
        self.mode = "P"
        self._size = i16(s, 8), i16(s, 10)

        # animation speed
        duration = i32(s, 16)
        magic = i16(s, 4)
        if magic == 0xAF11:
            duration = (duration * 1000) // 70
        self.info["duration"] = duration

        # look for palette
        palette = [(a, a, a) for a in range(256)]

        s = self.fp.read(16)

        self.__offset = 128

        if i16(s, 4) == 0xF100:
            # prefix chunk; ignore it
            self.__offset = self.__offset + i32(s)
            s = self.fp.read(16)

        if i16(s, 4) == 0xF1FA:
            # look for palette chunk
            number_of_subchunks = i16(s, 6)
            chunk_size = None
            for _ in range(number_of_subchunks):
                if chunk_size is not None:
                    self.fp.seek(chunk_size - 6, os.SEEK_CUR)
                s = self.fp.read(6)
                chunk_type = i16(s, 4)
                if chunk_type in (4, 11):
                    self._palette(palette, 2 if chunk_type == 11 else 0)
                    break
                chunk_size = i32(s)
                if not chunk_size:
                    break

        palette = [o8(r) + o8(g) + o8(b) for (r, g, b) in palette]
        self.palette = ImagePalette.raw("RGB", b"".join(palette))

        # set things up to decode first frame
        self.__frame = -1
        self._fp = self.fp
        self.__rewind = self.fp.tell()
        self.seek(0)

    def _palette(self, palette, shift):
        # load palette

        i = 0
        for e in range(i16(self.fp.read(2))):
            s = self.fp.read(2)
            i = i + s[0]
            n = s[1]
            if n == 0:
                n = 256
            s = self.fp.read(n * 3)
            for n in range(0, len(s), 3):
                r = s[n] << shift
                g = s[n + 1] << shift
                b = s[n + 2] << shift
                palette[i] = (r, g, b)
                i += 1

    def seek(self, frame):
        if not self._seek_check(frame):
            return
        if frame < self.__frame:
            self._seek(0)

        for f in range(self.__frame + 1, frame + 1):
            self._seek(f)

    def _seek(self, frame):
        if frame == 0:
            self.__frame = -1
            self._fp.seek(self.__rewind)
            self.__offset = 128
        else:
            # ensure that the previous frame was loaded
            self.load()

        if frame != self.__frame + 1:
            msg = f"cannot seek to frame {frame}"
            raise ValueError(msg)
        self.__frame = frame

        # move to next frame
        self.fp = self._fp
        self.fp.seek(self.__offset)

        s = self.fp.read(4)
        if not s:
            raise EOFError

        framesize = i32(s)

        self.decodermaxblock = framesize
        self.tile = [("fli", (0, 0) + self.size, self.__offset, None)]

        self.__offset += framesize

    def tell(self):
        return self.__frame


#
# registry

Image.register_open(FliImageFile.format, FliImageFile, _accept)

Image.register_extensions(FliImageFile.format, [".fli", ".flc"])
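For context, a short sketch of reading an FLI/FLC animation through Pillow's standard interface; this plugin is registered automatically when Pillow is imported, and 'example.fli' is a placeholder path.

from PIL import Image

with Image.open("example.fli") as im:          # dispatches to FliImageFile
    print(im.format, im.size, im.n_frames, im.info.get("duration"))
    for frame in range(im.n_frames):
        im.seek(frame)                         # decode the requested frame
        im.convert("RGB").save(f"frame_{frame:03d}.png")
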
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_o_p_b_d.py
DELETED
@@ -1,6 +0,0 @@
from .otBase import BaseTTXConverter


# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html
class table__o_p_b_d(BaseTTXConverter):
    pass