Commit · de36af1
Parent(s): c96c338
Update parquet files (step 100 of 397)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/101-5/gpt4free/g4f/.v1/gpt4free/hpgptai/README.md +0 -39
- spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/test.py +0 -6
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bagas Guide How to Download and Install Microsoft Office 2010 with Crack and Keygen.md +0 -38
- spaces/1gistliPinn/ChatGPT4/Examples/Blue Dun Apk Cracked 36.md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Escapeplansubtitles720pbluraynext !!HOT!!.md +0 -112
- spaces/1line/AutoGPT/autogpt/token_counter.py +0 -73
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download AXES.io MOD APK 2.7.19 with Free Shopping and VIP Features from an1.com.md +0 -90
- spaces/1phancelerku/anime-remove-background/Cmo instalar Crafting and Building en tu PC con un emulador.md +0 -128
- spaces/1phancelerku/anime-remove-background/Download Game Off Road 4x4 Driving Simulator and Become a Champion of Epic Trophy Raid.md +0 -121
- spaces/1phancelerku/anime-remove-background/ETS2 Download Tips and Tricks for Running Your Own Trucking Business.md +0 -106
- spaces/1toTree/lora_test/.ipynb_checkpoints/README-checkpoint.md +0 -12
- spaces/2ndelement/voicevox/voicevox_engine/dev/core/mock.py +0 -121
- spaces/A00001/bingothoo/README.md +0 -196
- spaces/AIFILMS/generate_human_motion/VQ-Trans/dataset/dataset_TM_eval.py +0 -217
- spaces/AIGC-Audio/AudioGPT/sound_extraction/utils/wav_io.py +0 -23
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/utils/utils.py +0 -171
- spaces/AIGText/GlyphControl/ldm/models/diffusion/ddim.py +0 -337
- spaces/ATang0729/Forecast4Muses/Model/__init__.py +0 -0
- spaces/Abhilashvj/haystack_QA/README.md +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circularprogress/CircularProgress.js +0 -2
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/op/upfirdn2d.py +0 -60
- spaces/Amrrs/DragGan-Inversion/PTI/models/__init__.py +0 -0
- spaces/Amrrs/numerizerlit/app.py +0 -49
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py +0 -185
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py +0 -339
- spaces/Andy1621/uniformer_image_detection/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py +0 -58
- spaces/Andy1621/uniformer_image_detection/exp/cascade_mask_rcnn_3x_ms_hybrid_base/config.py +0 -142
- spaces/Andy1621/uniformer_image_detection/mmdet/utils/optimizer.py +0 -33
- spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py +0 -4
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/optimization/arguments.py +0 -197
- spaces/ArtGAN/Diffusion-API/diffusion_webui/diffusion_models/text2img_app.py +0 -173
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/candidates.py +0 -552
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/_musllinux.py +0 -136
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_export_torchscript.py +0 -296
- spaces/Beasto/Face_To_Anime_Cyclegan/README.md +0 -13
- spaces/Benson/text-generation/Examples/Descarga De Impacto De Genshin Qooapp.md +0 -116
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_emoji_codes.py +0 -0
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py +0 -314
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/errors.py +0 -127
- spaces/Billyosoro/ESRGAN/realesrgan/weights/README.md +0 -3
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/backbone/__init__.py +0 -8
- spaces/CVPR/DualStyleGAN/images/README.md +0 -6
- spaces/CVPR/LIVE/pybind11/tests/test_embed/test_interpreter.py +0 -10
- spaces/Chandrasekahar2k/KVCSekharGenAIBot/app.py +0 -34
- spaces/ChrisPreston/diff-svc_minato_aqua/utils/pl_utils.py +0 -1625
- spaces/CoreyMorris/MMLU-by-task-Leaderboard/moral_app.py +0 -248
- spaces/DaleChen/AutoGPT/tests/__init__.py +0 -0
- spaces/DaleChen/AutoGPT/ui/app.py +0 -145
- spaces/Dimentian/LLMs-Stable-Vicuna-13B/README.md +0 -12
- spaces/DpNaze/webui-docker/on_start.sh +0 -124
spaces/101-5/gpt4free/g4f/.v1/gpt4free/hpgptai/README.md
DELETED
@@ -1,39 +0,0 @@
-# HpgptAI
-Written by [hp_mzx](https://github.com/hpsj).
-
-## Examples:
-### Completion:
-```python
-res = hpgptai.Completion.create("你是谁","127.0.0.1:7890")
-print(res["reply"])
-```
-
-### Chat Completion:
-Support context
-```python
-messages = [
-    {
-        "content": "你是谁",
-        "html": "你是谁",
-        "id": hpgptai.ChatCompletion.randomStr(),
-        "role": "user",
-        "who": "User: ",
-    },
-    {
-        "content": "我是一位AI助手,专门为您提供各种服务和支持。我可以回答您的问题,帮助您解决问题,提供相关信息,并执行一些任务。请随时告诉我您需要什么帮助。",
-        "html": "我是一位AI助手,专门为您提供各种服务和支持。我可以回答您的问题,帮助您解决问题,提供相关信息,并执行一些任务。请随时告诉我您需要什么帮助。",
-        "id": hpgptai.ChatCompletion.randomStr(),
-        "role": "assistant",
-        "who": "AI: ",
-    },
-    {
-        "content": "我上一句问的是什么?",
-        "html": "我上一句问的是什么?",
-        "id": hpgptai.ChatCompletion.randomStr(),
-        "role": "user",
-        "who": "User: ",
-    },
-]
-res = hpgptai.ChatCompletion.create(messages,proxy="127.0.0.1:7890")
-print(res["reply"])
-```
spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/test.py
DELETED
@@ -1,6 +0,0 @@
-access_token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV'
-supabase_auth_token = '%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8%22%2C%22_Zp8uXIA2InTDKYgo8TCqA%22%2Cnull%2Cnull%2Cnull%5D'
-
-idk = [
-    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8",
-    "_Zp8uXIA2InTDKYgo8TCqA", None, None, None]
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bagas Guide How to Download and Install Microsoft Office 2010 with Crack and Keygen.md
DELETED
@@ -1,38 +0,0 @@
-
-<h1>How to Download Microsoft Office 2010 Full Crack + Keygen Bagas</h1>
-<p>If you are looking for a way to download Microsoft Office 2010 full version for free, you have come to the right place. In this article, I will show you how to download and install Microsoft Office 2010 with crack and keygen from Bagas, a popular website that provides software downloads and tutorials.</p>
-<p>Microsoft Office 2010 is a suite of productivity applications that includes Word, Excel, PowerPoint, Outlook, Access, and more. It has many features and improvements over the previous versions, such as the ribbon interface, backstage view, online collaboration, cloud integration, and enhanced graphics. Microsoft Office 2010 is compatible with Windows 7, Windows 8/8.1, and Windows 10.</p>
-<h2>download microsoft office 2010 full crack + keygen bagas</h2><br /><p><b><b>DOWNLOAD</b> ⚹⚹⚹ <a href="https://byltly.com/2uKyJO">https://byltly.com/2uKyJO</a></b></p><br /><br />
-<h2>Steps to Download Microsoft Office 2010 Full Crack + Keygen Bagas</h2>
-<ol>
-<li>Go to <a href="https://www.inputekno.com/2021/09/free-download-microsoft-office-2010.html">this link</a> and download the Microsoft Office 2010 full version file (1.3 GB) in RAR format. You will need a password to extract the file, which is rahasia.</li>
-<li>Extract the file using WinRAR or any other software that can handle RAR files. You will get a folder named Ms Office_2010_Full_Version.</li>
-<li>Run the setup.exe file inside the folder and follow the instructions to install Microsoft Office 2010 on your computer. You can choose the language and components that you want to install.</li>
-<li>After the installation is complete, do not open any of the Microsoft Office applications yet. Extract the crack file that is also inside the folder. You will get another folder named Online_KMS_Activation.</li>
-<li>Run the software inside the Online_KMS_Activation folder and click on Activate Office. This will activate your Microsoft Office 2010 with a valid license key.</li>
-<li>Enjoy your Microsoft Office 2010 full version for free!</li>
-</ol>
-<h2>Tips and Warnings</h2>
-<ul>
-<li>Make sure you have a stable internet connection when downloading and activating Microsoft Office 2010.</li>
-<li>Disable your antivirus software before running the crack software, as it may detect it as a virus or malware.</li>
-<li>Do not update your Microsoft Office 2010 after activation, as it may revoke your license key and make it invalid.</li>
-<li>This method is only for educational purposes and not for commercial use. If you like Microsoft Office 2010 and want to support the developers, please buy a genuine copy from the official website.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>In this article, I have shown you how to download Microsoft Office 2010 full crack + keygen bagas for free. This is a simple and easy way to get Microsoft Office 2010 on your computer without paying anything. However, this method may not be legal or ethical, so use it at your own risk. I hope this article was helpful for you. If you have any questions or suggestions, please leave a comment below.</p><h2>How to Use Microsoft Office 2010</h2>
-<p>Now that you have downloaded and installed Microsoft Office 2010 full crack + keygen bagas, you may wonder how to use it. Microsoft Office 2010 is a suite of applications that can help you create and edit various types of documents, such as text, spreadsheets, presentations, emails, databases, and more. Here are some basic tips on how to use Microsoft Office 2010:</p>
-<ul>
-<li>To start any of the Microsoft Office applications, go to the Start menu and click on All Programs. Then find the Microsoft Office folder and click on the application that you want to use.</li>
-<li>To create a new document, click on the File tab and select New. You can choose from various templates or create a blank document.</li>
-<li>To open an existing document, click on the File tab and select Open. You can browse your computer or cloud storage to find the document that you want to open.</li>
-<li>To save a document, click on the File tab and select Save or Save As. You can choose the location and format of your document.</li>
-<li>To edit a document, use the tools and commands on the ribbon. The ribbon is the strip of tabs and icons at the top of the window. Each tab has different groups of commands that are related to a specific task. For example, the Home tab has commands for formatting text, the Insert tab has commands for adding pictures and tables, and the Review tab has commands for checking spelling and grammar.</li>
-<li>To customize the ribbon, click on the File tab and select Options. Then click on Customize Ribbon and choose the tabs and commands that you want to add or remove.</li>
-<li>To print a document, click on the File tab and select Print. You can adjust the settings and preview your document before printing.</li>
-<li>To share a document, click on the File tab and select Share. You can send your document as an email attachment, upload it to a cloud service, or publish it online.</li>
-</ul>
-<p>These are some of the basic functions of Microsoft Office 2010. To learn more about how to use Microsoft Office 2010, you can visit <a href="https://support.microsoft.com/en-us/office/office-2010-end-of-support-roadmap-c17c6b06-9d15-4bcb-9adf-9ce1a13f434f">the official support website</a> or watch <a href="https://www.youtube.com/watch?v=QqW3hOGsZ9U">this video tutorial</a> . You can also explore the features and options of each application by yourself and discover what you can do with Microsoft Office 2010.</p>
-<p></p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Blue Dun Apk Cracked 36.md
DELETED
@@ -1,9 +0,0 @@
-<br />
-<p>but it's worth noting that apollo's roots reach far further back. picasso has referred to the apollo series as his "desire to contemplate man's relationship to his creations". and so, if some of his other artistic expressions have been questionable, this series ought to be given the benefit of the doubt, as it seems he was attempting to capture that "relationship" by making a specific project that required technical research.</p>
-<h2>Blue Dun Apk Cracked 36</h2><br /><p><b><b>Download Zip</b> ☑ <a href="https://imgfil.com/2uy17C">https://imgfil.com/2uy17C</a></b></p><br /><br />
-<p>and on the subject of apollo--like many of you mentioned, he is really intricate--and most importantly, it's entirely for our time. it was created in anticipation of the icarus 2000 series that would begin in 2000, and finished ahead of schedule.</p>
-<p>while i couldnt log into my social network accounts, i noticed that my internet access was painfully slow. i wanted to look at my latest-used facebook posts and share them on twitter. then, i noticed that the apps on the web browser were popping up and asking for permission to use my contact information and settings to log me in to facebook, twitter and more. although common for web-based apps and services to need an account and permission from you before you can use them, this led to unwanted surprises. it prompted my iphone app store to display apps that i purchased as being compatible with my phone and running my apps without my permission or even knowledge.</p>
-<p>when is an application not a valid and legal application? if the developer of the app is to be believed, and in many cases they are, the answer is when the developer is a manufacturer who markets the product as his own. in other words, an application is not a valid and legal application if its developer belongs to the league of businesses who manufacture other products. thats what apple has claimed in court. when is an app a product? when it comes with the manufacturer into retail. apple has claimed that, yes indeed, instadial is indeed a product. even though it was simply developed by a single person, which is not an unusual arrangement in software development, there is no product without manufacture. and apple knows who the manufacture is. read the fine print. if the developer of the software says it is his own, than he is a manufacturer.</p>
-<p></p> 899543212b<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Escapeplansubtitles720pbluraynext !!HOT!!.md
DELETED
@@ -1,112 +0,0 @@
-<br />
-<h1>Escapeplansubtitles720pbluraynext: How to Enjoy the Movie with Perfect Subtitles</h1>
-
-<p>Escape Plan is a 2013 action thriller movie starring Sylvester Stallone and Arnold Schwarzenegger as two prisoners who try to break out of a high-tech prison that they designed themselves. The movie was a box office success and received mixed reviews from critics and audiences.</p>
-<h2>escapeplansubtitles720pbluraynext</h2><br /><p><b><b>Download</b> ✸ <a href="https://imgfil.com/2uy0DL">https://imgfil.com/2uy0DL</a></b></p><br /><br />
-
-<p>If you want to watch Escape Plan with subtitles, you may have searched for escapeplansubtitles720pbluraynext on the internet. This is a keyword that refers to the subtitles for the 720p BluRay version of the movie, which is one of the best quality formats available. However, finding the right subtitles for this movie can be tricky, as there are many different sources and versions of subtitles online.</p>
-
-<p>In this article, we will give you some tips on how to find and use escapeplansubtitles720pbluraynext for your movie experience. We will also provide you with some alternative titles for this keyword that you can use to search for subtitles more easily.</p>
-
-<h2>How to find escapeplansubtitles720pbluraynext</h2>
-
-<p>One of the easiest ways to find escapeplansubtitles720pbluraynext is to use a subtitle search engine or website. These are online platforms that allow you to search for subtitles by movie title, language, format, and other criteria. Some of the most popular subtitle websites are:</p>
-
-<ul>
-<li>Opensubtitles.com: This is one of the largest and most reliable subtitle websites, with over 5 million subtitles in various languages and formats. You can search for Escape Plan subtitles by entering the movie title or the keyword escapeplansubtitles720pbluraynext in the search box. You can also filter the results by language, genre, release type, and rating. You can download the subtitles as .srt files and view them on opensubtitles.org.</li>
-<li>Scribd.com: This is a website that allows you to read and download various documents, books, and files online. You can also find Escape Plan subtitles on Scribd by searching for escape-plan-subtitles-720p-bluray-next in the search box. You can read the subtitles online or download them as PDF files.</li>
-<li>Builtwel.com: This is a website that provides various services and products related to construction and engineering. You can also find Escape Plan subtitles on Builtwel by searching for escapeplansubtitles720pbluraynext in the forum section. You can download the subtitles as .zip files.</li>
-<li>Hiepsibaotap.com: This is a website that offers various information and resources related to education and learning. You can also find Escape Plan subtitles on Hiepsibaotap by searching for makode.pdf in the search box. This is a PDF file that contains Escape Plan subtitles in Vietnamese.</li>
-</ul>
-
-<p>These are some of the websites that you can use to find escapeplansubtitles720pbluraynext. However, you should be careful when downloading subtitles from unknown sources, as they may contain viruses or malware that can harm your device or compromise your privacy. You should also check the quality and accuracy of the subtitles before using them, as they may have errors or inconsistencies.</p>
-<p></p>
-
-<h2>How to use escapeplansubtitles720pbluraynext</h2>
-
-<p>Once you have downloaded escapeplansubtitles720pbluraynext from a reliable source, you can use them to watch Escape Plan with subtitles on your device. To do this, you need to have a media player that supports external subtitles, such as VLC Media Player, KMPlayer, or PotPlayer. You also need to have the 720p BluRay version of Escape Plan on your device or on a disc.</p>
-
-<p>To use escapeplansubtitles720pbluraynext, follow these steps:</p>
-
-<ol>
-<li>Open your media player and load Escape Plan on it.</li>
-<li>Go to the menu or settings of your media player and select Subtitles or Subtitle Track.</li>
-<li>Browse your device or disc and locate escapeplansubtitles720pbluraynext file that you downloaded.</li>
-<li>Select escapeplansubtitles720pbluraynext file and click Open or OK.</li>
-<li>The subtitles should appear on your screen along with the movie.</li>
-</ol>
-
-<p>You can adjust the size, position, color, and timing of the subtitles according to your preference. You can also switch between different subtitle languages if you have more than one subtitle file.</p>
-
-<h2>Alternative titles for escapeplansubtitles720pbluraynext</h2>
-
-<p>If you have trouble finding escapeplansubtitles720pbluraynext online, you can try using some alternative titles for this keyword that may yield better results. Some of these alternative titles are:</p>
-
-<ul>
-<li>Escape Plan 2013 720p BluRay x264 YIFY: This is a popular release name for movies that are encoded by YIFY, a group that provides high-quality movies with small file sizes. You can find many subtitle websites that offer subtitles for YIFY releases.</li>
-<li>Escape Plan 2013 720p BluRay x264 SPARKS: This is another release name for movies that are encoded by SPARKS, a group that provides high-quality movies with larger file sizes. You can also find many subtitle websites that offer subtitles for SPARKS releases.</li>
-<li>Escape Plan 2013 BRRip XviD AC3-SANTi: This is another release name for movies that are encoded by SANTi, a group that provides good-quality movies with medium file sizes. You can also find some subtitle websites that offer subtitles for SANTi releases.</li>
-</ul>
-
-<p>These are some of the alternative titles for escapeplansubtitles720pbluraynext that you can use to search for subtitles more easily. However, you should make sure that the subtitle file matches the movie file exactly, as different releases may have different frame rates, durations, or audio tracks.</p>
-
-<h2>Conclusion</h2>
-
-<p>Escape Plan is a great movie to watch with subtitles if you want to enjoy the action and dialogue of Sylvester Stallone and Arnold Schwarzenegger. To find and use escapeplansubtitles720pbluraynext, you can use a subtitle website or search engine, download the subtitle file from a reliable source, and load it on your media player along with the 720p BluRay version of Escape Plan. You can also use some alternative titles for this keyword that may help you find subtitles more easily.</p> 3cee63e6c2<br />
-<br />
-<br />
spaces/1line/AutoGPT/autogpt/token_counter.py
DELETED
@@ -1,73 +0,0 @@
-"""Functions for counting the number of tokens in a message or string."""
-from __future__ import annotations
-
-import tiktoken
-
-from autogpt.logs import logger
-
-
-def count_message_tokens(
-    messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0301"
-) -> int:
-    """
-    Returns the number of tokens used by a list of messages.
-
-    Args:
-        messages (list): A list of messages, each of which is a dictionary
-            containing the role and content of the message.
-        model (str): The name of the model to use for tokenization.
-            Defaults to "gpt-3.5-turbo-0301".
-
-    Returns:
-        int: The number of tokens used by the list of messages.
-    """
-    try:
-        encoding = tiktoken.encoding_for_model(model)
-    except KeyError:
-        logger.warn("Warning: model not found. Using cl100k_base encoding.")
-        encoding = tiktoken.get_encoding("cl100k_base")
-    if model == "gpt-3.5-turbo":
-        # !Note: gpt-3.5-turbo may change over time.
-        # Returning num tokens assuming gpt-3.5-turbo-0301.")
-        return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
-    elif model == "gpt-4":
-        # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
-        return count_message_tokens(messages, model="gpt-4-0314")
-    elif model == "gpt-3.5-turbo-0301":
-        tokens_per_message = (
-            4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
-        )
-        tokens_per_name = -1  # if there's a name, the role is omitted
-    elif model == "gpt-4-0314":
-        tokens_per_message = 3
-        tokens_per_name = 1
-    else:
-        raise NotImplementedError(
-            f"num_tokens_from_messages() is not implemented for model {model}.\n"
-            " See https://github.com/openai/openai-python/blob/main/chatml.md for"
-            " information on how messages are converted to tokens."
-        )
-    num_tokens = 0
-    for message in messages:
-        num_tokens += tokens_per_message
-        for key, value in message.items():
-            num_tokens += len(encoding.encode(value))
-            if key == "name":
-                num_tokens += tokens_per_name
-    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
-    return num_tokens
-
-
-def count_string_tokens(string: str, model_name: str) -> int:
-    """
-    Returns the number of tokens in a text string.
-
-    Args:
-        string (str): The text string.
-        model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")
-
-    Returns:
-        int: The number of tokens in the text string.
-    """
-    encoding = tiktoken.encoding_for_model(model_name)
-    return len(encoding.encode(string))
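For reference, a minimal sketch of how the two helpers deleted above could be called, assuming `tiktoken` is installed and the module is still importable as `autogpt.token_counter`; the message dictionaries follow the OpenAI chat format described in the docstring:

```python
# Hypothetical usage of the deleted helpers; assumes tiktoken is installed
# and autogpt.token_counter is importable.
from autogpt.token_counter import count_message_tokens, count_string_tokens

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# Chat payload: 4 overhead tokens per message under the gpt-3.5-turbo-0301
# rules above, plus 3 tokens priming the assistant reply.
print(count_message_tokens(messages, model="gpt-3.5-turbo-0301"))

# Plain string, tokenized with the encoding registered for the model name.
print(count_string_tokens("Hello world", model_name="gpt-3.5-turbo"))
```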
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download AXES.io MOD APK 2.7.19 with Free Shopping and VIP Features from an1.com.md
DELETED
@@ -1,90 +0,0 @@
-
-<h1>Download Axes io Mod Apk An1 Com: A Guide for Android Users</h1>
-<p>If you are looking for a fun and addictive multiplayer game that you can play with your friends or strangers online, then you should try Axes io. This is a battle royale game where you have to throw axes at your enemies and survive as long as possible. But if you want to have more advantages and resources in the game, then you should download Axes io Mod Apk An1 Com. This is a modified version of the game that gives you unlimited money, free shopping, free chests, VIP access, and more. In this article, we will tell you everything you need to know about Axes io and Axes io Mod Apk An1 Com, including how to download and install it on your Android device.</p>
-<h2>download axes io mod apk an1 com</h2><br /><p><b><b>Download File</b> ✶ <a href="https://urlin.us/2uSVxQ">https://urlin.us/2uSVxQ</a></b></p><br /><br />
-<h2>What is Axes io?</h2>
-<h3>A multiplayer battle royale game with axes</h3>
-<p>Axes io is a game developed by CASUAL AZUR GAMES, a popular studio that has created many other games like Worms Zone.io, Stack Ball, and Rocket Clash 3D. Axes io is a game where you have to throw axes at other players and try to be the last one standing. You can play online with up to 40 players from all over the world, or offline with bots. You can also choose between different game modes, such as Deathmatch, Team Deathmatch, Zombie Mode, and more.</p>
-<h3>Features of Axes io</h3>
-<h4>Different game modes and maps</h4>
-<p>Axes io offers you a variety of game modes and maps to choose from. You can play in Deathmatch mode, where you have to kill as many players as possible in a limited time. You can also play in Team Deathmatch mode, where you have to cooperate with your teammates and eliminate the enemy team. Or you can play in Zombie Mode, where you have to survive the zombie apocalypse and kill the undead. There are also different maps to explore, such as Forest, Desert, Snowy Mountain, and more.</p>
-<h4>Various weapons and skins</h4>
-<p>Axes io also allows you to customize your character and your weapons. You can unlock and use different types of axes, such as fire axes, ice axes, electric axes, etc. You can also unlock and use different skins for your character, such as ninja, pirate, cowboy, etc. You can also upgrade your weapons and skills to make them more powerful and effective.</p>
-<p>download axes io mod apk unlimited money and gems<br />
-download axes io mod apk latest version for android<br />
-download axes io mod apk free shopping and vip<br />
-download axes io mod apk no ads and no root<br />
-download axes io mod apk with all weapons unlocked<br />
-download axes io hack apk an1 com free<br />
-download axes io cheat apk an1 com online<br />
-download axes io cracked apk an1 com offline<br />
-download axes io premium apk an1 com full<br />
-download axes io pro apk an1 com modded<br />
-how to download axes io mod apk from an1 com<br />
-where to download axes io mod apk by an1 com<br />
-what is axes io mod apk on an1 com<br />
-why download axes io mod apk via an1 com<br />
-when to download axes io mod apk through an1 com<br />
-best site to download axes io mod apk like an1 com<br />
-top 10 sites to download axes io mod apk similar to an1 com<br />
-alternative sites to download axes io mod apk instead of an1 com<br />
-safe sites to download axes io mod apk other than an1 com<br />
-trusted sites to download axes io mod apk besides an1 com<br />
-download axes io battle royale mod apk an1 com<br />
-download axes io survival mode mod apk an1 com<br />
-download axes io zombie mode mod apk an1 com<br />
-download axes io multiplayer mode mod apk an1 com<br />
-download axes io single player mode mod apk an1 com<br />
-download axes io 2.7.19 mod apk from an1 com<br />
-download axes io 2.7.18 mod apk by an1 com<br />
-download axes io 2.7.17 mod apk on an1 com<br />
-download axes io 2.7.16 mod apk via an1 com<br />
-download axes io 2.7.15 mod apk through an1 com<br />
-benefits of downloading axes io mod apk from an1 com<br />
-drawbacks of downloading axes io mod apk by an1 com<br />
-reviews of downloading axes io mod apk on an1 com<br />
-ratings of downloading axes io mod apk via an1 com<br />
-feedback of downloading axes io mod apk through an1 com<br />
-tips for downloading axes io mod apk from an1 com<br />
-tricks for downloading axes io mod apk by an1 com<br />
-guides for downloading axes io mod apk on an1 com<br />
-tutorials for downloading axes io mod apk via an1 com<br />
-instructions for downloading axes io mod apk through an1 com</p>
-<h4>Simple controls and graphics</h4>
-<p>Axes io has simple controls that are easy to learn and use. You just have to swipe on the screen to move your character and tap to throw your axes. You can also use buttons to switch weapons and activate skills. The game also has colorful and cartoonish graphics that are suitable for all ages. The game runs smoothly on most devices and does not require a lot of storage space or internet connection.</p>
-<h2>What is Axes io Mod Apk An1 Com?</h2>
-<h3>A modified version of Axes io with unlimited resources</h3>
-<p>Axes io Mod Apk An1 Com is a modified version of Axes io that gives you unlimited resources and benefits in the game. With this mod apk, you can enjoy free shopping, free money, free chests, VIP access, and more. You can buy any weapon or skin you want without spending real money. You can open unlimited chests and get rare items and rewards. You can also access the VIP features, such as exclusive skins, weapons, and bonuses. You can also remove the annoying ads that pop up in the game and enjoy a smoother gaming experience.</p>
-<h3>Benefits of Axes io Mod Apk An1 Com</h3>
-<h4>Free shopping, money, chests, and VIP</h4>
-<p>With Axes io Mod Apk An1 Com, you can get everything you want in the game for free. You can buy any weapon or skin you like without spending any money. You can also get unlimited money to upgrade your weapons and skills. You can also open unlimited chests and get rare items and rewards. You can also access the VIP features, such as exclusive skins, weapons, and bonuses.</p>
-<h4>No ads, no root, no virus</h4>
-<p>Axes io Mod Apk An1 Com is also safe and secure to use. You don't have to worry about any ads that might interrupt your gameplay or drain your battery. You also don't have to root your device to use the mod apk. The mod apk is also free from any virus or malware that might harm your device or data.</p>
-<h4>Easy to download and install</h4>
-<p>Axes io Mod Apk An1 Com is also easy to download and install on your Android device. You don't need any special skills or tools to do it. You just have to follow some simple steps that we will explain below.</p>
-<h2>How to Download and Install Axes io Mod Apk An1 Com?</h2>
-<h3>Step 1: Enable unknown sources on your device</h3>
-<p>Before you can install the mod apk file, you have to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings and look for the security option. Then, find the unknown sources option and toggle it on.</p>
-<h3>Step 2: Download the mod apk file from the link below</h3>
-<p>Next, you have to download the mod apk file from the link below. This is a direct and fast link that will take you to the download page. Once you are there, click on the download button and wait for the file to be downloaded on your device.</p>
-<a href="">Download Axes io Mod Apk An1 Com Here</a>
-<h3>Step 3: Install the mod apk file and enjoy the game</h3>
-<p>Finally, you have to install the mod apk file on your device. To do this, locate the file in your device storage and tap on it. Then, follow the instructions on the screen and wait for the installation to be completed. Once it is done, you can launch the game and enjoy all the features of Axes io Mod Apk An1 Com.</p>
-<h2>Conclusion</h2>
-<p>Axes io is a fun and addictive multiplayer game that you can play with your friends or strangers online. You have to throw axes at your enemies and survive as long as possible. But if you want to have more advantages and resources in the game, then you should download Axes io Mod Apk An1 Com. This is a modified version of the game that gives you unlimited money, free shopping, free chests, VIP access, and more. You can also remove the ads, avoid rooting your device, and easily download and install the mod apk file. So what are you waiting for? Download Axes io Mod Apk An1 Com now and enjoy the game!</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about Axes io Mod Apk An1 Com:</p>
-<ol>
-<li><b>Is Axes io Mod Apk An1 Com safe to use?</b></li>
-<p>Yes, Axes io Mod Apk An1 Com is safe to use. It is free from any virus or malware that might harm your device or data. It also does not require root access or any special permissions.</p>
-<li><b>Is Axes io Mod Apk An1 Com compatible with my device?</b></li>
-<p>Axes io Mod Apk An1 Com is compatible with most Android devices that run on Android 4.4 or higher. However, some devices may not support some features or functions of the mod apk.</p>
-<li><b>Can I play online with Axes io Mod Apk An1 Com?</b></li>
-<p>Yes, you can play online with Axes io Mod Apk An1 Com. However, you may face some issues or errors while connecting to the server or playing with other players who are using the original version of the game.</p>
-<li><b>Can I update Axes io Mod Apk An1 Com?</b></li>
-<p>No, you cannot update Axes io Mod Apk An1 Com. You have to download the latest version of the mod apk file from the link below whenever there is a new update available for the game.</p>
-<li><b>Where can I get more information about Axes io Mod Apk An1 Com?</b></li>
-<p>If you have any questions or problems regarding Axes io Mod Apk An1 Com, you can contact us through the comment section below. We will try to answer your queries and solve your issues as soon as possible.</p>
-</ol></p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Cmo instalar Crafting and Building en tu PC con un emulador.md
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Crafting and Building para PC: A Guide for Beginners</h1>
|
3 |
-
<p>If you are looking for a fun and creative game that lets you build anything you want, then you should try <strong>Crafting and Building</strong>. This is a free game for Android devices that lets you explore, craft, and build your own world with blocks. You can also play with your friends online, visit their worlds, and help them with their constructions.</p>
|
4 |
-
<h2>download crafting and building para pc</h2><br /><p><b><b>DOWNLOAD</b> 🆓 <a href="https://jinyurl.com/2uNJcz">https://jinyurl.com/2uNJcz</a></b></p><br /><br />
|
5 |
-
<p>But what if you want to play this game on a bigger screen, with better graphics, performance, and controls? Well, you can do that by downloading <strong>Crafting and Building para PC</strong>. This means that you can play this game on your Windows or Mac computer using an emulator. An emulator is a software that allows you to run Android apps on your PC.</p>
|
6 |
-
<p>In this guide, we will show you how to download and install <strong>Crafting and Building para PC</strong> using three different em <p>ulators: BlueStacks, MEmu, and GameLoop. These are some of the most popular and reliable emulators that you can use to play Android games on your PC. We will also tell you about the features and reviews of <strong>Crafting and Building</strong> game, so you can see why it is worth playing.</p>
|
7 |
-
<h2>How to download and install Crafting and Building para PC with BlueStacks</h2>
|
8 |
-
<p>BlueStacks is one of the most widely used emulators for playing Android games on PC. It has a user-friendly interface, high compatibility, and smooth performance. Here are the steps to download and install <strong>Crafting and Building para PC</strong> with BlueStacks:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Step 1: Download and install BlueStacks on your PC from its official website. You can choose the version that suits your operating system (Windows or Mac).</li>
|
11 |
-
<li>Step 2: Complete Google sign-in to access the Play Store, or do it later. You will need a Google account to download apps from the Play Store.</li>
|
12 |
-
<li>Step 3: Search for Crafting and Building in the search bar at the top right corner. You can also browse the categories or genres of games in the Play Store.</li>
|
13 |
-
<li>Step 4: Click to install Crafting and Building from the search results. You will see the game icon and some information about it.</li>
|
14 |
-
<li>Step 5: Complete Google sign-in (if you skipped step 2) to install Crafting and Building. You will need to agree to the terms and conditions of the game.</li>
|
15 |
-
<li>Step 6: Click the Crafting and Building icon on the home screen to start playing. You can also find the game in your app drawer or library.</li>
|
16 |
-
</ul>
|
17 |
-
<p>Congratulations, you have successfully downloaded and installed <strong>Crafting and Building para PC</strong> with BlueStacks. Now you can enjoy this game on your PC with better graphics, performance, and controls.</p>
|
18 |
-
<h2>How to download and install Crafting and Building para PC with MEmu</h2>
|
19 |
-
<p>MEmu is another popular emulator for playing Android games on PC. It has a fast and stable performance, high compatibility, and multiple instances support. Here are the steps to download and install <strong>Crafting and Building para PC</strong> with MEmu:</p>
|
20 |
-
<p>download crafting and building on pc with bluestacks<br />
|
21 |
-
descargar crafting and building para pc gratis<br />
|
22 |
-
crafting and building game for pc free download<br />
|
23 |
-
como descargar crafting and building para pc<br />
|
24 |
-
crafting and building pc emulator<br />
|
25 |
-
download crafting and building adventure game for pc<br />
|
26 |
-
crafting and building online para pc<br />
|
27 |
-
descargar crafting and building en pc con gameloop<br />
|
28 |
-
crafting and building para pc windows 10<br />
|
29 |
-
download crafting and building offline game for pc<br />
|
30 |
-
crafting and building para pc sin emulador<br />
|
31 |
-
descargar crafting and building apk para pc<br />
|
32 |
-
crafting and building para pc requisitos<br />
|
33 |
-
download crafting and building mod apk for pc<br />
|
34 |
-
crafting and building para pc 2023<br />
|
35 |
-
descargar crafting and building ultima version para pc<br />
|
36 |
-
crafting and building multiplayer para pc<br />
|
37 |
-
download crafting and building for mac<br />
|
38 |
-
crafting and building para pc descargar mega<br />
|
39 |
-
download crafting and building for windows 7<br />
|
40 |
-
crafting and building para pc mediafire<br />
|
41 |
-
download crafting and building for laptop<br />
|
42 |
-
crafting and building para pc uptodown<br />
|
43 |
-
download crafting and building latest version for pc<br />
|
44 |
-
crafting and building para pc sin internet<br />
|
45 |
-
download crafting and building 2 for pc<br />
|
46 |
-
crafting and building skins para pc<br />
|
47 |
-
download crafting and building 3d for pc<br />
|
48 |
-
crafting and building juegos similares para pc<br />
|
49 |
-
download crafting and building survival for pc<br />
|
50 |
-
como jugar crafting and building en pc<br />
|
51 |
-
download crafting and building sandbox game for pc<br />
|
52 |
-
como instalar crafting and building en pc<br />
|
53 |
-
download crafting and building creative mode for pc<br />
|
54 |
-
como actualizar crafting and building en pc<br />
|
55 |
-
download crafting and building house design for pc<br />
|
56 |
-
como crear un servidor de crafting and building en pc<br />
|
57 |
-
download crafting and building pixel world for pc<br />
|
58 |
-
como tener diamantes infinitos en crafting and building para pc<br />
|
59 |
-
download crafting and building exploration for pc<br />
|
60 |
-
como hacer un portal en crafting and building para pc<br />
|
61 |
-
download crafting and building city builder for pc<br />
|
62 |
-
como tener mascotas en crafting and building para pc<br />
|
63 |
-
download crafting and building block craft for pc<br />
|
64 |
-
como cambiar el nombre en crafting and building para pc<br />
|
65 |
-
download crafting and building mine games for pc<br />
|
66 |
-
como hacer una casa en crafting and building para pc<br />
|
67 |
-
download crafting and building craft games for pc</p>
|
68 |
-
<ul>
|
69 |
-
<li>Step 1: Download MEmu installer and finish the setup from its official website. You can choose the language and location of the installation.</li>
|
70 |
-
<li>Step 2: Start MEmu then open Google Play on the desktop. You will see the Google Play Store icon on the home screen of MEmu.</li>
|
71 |
-
<li>Step 3: Search Crafting and Building in Google Play. You can also browse the categories or genres of games in the Play Store.</li>
|
72 |
-
<li>Step 4: Download and Install Crafting and Building. You will see the game icon and some information about it.</li>
|
73 |
-
<li>Step 5: On install completion click the icon to start. You will find the game icon on the home screen of MEmu.</li>
|
74 |
-
<li>Step 6: Enjoy playing Crafting and Building on PC with MEmu. You can also customize the settings, keyboard mapping, and gamepad support of MEmu.</li>
|
75 |
-
</ul>
|
76 |
-
<p>Congratulations, you have successfully downloaded and installed <strong>Crafting and Building para PC</strong> with MEmu. Now you can enjoy this game on your PC with better graphics, performance, and controls.</p>
<h2>How to download and install Crafting and Building para PC with GameLoop</h2>
<p>GameLoop is another popular emulator for playing Android games on PC. It is developed by Tencent, the company behind PUBG Mobile, Call of Duty Mobile, and other popular games. It offers smooth, optimized performance, high compatibility, and exclusive features. Here are the steps to download and install <strong>Crafting and Building para PC</strong> with GameLoop:</p>
<ul>
<li>Step 1: Download GameLoop from its official website. You can choose the language of the installation.</li>
<li>Step 2: Run GameLoop and click on the Game Center tab. You will see a list of recommended games that you can play on GameLoop.</li>
<li>Step 3: Search for Crafting and Building in the Game Center or via the search bar. You can also browse the categories and genres of games in GameLoop.</li>
<li>Step 4: Click on the Install button to download and install Crafting and Building. You will see the game icon and some information about it.</li>
<li>Step 5: Once the installation is done, click on the My Games tab. You will find all your installed games on GameLoop.</li>
<li>Step 6: Find Crafting and Building in your game library and click on the Play button. You can also adjust the settings, keyboard mapping, and gamepad support of GameLoop.</li>
</ul>
<p>Congratulations, you have successfully downloaded and installed <strong>Crafting and Building para PC</strong> with GameLoop. Now you can enjoy this game on your PC with better graphics, performance, and controls.</p>
<h2>Features and reviews of Crafting and Building game</h2>
<p><strong>Crafting and Building</strong> is a game that lets you unleash your creativity and imagination. You can build anything you want with blocks, from houses to castles, from farms to cities. You can also explore the world, mine resources, craft tools and weapons, and fight enemies. You can also play with your friends online, visit their worlds, and help them with their constructions.</p>
<p>Here are some of the features and reviews of <strong>Crafting and Building</strong> game that make it worth playing:</p>
<h3>Cool game, search for a hidden cave with your friends, multiplayer mode is cool</h3>
<p>One of the best things about <strong>Crafting and Building</strong> is that you can play with your friends online. You can join their worlds or invite them to yours, chat with them, and cooperate with them. You can also search for hidden caves, dungeons, and treasures together, and have fun exploring the world.</p>
<h3>Build anything, house with a room and a kitchen, a castle, etc.</h3>
<p><strong>Crafting and Building</strong> gives you the freedom to build anything you want with blocks. You can create your own house with a room and a kitchen, a castle with towers and walls, a farm with crops and animals, a city with skyscrapers and roads, or anything else you can imagine. You can also decorate your buildings with furniture, paintings, carpets, etc.</p>
<h3>Choose your character, boy or girl, custom skin, etc.</h3>
<p><strong>Crafting and Building</strong> lets you choose your character from different options. You can choose to be a boy or a girl, change your hair style and color, wear different clothes and accessories, or even create your own custom skin. You can also change your character anytime you want.</p>
<h3>Multiplayer games, you can play online and help your friend to build their house</h3>
<p><strong>Crafting and Building</strong> is not only a solo game but also a multiplayer game. You can play online with other players from around the world, chat with them, make friends, or compete with them. You can also help your friends build their houses or ask them to help you with yours, and you can share your creations with other players and see theirs.</p>
<h3>Fun game, play with villagers and animals, it is so fun</h3>
<p><strong>Crafting and Building</strong> is also a fun game that lets you play with villagers and animals. You can interact with them, trade with them, feed them, pet them, or even ride them. You can also find different types of animals in the world, such as cows, sheep, chickens, horses, dogs, cats, etc.</p>
<h3>Cool graphics, enjoy the best pixel graphics with high fps</h3>
<p><strong>Crafting and Building</strong> has cool graphics that are pixelated but colorful and detailed. You can enjoy the best pixel graphics with high fps on your PC, and change the graphics settings to your preference. You can also admire the beautiful scenery of the world, such as the day-night cycle, the weather effects, the water reflections, etc.</p>
<h3>Free game, play the game for free</h3>
<p><strong>Crafting and Building</strong> is a free game that you can download and play without paying anything. You can enjoy all the features and content of the game without limitations, and even play offline without an internet connection. Regular updates bring new features and improvements.</p>
<h3>Building game, build your own constructions, who will have the best building?</h3>
<p><strong>Crafting and Building</strong> is a building game that lets you show your creativity and skills. You can build your own constructions with blocks, from simple to complex, from realistic to fantasy, from small to large. You can also challenge yourself or your friends to see who will have the best building, rate and comment on other players' buildings, and get feedback on yours.</p>
<p>As you can see, <strong>Crafting and Building</strong> is a game that has many features and benefits that make it worth playing. But don't take our word for it; see what other players and reviewers have to say about it.</p>
<h4>Akisha Rodriguez gave it a 3-star rating on Google Play Store</h4>
<p>"Love it! I use it just to pass the time and build stuff. The only thing making it a 3 star is the very frustrating adds every couple of minutes."</p>
<h4>GamesRadar+ ranked it as one of the best crafting games for the craftiest gamers among us</h4>
<p>"If you like sidescrolling crafting games but you’re looking for less of a challenge in the survival department, then Junk Jack is the game for you."</p>
<p>In conclusion, <strong>Crafting and Building</strong> is a game that lets you explore, craft, and build your own world with blocks. You can also play with your friends online, visit their worlds, and help them with their constructions. You can download and install <strong>Crafting and Building para PC</strong> using an emulator such as BlueStacks, MEmu, or GameLoop, and enjoy the game on your PC with better graphics, performance, and controls.</p>
<h2>Frequently Asked Questions</h2>
<p>Here are some of the frequently asked questions about <strong>Crafting and Building para PC</strong>:</p>
<ul>
<li><strong>Q: Is Crafting and Building safe to download and play?</strong></li>
<li>A: Yes, Crafting and Building is safe to download and play. It does not contain any viruses, malware, or spyware, and it does not require any personal information or unusual permissions from your device.</li>
<li><strong>Q: Is Crafting and Building compatible with my PC?</strong></li>
<li>A: Yes, Crafting and Building is compatible with most PCs that run Windows or Mac operating systems. However, you may need to check the minimum system requirements of the emulator that you use to run Crafting and Building para PC.</li>
<li><strong>Q: How can I update Crafting and Building para PC?</strong></li>
<li>A: You can update Crafting and Building para PC by following the same steps that you used to download and install it. You can also check for updates in the Play Store or in the emulator that you use.</li>
<li><strong>Q: How can I uninstall Crafting and Building para PC?</strong></li>
<li>A: You can uninstall Crafting and Building para PC by removing the game from within the emulator, and you can also delete the emulator itself if you no longer need it.</li>
<li><strong>Q: How can I contact the developer of Crafting and Building?</strong></li>
<li>A: You can contact the developer of Crafting and Building by sending an email to [email protected] or by visiting their website at https://craftingbuildinggame.com/.</li>
</ul>
spaces/1phancelerku/anime-remove-background/Download Game Off Road 4x4 Driving Simulator and Become a Champion of Epic Trophy Raid.md
DELETED
@@ -1,121 +0,0 @@
<h1>Download Game Off Road 4x4 Driving Simulator</h1>
<p>If you are looking for a thrilling and realistic off-road racing adventure, you should try Off Road 4x4 Driving Simulator. This game is one of the best mud truck driving games and car racing simulators available on Android devices. In this article, we will tell you everything you need to know about this game, including its features, how to download it, and some FAQs.</p>
<h2>Introduction</h2>
<h3>What is Off Road 4x4 Driving Simulator?</h3>
<p>Off Road 4x4 Driving Simulator is a game developed by Azur Interactive Games Limited. It is an addictive mud truck driving game and a realistic car racing simulator. You can choose from a huge selection of 4x4 trucks and vehicles, each with different driving characteristics, and customize them to your liking. You can also test your driving skills in various off-road racing challenges, time trials, and extreme obstacles. You can enjoy stunning detailed graphics, real driving physics, realistic sounds, and a simple and convenient in-game map.</p>
<h2>download game off road 4x4 driving simulator</h2><br /><p><b><b>Download Zip</b> ✦ <a href="https://jinyurl.com/2uNNfo">https://jinyurl.com/2uNNfo</a></b></p><br /><br />
<h3>Why should you play Off Road 4x4 Driving Simulator?</h3>
<p>There are many reasons why you should play Off Road 4x4 Driving Simulator. Here are some of them:</p>
<ul>
<li>It is fun and exciting. You can experience the thrill of driving in rugged environments, such as mud, snow, sand, rocks, hills, and more. You can also perform stunts, jumps, drifts, and flips with your 4x4 trucks.</li>
<li>It is challenging and rewarding. You can compete against yourself or other players in various off-road racing modes, such as time trials, checkpoints, free roam, and more. You can also earn coins and rewards for completing challenges and races.</li>
<li>It is realistic and immersive. You can feel the realistic car driving physics, such as suspension, traction, torque, weight, and damage. You can also hear the realistic sounds of the engines, tires, brakes, and collisions, and see the gorgeous graphics of the landscapes, vehicles, weather effects, and shadows.</li>
</ul>
<h2>Features of Off Road 4x4 Driving Simulator</h2>
<h3>Gorgeous graphics and realistic physics</h3>
<p>One of the main features of Off Road 4x4 Driving Simulator is its stunning graphics and realistic physics. The game uses advanced graphics technology to create lifelike environments and vehicles. You can see the details of the textures, lighting, shadows, reflections, and particles, and feel the realistic physics of the vehicles, such as suspension, traction, torque, weight, and damage. The game also supports different weather effects, such as rain, snow, fog, and wind.</p>
<h3>Various 4x4 trucks and vehicles</h3>
<p>Another feature of Off Road 4x4 Driving Simulator is its variety of 4x4 trucks and vehicles. You can choose from over 20 different vehicles, each with different driving characteristics. You can drive pickup trucks, SUVs, jeeps, monster trucks, rally cars, military vehicles, and more. Each vehicle has its own strengths and weaknesses in terms of speed, acceleration, handling, durability, and fuel consumption.</p>
<h3>Endless tuning and customization</h3>
<p>A third feature of Off Road 4x4 Driving Simulator is its endless tuning and customization options. You can modify your vehicles to suit your preferences and needs. You can change the color, paint job, stickers, wheels, tires, suspension, engine, transmission, exhaust, and more. You can also upgrade your vehicles to improve their performance and durability, and unlock new vehicles by earning coins and rewards.</p>
<h3>Realistic sounds and map</h3>
<p>A fourth feature of Off Road 4x4 Driving Simulator is its realistic sounds and map. The game has high-quality sound effects that enhance the immersion and realism of the game. You can hear the sounds of the engines, tires, brakes, collisions, and environment. You can also use the in-game map to navigate the different locations and modes. The map shows you the terrain, roads, checkpoints, obstacles, and other points of interest.</p>
<h3>Dozens of challenges and time trials</h3>
<p>A fifth feature of Off Road 4x4 Driving Simulator is its dozens of challenges and time trials. The game has various off-road racing modes that test your driving skills and abilities. You can compete against yourself or other players in time trials, checkpoints, free roam, and more. You can also complete challenges that require you to perform stunts, jumps, drifts, flips, and more, earning coins and rewards along the way.</p>
<h3>Extreme obstacles and terrain</h3>
<p>A sixth feature of Off Road 4x4 Driving Simulator is its extreme obstacles and terrain. The game has different locations that offer different challenges and experiences. You can drive in mud, snow, sand, rocks, hills, forests, deserts, swamps, and more. You can also encounter various obstacles, such as ramps, bridges, logs, barrels, crates, pipes, and fences, all of which you have to overcome with your 4x4 trucks.</p>
<p>download game off road 4x4 driving simulator mod apk<br />
download game off road 4x4 driving simulator for pc<br />
download game off road 4x4 driving simulator android<br />
download game off road 4x4 driving simulator online<br />
download game off road 4x4 driving simulator free<br />
download game off road 4x4 driving simulator 2023<br />
download game off road 4x4 driving simulator extreme<br />
download game off road 4x4 driving simulator multiplayer<br />
download game off road 4x4 driving simulator full version<br />
download game off road 4x4 driving simulator windows 10<br />
download game off road 4x4 driving simulator apk<br />
download game off road 4x4 driving simulator pc offline<br />
download game off road 4x4 driving simulator ios<br />
download game off road 4x4 driving simulator laptop<br />
download game off road 4x4 driving simulator hack<br />
download game off road 4x4 driving simulator unlimited money<br />
download game off road 4x4 driving simulator latest version<br />
download game off road 4x4 driving simulator hd<br />
download game off road 4x4 driving simulator pro<br />
download game off road 4x4 driving simulator real<br />
download game off road 4x4 driving simulator best<br />
download game off road 4x4 driving simulator new<br />
download game off road 4x4 driving simulator update<br />
download game off road 4x4 driving simulator premium<br />
download game off road 4x4 driving simulator cheats<br />
download game off road 4x4 driving simulator review<br />
download game off road 4x4 driving simulator tips<br />
download game off road 4x4 driving simulator tricks<br />
download game off road 4x4 driving simulator guide<br />
download game off road 4x4 driving simulator gameplay<br />
download game off road 4x4 driving simulator walkthrough<br />
download game off road 4x4 driving simulator tutorial<br />
download game off road 4x4 driving simulator video<br />
download game off road 4x4 driving simulator youtube<br />
download game off road 4x4 driving simulator facebook<br />
download game off road 4x4 driving simulator instagram<br />
download game off road 4x4 driving simulator twitter<br />
download game off road 4x4 driving simulator reddit<br />
download game off road 4x4 driving simulator quora<br />
download game off road 4x4 driving simulator pinterest<br />
download game off road 4x4 driving simulator blogspot<br />
download game off road 4x4 driving simulator wordpress<br />
download game off road 4x4 driving simulator</p>
<h2>How to download Off Road 4x4 Driving Simulator</h2>
<h3>Download from Google Play Store</h3>
<p>The easiest way to download Off Road 4x4 Driving Simulator is from the Google Play Store. You can follow these steps:</p>
<ol>
<li>Open the Google Play Store app on your Android device.</li>
<li>Search for "Off Road 4x4 Driving Simulator" in the search bar.</li>
<li>Select the game from the list of results.</li>
<li>Tap on the "Install" button to download and install the game.</li>
<li>Wait for the installation to finish and then tap on the "Open" button to launch the game.</li>
</ol>
<h3>Download from APK websites</h3>
<p>Another way to download Off Road 4x4 Driving Simulator is from APK websites. These are websites that offer APK files of Android apps and games that you can download and install manually. However, you should be careful when downloading from these websites, as some of them may contain malware or viruses; only download from trusted and reputable sources. You can follow these steps:</p>
<ol>
<li>Open your web browser on your Android device.</li>
<li>Search for "Off Road 4x4 Driving Simulator APK" in the search engine.</li>
<li>Select a website that offers the APK file of the game.</li>
<li>Download the APK file to your device.</li>
<li>Before installing the APK file, you need to enable "Unknown sources" in your device settings. This will allow you to install apps from sources other than the Google Play Store.</li>
<li>Locate the APK file in your device storage and tap on it to install it.</li>
<li>Wait for the installation to finish and then tap on the game icon to launch it.</li>
</ol>
<h3>Download from PC emulator</h3>
<p>A third way to download Off Road 4x4 Driving Simulator is from a PC emulator. This is software that allows you to run Android apps and games on your PC, so you can enjoy the game on a bigger screen and with better controls. You need to have a compatible PC emulator installed on your PC first; some of the popular ones are BlueStacks, NoxPlayer, MEmu, and LDPlayer. You can follow these steps:</p>
<ol>
<li>Download and install a PC emulator of your choice on your PC.</li>
<li>Launch the PC emulator and sign in with your Google account.</li>
<li>Open the Google Play Store app within the PC emulator.</li>
<li>Search for "Off Road 4x4 Driving Simulator" in the search bar.</li>
<li>Select the game from the list of results.</li>
<li>Click on the "Install" button to download and install the game.</li>
<li>Wait for the installation to finish and then click on the game icon to launch it.</li>
</ol>
<h2>Conclusion</h2>
<h3>Summary of the article</h3>
<p>In conclusion, Off Road 4x4 Driving Simulator is a great game for anyone who loves off-road racing and driving. It has amazing graphics, physics, sounds, and an in-game map. It has a wide range of 4x4 trucks and vehicles that you can tune and customize, various challenges and time trials that you can compete in, and extreme obstacles and terrain to explore. You can download the game from the Google Play Store, from APK websites, or through a PC emulator. We hope you enjoyed this article and found it helpful. If you have any questions or feedback, please let us know in the comments section below.</p>
<h3>FAQs</h3>
<p>Here are some frequently asked questions about Off Road 4x4 Driving Simulator:</p>
<ul>
<li><b>Q: How much space does the game require on my device?</b></li>
<li>A: The game requires about 200 MB of free space on your device.</li>
<li><b>Q: Is the game compatible with my device?</b></li>
<li>A: The game is compatible with Android devices that have Android 5.0 or higher.</li>
<li><b>Q: Is the game free to play?</b></li>
<li>A: The game is free to download and play, but it contains ads and in-app purchases.</li>
<li><b>Q: How can I remove the ads from the game?</b></li>
<li>A: You can remove the ads from the game by purchasing the ad-free version for $1.99.</li>
<li><b>Q: How can I contact the developer of the game?</b></li>
<li>A: You can contact the developer of the game by emailing them at [email protected] or visiting their website at https://azurgames.com/.</li>
</ul>
spaces/1phancelerku/anime-remove-background/ETS2 Download Tips and Tricks for Running Your Own Trucking Business.md
DELETED
@@ -1,106 +0,0 @@
<h1>ETS2 Download: How to Get Euro Truck Simulator 2 for Free</h1>
<p>If you are a fan of driving games and simulation games, you might have heard of Euro Truck Simulator 2, or ETS2 for short. This game lets you travel across Europe as a truck driver, delivering cargo and exploring different cities and landscapes. But how can you get ETS2 download for free? In this article, we will show you how to download the Euro Truck Simulator 2 demo, full version, and mods from various sources. But first, let's take a look at what this game is all about.</p>
<h2>ets2 download</h2><br /><p><b><b>DOWNLOAD</b> ✒ <a href="https://jinyurl.com/2uNNNG">https://jinyurl.com/2uNNNG</a></b></p><br /><br />
<h2>What is Euro Truck Simulator 2?</h2>
<p>Euro Truck Simulator 2 is a truck driving simulation game developed and published by SCS Software in 2012. It is the sequel to the original Euro Truck Simulator, which was released in 2008. The game features licensed trucks from various manufacturers, such as MAN, Scania, Volvo, DAF, Renault, and more. You can customize your truck with different parts, paint jobs, accessories, and decals. You can also run your own trucking business, hire drivers, buy garages, and manage your finances.</p>
<h3>Euro Truck Simulator 2 features</h3>
<p>Some of the main features of Euro Truck Simulator 2 are:</p>
<ul>
<li>Transport a vast variety of cargo across more than 60 European cities.</li>
<li>Travel through realistic road networks and landmarks.</li>
<li>Experience different weather conditions and day-night cycles.</li>
<li>Follow the traffic rules and regulations of each country.</li>
<li>Listen to real radio stations or your own music playlist.</li>
<li>Enjoy the scenery and the dynamic environment.</li>
<li>Expand your truck fleet and your company reputation.</li>
<li>Compete with other players online or join a multiplayer convoy.</li>
</ul>
<h3>Euro Truck Simulator 2 system requirements</h3>
<p>To run Euro Truck Simulator 2 on your PC, you need to meet the following system requirements:</p>
<table>
<tr><th>Minimum</th><th>Recommended</th></tr>
<tr><td>OS: Windows 7<br>CPU: Dual core CPU 2.4 GHz<br>RAM: 4 GB<br>GPU: GeForce GTS 450-class (Intel HD 4000)<br>HDD: 7 GB available space<br>DirectX: DirectX 9.0c</td><td>OS: Windows 7/8.1/10 64-bit<br>CPU: Quad core CPU 3.0 GHz<br>RAM: 6 GB<br>GPU: GeForce GTX 760-class (2 GB)<br>HDD: 7 GB available space<br>DirectX: DirectX 9.0c</td></tr>
</table>
<h3>Euro Truck Simulator 2 reviews</h3>
<p>Euro Truck Simulator 2 has received overwhelmingly positive reviews from critics and players alike. The game has a score of 96% on Steam, based on over 470,000 user reviews. It has also won several awards, such as the “I Thought This Game Was Cool Before It Won An Award” Award and the “Sit Back and Relax” Award from the Steam Awards. Some of the praise for the game:</p>
<blockquote>"Euro Truck Simulator 2 is that rare thing, a strong sim tethered to a strong game. Where other vehicle-obsessed devs seem to take player motivation for granted, Czech studio SCS understand that a pleasingly modelled steed needs a pleasingly modelled environment to shine." - PC Gamer</blockquote>
<blockquote>"Euro Truck Simulator 2 reviews are mostly positive, praising the game as the best simulation game period and the best heavy vehicle simulator ever made. The game offers a realistic and varied environment of Europe and a pleasingly modelled steed to drive. The game is also praised for its modding community, which adds new content and features to the game." - Game Rant</blockquote>
<blockquote>"Euro Truck Simulator 2 is a deep and rewarding game, and it was met with favorable reviews when it released back in 2012. It's maintained popularity with fans, who continue to produce mods that add new vehicles, maps, and more to the game. It's not often that a simulator game can appeal to a wide audience, but Euro Truck Simulator 2 does just that." - Screen Rant</blockquote>
<h2>How to download Euro Truck Simulator 2 for free?</h2>
<p>Now that you know what Euro Truck Simulator 2 is and why it is so popular, you might be wondering how to get ETS2 download for free. There are several ways to do that, depending on what you want to play. Here are some of the options:</p>
<p>ets2 download free trial<br />
ets2 download steam key<br />
ets2 download full version<br />
ets2 download latest version<br />
ets2 download for pc<br />
ets2 download for mac<br />
ets2 download for linux<br />
ets2 download mods<br />
ets2 download map<br />
ets2 download crack<br />
ets2 download torrent<br />
ets2 download apk<br />
ets2 download android<br />
ets2 download ios<br />
ets2 download online<br />
ets2 download multiplayer<br />
ets2 download demo<br />
ets2 download patch<br />
ets2 download update<br />
ets2 download dlc<br />
ets2 download going east<br />
ets2 download scandinavia<br />
ets2 download vive la france<br />
ets2 download italia<br />
ets2 download road to the black sea<br />
ets2 download beyond the baltic sea<br />
ets2 download heart of russia<br />
ets2 download promods<br />
ets2 download truckersmp<br />
ets2 download truck skins<br />
ets2 download trailer skins<br />
ets2 download sound mods<br />
ets2 download traffic mods<br />
ets2 download weather mods<br />
ets2 download graphics mods<br />
ets2 download realistic mods<br />
ets2 download tuning mods<br />
ets2 download bus mods<br />
ets2 download car mods<br />
ets2 download save game<br />
ets2 download profile editor<br />
ets2 download cheat engine<br />
ets2 download money hack<br />
ets2 download level hack<br />
ets2 download console commands<br />
ets2 download radio stations<br />
ets2 download custom music<br />
ets2 download world of trucks account</p>
<h3>Download Euro Truck Simulator 2 demo from the official website</h3>
<p>If you want to try out the game before buying it, you can download the Euro Truck Simulator 2 demo from the official website. The demo version allows you to play for one hour with one of the basic trucks, and to visit several cities in Germany, Austria, Switzerland, and Italy. The demo is compatible with Windows 7 or later, and requires about 4 GB of disk space.</p>
<h3>Download Euro Truck Simulator 2 from Steam</h3>
<p>If you want to play the full version of the game, you can buy it from Steam, the popular digital distribution platform. The game costs $19.99 USD, but it often goes on sale for up to 75% off. You can also buy various DLCs (downloadable content) that add new maps, trucks, cargoes, and more to the game. Some of the most popular DLCs are:</p>
<ul>
<li>Going East! - expands the map to Eastern Europe.</li>
<li>Scandinavia - adds new destinations in Denmark, Norway, and Sweden.</li>
<li>Vive la France! - explores the beautiful countryside of France.</li>
<li>Italia - delivers goods across Italy and its islands.</li>
<li>Beyond the Baltic Sea - ventures into Finland, Estonia, Latvia, Lithuania, and Russia.</li>
<li>Road to the Black Sea - travels through Romania, Bulgaria, and Turkey.</li>
<li>Iberia - covers Spain and Portugal.</li>
</ul>
<h3>Download Euro Truck Simulator 2 mods from Steam Workshop or other websites</h3>
<p>If you want to enhance your gameplay experience with custom content created by other players, you can download Euro Truck Simulator 2 mods from Steam Workshop or other websites. Mods are modifications that change or add new features to the game, such as new trucks, trailers, skins, sounds, maps, traffic, weather, and more. You can browse through thousands of mods and choose the ones that suit your preferences. To install mods from Steam Workshop, subscribe to them and enable them in the game's mod manager. To install mods from other websites, download them and copy them to the "mod" folder in your game directory, as sketched below.</p>
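<p>As a concrete sketch of that manual install (the folder layout below is the one the game scans; the mod file name is made up for illustration):</p>
<pre>
Documents\
  Euro Truck Simulator 2\
    mod\
      realistic_graphics_v1.scs   (downloaded mod archive goes here)
</pre>
<p>After copying the .scs file, the mod still has to be enabled in the in-game Mod Manager when you create or load a profile.</p>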
<h2>Conclusion</h2>
<p>Euro Truck Simulator 2 is a fun and realistic truck driving simulation game that lets you explore Europe as a truck driver. You can try ETS2 for free with the demo version from the official website, or buy the full version from Steam. You can also download ETS2 mods from Steam Workshop or other websites to customize your game with new content and features. Whether you want to relax and enjoy the scenery, or challenge yourself with different cargoes and routes, Euro Truck Simulator 2 has something for everyone.</p>
<h3>FAQs</h3>
<ul>
<li><b>Q: How do I update Euro Truck Simulator 2?</b><br>A: If you have bought the game from Steam, it will update automatically when a new version is available. If you have downloaded the game from another source, you need to download the latest patch from the official website and install it manually.</li>
<li><b>Q: How do I play Euro Truck Simulator 2 online?</b><br>A: You can play Euro Truck Simulator 2 online with other players by using a third-party client called TruckersMP, which lets you join multiplayer servers and chat with other drivers. You need to register an account on their website and download their client, and you also need a valid copy of the game on Steam.</li>
<li><b>Q: How do I use cheats in Euro Truck Simulator 2?</b><br>A: You can use cheats in Euro Truck Simulator 2 by editing some files in your game folder. For example, you can edit the "config.cfg" file to change engine settings (such as enabling the developer console), and edit save files such as "game.sii" to change your money, truck, trailer, cargo, etc. However, using cheats may affect your game performance and achievements. Use them at your own risk (see the example after this list).</li>
<li><b>Q: How do I install Euro Truck Simulator 2 on Mac?</b><br>A: You can install Euro Truck Simulator 2 on Mac by buying the game from Steam or the App Store. The game is compatible with macOS 10.9 or later, and requires about 7 GB of disk space.</li>
<li><b>Q: How do I get more money in Euro Truck Simulator 2?</b><br>A: You can get more money in Euro Truck Simulator 2 by completing more deliveries, taking higher-paying jobs, hiring more drivers, upgrading your trucks, and managing your expenses. You can also use cheats or mods to increase your money, but this may affect your game balance and achievements.</li>
</ul>
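<p>To illustrate the cheats answer above: the two "config.cfg" lines most often edited (the file lives in Documents\Euro Truck Simulator 2\) enable the developer console. This is a sketch of a well-known community tweak, not official guidance:</p>
<pre>
uset g_developer "1"
uset g_console "1"
</pre>
<p>With both set to "1", the console opens with the tilde (~) key in-game.</p>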
spaces/1toTree/lora_test/.ipynb_checkpoints/README-checkpoint.md
DELETED
@@ -1,12 +0,0 @@
---
title: LoRa ppdiffusers dreambooth
emoji: 🎨🎞️
colorFrom: pink
colorTo: purple
sdk: gradio
sdk_version: 3.18.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/2ndelement/voicevox/voicevox_engine/dev/core/mock.py
DELETED
@@ -1,121 +0,0 @@
import json
from logging import getLogger
from typing import Any, Dict, List

import numpy as np
from pyopenjtalk import tts
from scipy.signal import resample

DUMMY_TEXT = "これはダミーのテキストです"


def initialize(path: str, use_gpu: bool, *args: List[Any]) -> None:
    pass


def yukarin_s_forward(length: int, **kwargs: Dict[str, Any]) -> np.ndarray:
    logger = getLogger("uvicorn")  # because this is called from within FastAPI / Uvicorn
    logger.info(
        "Sorry, yukarin_s_forward() is a mock. Return values are incorrect.",
    )
    return np.ones(length) / 5


def yukarin_sa_forward(length: int, **kwargs: Dict[str, Any]) -> np.ndarray:
    logger = getLogger("uvicorn")  # because this is called from within FastAPI / Uvicorn
    logger.info(
        "Sorry, yukarin_sa_forward() is a mock. Return values are incorrect.",
    )
    return np.ones((1, length)) * 5


def decode_forward(length: int, **kwargs: Dict[str, Any]) -> np.ndarray:
    """
    Return synthesized waveform data as a NumPy array; however, a fixed
    phrase (DUMMY_TEXT) is always read out.
    See the docstring of SynthesisEngine [Mock].

    Parameters
    ----------
    length : int
        Length in frames

    Returns
    -------
    wave : np.ndarray
        Synthesized waveform data

    Note
    -------
    The synthesis performed here does not reflect prosody (pitch etc.),
    and the same fixed phrase is read out regardless of the input.

    # Output spec of pyopenjtalk.tts()
    dtype=np.float64, 16 bit, mono 48000 Hz

    # About the resample call
    The output is converted to 24 kHz to match the non-mock decode_forward.
    """
    logger = getLogger("uvicorn")  # because this is called from within FastAPI / Uvicorn
    logger.info(
        "Sorry, decode_forward() is a mock. Return values are incorrect.",
    )
    wave, sr = tts(DUMMY_TEXT)
    wave = resample(
        wave.astype("int16"),
        24000 * len(wave) // 48000,
    )
    return wave


def metas() -> str:
    return json.dumps(
        [
            {
                "name": "dummy1",
                "styles": [
                    {"name": "style0", "id": 0},
                    {"name": "style1", "id": 2},
                    {"name": "style2", "id": 4},
                    {"name": "style3", "id": 6},
                ],
                "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff",
                "version": "mock",
            },
            {
                "name": "dummy2",
                "styles": [
                    {"name": "style0", "id": 1},
                    {"name": "style1", "id": 3},
                    {"name": "style2", "id": 5},
                    {"name": "style3", "id": 7},
                ],
                "speaker_uuid": "388f246b-8c41-4ac1-8e2d-5d79f3ff56d9",
                "version": "mock",
            },
            {
                "name": "dummy3",
                "styles": [
                    {"name": "style0", "id": 8},
                ],
                "speaker_uuid": "35b2c544-660e-401e-b503-0e14c635303a",
                "version": "mock",
            },
            {
                "name": "dummy4",
                "styles": [
                    {"name": "style0", "id": 9},
                ],
                "speaker_uuid": "b1a81618-b27b-40d2-b0ea-27a9ad408c4b",
                "version": "mock",
            },
        ]
    )


def supported_devices() -> str:
    return json.dumps(
        {
            "cpu": True,
            "cuda": False,
        }
    )
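
# A standalone usage sketch (not part of the original mock) showing what the
# mock functions return; it assumes numpy, scipy, and pyopenjtalk are
# installed, as the imports above already require.
if __name__ == "__main__":
    durations = yukarin_s_forward(10)  # shape (10,), every phoneme length 0.2
    pitches = yukarin_sa_forward(10)   # shape (1, 10), every pitch value 5.0
    wave = decode_forward(0)           # 24 kHz waveform of DUMMY_TEXT; `length` is ignored
    print(durations.shape, pitches.shape, wave.shape)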
spaces/A00001/bingothoo/README.md
DELETED
@@ -1,196 +0,0 @@
---
title: bingo
emoji: 📉
colorFrom: red
colorTo: red
sdk: docker
license: mit
duplicated_from: hf4all/bingo
---

<div align="center">

# Bingo

Bingo, a New Bing that lets you breathe easy.

A faithful recreation of the main features of the New Bing web UI, usable from mainland China, compatible with the vast majority of Microsoft Bing AI features, and deployable on your own infrastructure.

![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars)
![Gthub forks](https://img.shields.io/github/forks/weaigc/bingo)
[![docker build](https://img.shields.io/docker/pulls/weaigc/bingo?label=docker%20pulls)](https://hub.docker.com/repository/docker/weaigc/bingo/)
[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/)
[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license)

</div>

## Demo site

https://bing.github1s.tk



[![Demo](./docs/images/demo.png)](https://bing.github1s.tk)

## Features

- Completely rewritten on Next.js, closely reproducing the New Bing web UI; the experience is essentially the same as Bing AI.
- Docker build supported for quick, convenient deployment and access.
- Cookies can be configured once and shared globally.
- Continuous voice conversation supported.

## RoadMap

- [x] wss forwarding
- [x] One-click deployment
- [x] Improved mobile layout
- [x] Image generation
- [x] Voice input (with voice commands; currently desktop Edge and Chrome only)
- [x] Voice output (must be enabled manually)
- [x] Image input
- [x] Custom domains
- [ ] Chat history
- [ ] Dark mode
- [ ] Built-in prompts
- [ ] Offline access
- [ ] Internationalized translations

## One-click deployment
You can also deploy your own New Bing AI to 🤗 HuggingFace with one click.

### Deploy to Huggingface
1. Click this icon
[![Deploy to HuggingFace](https://img.shields.io/badge/%E7%82%B9%E5%87%BB%E9%83%A8%E7%BD%B2-%F0%9F%A4%97-fff)](https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic); the configuration can be left unchanged.

2. Once deployment finishes, open "Settings" > "Site domain", copy the HF domain, and share it with others.

> Huggingface does not support binding your own domain, but there are workarounds:
> 1. Via Cloudflare Workers; see [Using Cloudflare Workers for a custom domain](#using-cloudflare-workers-for-a-custom-domain)
> 2. Via GitHub Pages and an iframe; see [how to bind a domain](https://github.com/weaigc/bingo/issues/4)

### Using Cloudflare Workers for a custom domain

> Core code: [worker.js](./cloudflare/worker.js)

- [Register a Cloudflare account](https://dash.cloudflare.com/sign-up)

- Add a new site; this requires your own domain with its name servers delegated to Cloudflare (search for details).

- Open "Workers" from the left menu and click "Create a Worker".

- Create the Worker service, copy all the code from [worker.js](./cloudflare/worker.js), paste it into the new service, adjust it following the comments, then save and deploy.

- Set a custom access domain under Triggers.

### Deploying to other platforms
<details>
<summary>
Other platforms are currently being blocked by New Bing and run into many problems, so they are no longer recommended; the instructions remain for those who need them
</summary>

#### Deploy to Netlify
[![Deploy to Netlify Button](https://www.netlify.com/img/deploy/button.svg)](https://app.netlify.com/start/deploy?repository=https://github.com/weaigc/bingo)

#### Deploy to Vercel
If you are a paid Vercel user, you can use the link below to deploy to Vercel with one click. The free tier has an [API timeout limit](https://vercel.com/docs/concepts/limits/overview) and is not recommended.

[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?demo-title=bingo&demo-description=bingo&demo-url=https%3A%2F%2Fbing.github1s.tk%2F&project-name=bingo&repository-name=bingo&repository-url=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo&from=templates&skippable-integrations=1&env=BING_HEADER&envDescription=%E5%A6%82%E6%9E%9C%E4%B8%8D%E7%9F%A5%E9%81%93%E6%80%8E%E4%B9%88%E9%85%8D%E7%BD%AE%E8%AF%B7%E7%82%B9%E5%8F%B3%E4%BE%A7Learn+More&envLink=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo%2Fblob%2Fmain%2F.env.example)

#### Deploy to Render

[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/weaigc/bingo)
</details>

## Environment and dependencies

- Node.js >= 18
- Bing AI [credentials](#how-to-get-bing_header)

## Installation and usage

> Since Microsoft is currently blocking aggressively, [deploying to Huggingface](#deploy-to-huggingface) is the recommended first choice.

* Start with Node

```bash
git clone https://github.com/weaigc/bingo.git
npm i # pnpm i is recommended
npm run build
npm run start
```

* Start with Docker
```bash
docker pull weaigc/bingo
docker run --rm -it -p 7860:7860 weaigc/bingo
# or
docker run --rm -it -e BING_HEADER=xxxx -p 7860:7860 weaigc/bingo
```
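
For Compose users, an equivalent minimal `docker-compose.yml` might look like this (a sketch assuming the same image and port as the `docker run` command above; the `BING_HEADER` value is a placeholder):

```yaml
version: "3"
services:
  bingo:
    image: weaigc/bingo
    ports:
      - "7860:7860"
    environment:
      # base64-encoded header, obtained as described in the next section
      - BING_HEADER=xxxx
```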

## How to get BING_HEADER
> Setting BING_HEADER means sharing your own account with everyone who uses this service; if you do not need login-free image generation, setting this variable is not recommended

Open https://www.bing.com and sign in, then visit https://www.bing.com/turing/captcha/challenge, pass the human verification, and then

![BING_HEADER](./docs/images/curl.png)

> The copied content should look like the example below. After confirming the format is correct, open https://effulgent-bubblegum-e2f5df.netlify.app/#dialog=%22settings%22 , paste it in, click "Convert to BING_HEADER and copy", and then paste the result from the clipboard. (You can also verify it on that page first.)

The following is a format reference. Note that what the web page saves starts with `curl`, while the server-side `BING_HEADER` is the `base64` form; the two are not interchangeable.
<details>
<summary>Normal format / the format saved on the web side (for reference only)</summary>

```
curl 'https://www.bing.com/turing/captcha/challenge' \
-H 'authority: www.bing.com' \
-H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \
-H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6' \
-H 'cache-control: max-age=0' \
-H 'cookie: MicrosoftApplicationsTelemetryDeviceId=3399c004-fd0e-48ec-bb92-d82a27b2bbd4; _EDGE_V=1; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=29EBDDA4E6674329ACCF1A0A423C3E98&dmnchg=1; _UR=QS=0&TQS=0; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yNVQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6Mn0=; _RwBf=ilt=1&ihpd=1&ispd=0&rc=0&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=&clo=0&v=1&l=2023-07-25T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2023-07-25T11:00:31.7111548+00:00&rwred=0&wls=&lka=0&lkt=0&TH=&dci=0; ANON=A=0043C6590EA808ED6E395059FFFFFFFF&E=1c8b&W=1; NAP=V=1.9&E=1c31&C=DnaMSbDN_4efZ_xXqBF3Daorjr53kYqYoaP8YHsupjmiXnysX7a37A&W=1; PPLState=1; KievRPSSecAuth=FABSBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACMGUA7EGVSjGEAQBGHtNsc5sNL7unmJsfPJ2t6imfo4BeUJlAia3IpMTtMUy4PU/C5QAzRI5pODtsIee0+blgllXt/5IiWwGjwmdhivsFM597pRPkjARPfwsPhNLPNbJrCPNPHdje4Is78MnCADXw6/NBq2FL8V2/byw2fH6IuAMD2MvN/VvqpEa9ZxiDjZtENj4HEj0mO2SgzjfyEhVAkjvznJqU2rw/Q2tHmX94NAM2kzlzKF/hWPhCCUmu8IHLvCnHDS6mSptvJDDP/sp3ovtzOXkP1mlM/Xju5ftesUvccVEQGffXORa1dE5hEMbKIiKXz1tDdduSXE19g9/+mRMAjaQhpwhI8XmilCTx1adb1Ll5qK+VjC9GNfEZzcbsGBPVaOl+anG8rEMq+Xnhjo7J+NqTNolavHgcuV8kJsCeJZIged33UA8eOZeFo+wAECMguxMoSqgpGH+sthqynvD/FJD6r/tiU2N3uqVq8NE8V37asrN6T14Z0FGBJOe6ET1+PGApm3s11OY9/xhFEB9T5BEPUGEbvRcLcW2ncFQX0EU+xweiPqo1Q1hNUg/dCtSI+lZ7c2H8XheePZavZ0TJQ8oNCSAuKiTqJmI0fVGpwbXwfaADkEipuawz3fIuMJBNgMU0OtA7Hm59v2fGLIBuvi6YeKS6GgVk3BIPf+P/eKahwozrxQZaFnoHTSqMkvct7xCP4atBROfXKf5Ww0CcFKp+2WX9BIskTOo2jjk6bAyyYJ+ElUB1fgLKNk5m/YSMc9iYCLIBMIGN8F0Yvy3tZ7cvh7Ue5Klo98US/I+nW1G7ZJMHRgUO8h8lpneHqEMegKd8gynO4VF7RpCjJkunDmW0Ta+RkXAP619pg0dqHMFkoOgknN78oBbGTV6fJUKotv+vi61kLhAeXZGWoHGCRXh2wUC6YgfPgKA6ESRNHtFn7E5B3HHpLc5rVMDSNhKZYfdhupV4Ezf6+5DhMcZLZhi0kk+ivDiN1gdHlVtSN55xpvf+c+XZDzR0uhgcvgy0LAbmzgk6y4WbYH+LQsMpzNNj+aC72vMiWovWrKh9jY4MYCmdgxsS/skPtLdp18muiEIRXTbZQGUmhxFpJAIbBIsCscMpzL0BgeujxUwM5wr79Sd9r4xwbgSMwmBlBfUHRVBdNyg8feepeJbCS63nD6eHOuLqMRsPIio3w/ki/EAa92UUEiZeavLsMUD/y/qAvWUdzdP5Y+C/TM+CMGS/kGL4LEdY/28MQeTvU1qv1X21kQt2aiaj3pPVL36hAzxbcLgqcMo9oymDRy87kdCXW/+g4oKLtMh6fm/G6W6Y/B01JlxohyyvueHQIG557uzkEkTJ3FnOVODSKBKpb3WZ65rExfV71zSZa25F3GmpaIG6HiYrX2YYhQAkIE9pKEQBHbnwHuwNDGottZTXZw=; WLS=C=9df3f9d8518fae19&N=wen; WLID=pGY8HgWCu4p5XYCOk2oa0+DBdftkMUfmNIn8XtSjSTKsgv/Il7GUlYs0Jpjf/E12jZMgV7x44Dy3fXOgjjUoJx7Y/ClLrLhsk20THksJJoI=; _EDGE_S=F=1&SID=17CF6EE006426448213C7DB907436588&mkt=zh-CN; MUID=225621093D8A6C27301632413C0E6D08; MUIDB=225621093D8A6C27301632413C0E6D08; SUID=A; SNRHOP=I=&TS=; _U=nGyzKQruEsDwLiu65fZFIG6e12hf2lwTJmroW__k8joUJIKmG3OIjayXKGW9dCVR3sNhF76mEVxyW6yjUGPodOfjtSa3s3J_DxMOrEK1BqXCOBI9bC66spAIASV7prsYFlVAJz73jVNENp_tBubLHJy6EbT0BKRe4AjrYkH-9uMnmCKB8Zmyg; _SS=SID=17CF6EE006426448213C7DB907436588&R=0&RB=0&GB=0&RG=200&RP=0&PC=U531; SRCHS=PC=U531; USRLOC=HS=1&ELOC=LAT=22.501529693603516|LON=113.9263687133789|N=%E5%8D%97%E5%B1%B1%E5%8C%BA%EF%BC%8C%E5%B9%BF%E4%B8%9C%E7%9C%81|ELT=2|&CLOC=LAT=22.50153029046461|LON=113.92637070632928|A=733.4464586120832|TS=230726151034|SRC=W; SRCHUSR=DOB=20230725&T=1690384908000&POEX=W; ipv6=hit=1690388509974&t=6; SRCHHPGUSR=HV=1690384945&SRCHLANG=zh-Hans&PV=15.0.0&BRW=MW&BRH=MT&CW=410&CH=794&SCW=410&SCH=794&DPR=1.5&UTC=480&DM=0&WTS=63825879627&PRVCW=410&PRVCH=794&PR=1.5; cct=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpny6Y_CVyi_MSyM94VyMWnjdYkkccVtm3czoIAtXUGQA; 
GC=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpR3Y_D9Ytcks4Ht6XhadXk75dvhzP4YOUS0UmoEyqyxw' \
-H 'dnt: 1' \
-H 'sec-ch-ua: "Chromium";v="116", "Not)A;Brand";v="24", "Microsoft Edge";v="116"' \
-H 'sec-ch-ua-arch: "x86"' \
-H 'sec-ch-ua-bitness: "64"' \
-H 'sec-ch-ua-full-version: "116.0.1938.29"' \
-H 'sec-ch-ua-full-version-list: "Chromium";v="116.0.5845.42", "Not)A;Brand";v="24.0.0.0", "Microsoft Edge";v="116.0.1938.29"' \
-H 'sec-ch-ua-mobile: ?0' \
-H 'sec-ch-ua-model: ""' \
-H 'sec-ch-ua-platform: "Windows"' \
-H 'sec-ch-ua-platform-version: "15.0.0"' \
-H 'sec-fetch-dest: document' \
-H 'sec-fetch-mode: navigate' \
-H 'sec-fetch-site: none' \
-H 'sec-fetch-user: ?1' \
-H 'sec-ms-gec: B3F47AD4A283CAB374C0451C46AAFD147C6A4DACAFF6A1C13F34B2C72B024494' \
-H 'sec-ms-gec-version: 1-116.0.1938.29' \
-H 'upgrade-insecure-requests: 1' \
-H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.0.0' \
-H 'x-client-data: eyIxIjoiMiIsIjEwIjoiXCJTMGg3R05HOTF2aDQ1TUZSUnZ5NHN2akRmMWdlaVJKenNxNlA3aU1WbnF3PVwiIiwiMiI6IjEiLCIzIjoiMSIsIjQiOiIyMTU4ODQ5NTM4MjY4OTM5NTA3IiwiNSI6IlwiSm9GUWpPTDk3OS9MbkRRZnlCd2N1M2FsOUN3eTZTQmdaMGNYMXBtOWVMZz1cIiIsIjYiOiJiZXRhIiwiNyI6IjE4MDM4ODYyNjQzNSIsIjkiOiJkZXNrdG9wIn0=' \
-H 'x-edge-shopping-flag: 1' \
--compressed
```
</details>

<details>
<summary>Format after conversion to base64 (BING_HEADER can only use the base64 form)</summary>

```
Y3VybCAnaHR0cHM6Ly93d3cuYmluZy5jb20vdHVyaW5nL2NvbnZlcnNhdGlvbi9jcmVhdGUnIFwgICAtSCAnYXV0aG9yaXR5OiB3d3cuYmluZy5jb20nIFwgICAtSCAnYWNjZXB0OiB0ZXh0L2h0bWwsYXBwbGljYXRpb24veGh0bWwreG1sLGFwcGxpY2F0aW9uL3htbDtxPTAuOSxpbWFnZS93ZWJwLGltYWdlL2FwbmcsKi8qO3E9MC44LGFwcGxpY2F0aW9uL3NpZ25lZC1leGNoYW5nZTt2PWIzO3E9MC43JyBcICAgLUggJ2FjY2VwdC1sYW5ndWFnZTogemgtQ04semg7cT0wLjksZW47cT0wLjgsZW4tR0I7cT0wLjcsZW4tVVM7cT0wLjYnIFwgICAtSCAnY2FjaGUtY29udHJvbDogbWF4LWFnZT0wJyBcICAgLUggJ2Nvb2tpZTogTWljcm9zb2Z0QXBwbGljYXRpb25zVGVsZW1ldHJ5RGV2aWNlSWQ9MzM5OWMwMDQtZmQwZS00OGVjLWJiOTItZDgyYTI3YjJiYmQ0OyBfRURHRV9WPTE7IFNSQ0hEPUFGPU5PRk9STTsgU1JDSFVJRD1WPTImR1VJRD0yOUVCRERBNEU2Njc0MzI5QUNDRjFBMEE0MjNDM0U5OCZkbW5jaGc9MTsgX1VSPVFTPTAmVFFTPTA7IF9IUFZOPUNTPWV5SlFiaUk2ZXlKRGJpSTZNU3dpVTNRaU9qQXNJbEZ6SWpvd0xDSlFjbTlrSWpvaVVDSjlMQ0pUWXlJNmV5SkRiaUk2TVN3aVUzUWlPakFzSWxGeklqb3dMQ0pRY205a0lqb2lTQ0o5TENKUmVpSTZleUpEYmlJNk1Td2lVM1FpT2pBc0lsRnpJam93TENKUWNtOWtJam9pVkNKOUxDSkJjQ0k2ZEhKMVpTd2lUWFYwWlNJNmRISjFaU3dpVEdGa0lqb2lNakF5TXkwd055MHlOVlF3TURvd01Eb3dNRm9pTENKSmIzUmtJam93TENKSGQySWlPakFzSWtSbWRDSTZiblZzYkN3aVRYWnpJam93TENKR2JIUWlPakFzSWtsdGNDSTZNbjA9OyBfUndCZj1pbHQ9MSZpaHBkPTEmaXNwZD0wJnJjPTAmcmI9MCZnYj0wJnJnPTIwMCZwYz0wJm10dT0wJnJiYj0wJmc9MCZjaWQ9JmNsbz0wJnY9MSZsPTIwMjMtMDctMjVUMDc6MDA6MDAuMDAwMDAwMFombGZ0PTAwMDEtMDEtMDFUMDA6MDA6MDAuMDAwMDAwMCZhb2Y9MCZvPTImcD0mYz0mdD0wJnM9MDAwMS0wMS0wMVQwMDowMDowMC4wMDAwMDAwKzAwOjAwJnRzPTIwMjMtMDctMjVUMTE6MDA6MzEuNzExMTU0OCswMDowMCZyd3JlZD0wJndscz0mbGthPTAmbGt0PTAmVEg9JmRjaT0wOyBBTk9OPUE9MDA0M0M2NTkwRUE4MDhFRDZFMzk1MDU5RkZGRkZGRkYmRT0xYzhiJlc9MTsgTkFQPVY9MS45JkU9MWMzMSZDPURuYU1TYkROXzRlZlpfeFhxQkYzRGFvcmpyNTNrWXFZb2FQOFlIc3Vwam1pWG55c1g3YTM3QSZXPTE7IFBQTFN0YXRlPTE7IEtpZXZSUFNTZWNBdXRoPUZBQlNCQlJhVE9KSUx0RnNNa3BMVldTRzZBTjZDL3N2UndObUFBQUVnQUFBQ01HVUE3RUdWU2pHRUFRQkdIdE5zYzVzTkw3dW5tSnNmUEoydDZpbWZvNEJlVUpsQWlhM0lwTVR0TVV5NFBVL0M1UUF6Ukk1cE9EdHNJZWUwK2JsZ2xsWHQvNUlpV3dHandtZGhpdnNGTTU5N3BSUGtqQVJQZndzUGhOTFBOYkpyQ1BOUEhkamU0SXM3OE1uQ0FEWHc2L05CcTJGTDhWMi9ieXcyZkg2SXVBTUQyTXZOL1Z2cXBFYTlaeGlEalp0RU5qNEhFajBtTzJTZ3pqZnlFaFZBa2p2em5KcVUycncvUTJ0SG1YOTROQU0ya3psektGL2hXUGhDQ1VtdThJSEx2Q25IRFM2bVNwdHZKRERQL3NwM292dHpPWGtQMW1sTS9YanU1ZnRlc1V2Y2NWRVFHZmZYT1JhMWRFNWhFTWJLSWlLWHoxdERkZHVTWEUxOWc5LyttUk1BamFRaHB3aEk4WG1pbENUeDFhZGIxTGw1cUsrVmpDOUdOZkVaemNic0dCUFZhT2wrYW5HOHJFTXErWG5oam83SitOcVROb2xhdkhnY3VWOGtKc0NlSlpJZ2VkMzNVQThlT1plRm8rd0FFQ01ndXhNb1NxZ3BHSCtzdGhxeW52RC9GSkQ2ci90aVUyTjN1cVZxOE5FOFYzN2Fzck42VDE0WjBGR0JKT2U2RVQxK1BHQXBtM3MxMU9ZOS94aEZFQjlUNUJFUFVHRWJ2UmNMY1cybmNGUVgwRVUreHdlaVBxbzFRMWhOVWcvZEN0U0krbFo3YzJIOFhoZWVQWmF2WjBUSlE4b05DU0F1S2lUcUptSTBmVkdwd2JYd2ZhQURrRWlwdWF3ejNmSXVNSkJOZ01VME90QTdIbTU5djJmR0xJQnV2aTZZZUtTNkdnVmszQklQZitQL2VLYWh3b3pyeFFaYUZub0hUU3FNa3ZjdDd4Q1A0YXRCUk9mWEtmNVd3MENjRktwKzJXWDlCSXNrVE9vMmpqazZiQXl5WUorRWxVQjFmZ0xLTms1bS9ZU01jOWlZQ0xJQk1JR044RjBZdnkzdFo3Y3ZoN1VlNUtsbzk4VVMvSStuVzFHN1pKTUhSZ1VPOGg4bHBuZUhxRU1lZ0tkOGd5bk80VkY3UnBDakprdW5EbVcwVGErUmtYQVA2MTlwZzBkcUhNRmtvT2drbk43OG9CYkdUVjZmSlVLb3R2K3ZpNjFrTGhBZVhaR1dvSEdDUlhoMndVQzZZZ2ZQZ0tBNkVTUk5IdEZuN0U1QjNISHBMYzVyVk1EU05oS1pZZmRodXBWNEV6ZjYrNURoTWNaTFpoaTBraytpdkRpTjFnZEhsVnRTTjU1eHB2ZitjK1haRHpSMHVoZ2N2Z3kwTEFibXpnazZ5NFdiWUgrTFFzTXB6Tk5qK2FDNzJ2TWlXb3ZXcktoOWpZNE1ZQ21kZ3hzUy9za1B0TGRwMThtdWlFSVJYVGJaUUdVbWh4RnBKQUliQklzQ3NjTXB6TDBCZ2V1anhVd001d3I3OVNkOXI0eHdiZ1NNd21CbEJmVUhSVkJkTnlnOGZlZXBlSmJDUzYzbkQ2ZUhPdUxxTVJzUElpbzN3L2tpL0VBYTkyVVVFaVplYXZMc01VRC95L3FBdldVZHpkUDVZK0MvVE0rQ01HUy9rR0w0TEVkWS8yOE1RZVR2VTFxdjFYMjFrUXQyYWlhajNwUFZMMzZoQXp4YmNMZ3FjTW85b3ltRFJ5ODdrZENYVy8rZzRvS0x0TWg2Zm0vRzZXNlkvQjAxSmx4b2h5eXZ1ZUhRSUc1NTd
1emtFa1RKM0ZuT1ZPRFNLQktwYjNXWjY1ckV4ZlY3MXpTWmEyNUYzR21wYUlHNkhpWXJYMllZaFFBa0lFOXBLRVFCSGJud0h1d05ER290dFpUWFp3PTsgV0xTPUM9OWRmM2Y5ZDg1MThmYWUxOSZOPXdlbjsgV0xJRD1wR1k4SGdXQ3U0cDVYWUNPazJvYTArREJkZnRrTVVmbU5JbjhYdFNqU1RLc2d2L0lsN0dVbFlzMEpwamYvRTEyalpNZ1Y3eDQ0RHkzZlhPZ2pqVW9KeDdZL0NsTHJMaHNrMjBUSGtzSkpvST07IF9FREdFX1M9Rj0xJlNJRD0xN0NGNkVFMDA2NDI2NDQ4MjEzQzdEQjkwNzQzNjU4OCZta3Q9emgtQ047IE1VSUQ9MjI1NjIxMDkzRDhBNkMyNzMwMTYzMjQxM0MwRTZEMDg7IE1VSURCPTIyNTYyMTA5M0Q4QTZDMjczMDE2MzI0MTNDMEU2RDA4OyBTVUlEPUE7IFNOUkhPUD1JPSZUUz07IF9VPW5HeXpLUXJ1RXNEd0xpdTY1ZlpGSUc2ZTEyaGYybHdUSm1yb1dfX2s4am9VSklLbUczT0lqYXlYS0dXOWRDVlIzc05oRjc2bUVWeHlXNnlqVUdQb2RPZmp0U2EzczNKX0R4TU9yRUsxQnFYQ09CSTliQzY2c3BBSUFTVjdwcnNZRmxWQUp6NzNqVk5FTnBfdEJ1YkxISnk2RWJUMEJLUmU0QWpyWWtILTl1TW5tQ0tCOFpteWc7IF9TUz1TSUQ9MTdDRjZFRTAwNjQyNjQ0ODIxM0M3REI5MDc0MzY1ODgmUj0wJlJCPTAmR0I9MCZSRz0yMDAmUlA9MCZQQz1VNTMxOyBTUkNIUz1QQz1VNTMxOyBVU1JMT0M9SFM9MSZFTE9DPUxBVD0yMi41MDE1Mjk2OTM2MDM1MTZ8TE9OPTExMy45MjYzNjg3MTMzNzg5fE49JUU1JThEJTk3JUU1JUIxJUIxJUU1JThDJUJBJUVGJUJDJThDJUU1JUI5JUJGJUU0JUI4JTlDJUU3JTlDJTgxfEVMVD0yfCZDTE9DPUxBVD0yMi41MDE1MzAyOTA0NjQ2MXxMT049MTEzLjkyNjM3MDcwNjMyOTI4fEE9NzMzLjQ0NjQ1ODYxMjA4MzJ8VFM9MjMwNzI2MTUxMDM0fFNSQz1XOyBTUkNIVVNSPURPQj0yMDIzMDcyNSZUPTE2OTAzODQ5MDgwMDAmUE9FWD1XOyBpcHY2PWhpdD0xNjkwMzg4NTA5OTc0JnQ9NjsgU1JDSEhQR1VTUj1IVj0xNjkwMzg0OTQ1JlNSQ0hMQU5HPXpoLUhhbnMmUFY9MTUuMC4wJkJSVz1NVyZCUkg9TVQmQ1c9NDEwJkNIPTc5NCZTQ1c9NDEwJlNDSD03OTQmRFBSPTEuNSZVVEM9NDgwJkRNPTAmV1RTPTYzODI1ODc5NjI3JlBSVkNXPTQxMCZQUlZDSD03OTQmUFI9MS41OyBjY3Q9QWpXSUJZT29WUC1BZnE2Z1d3dHg4MElmNnlIbjZpQnVFVkhBMVhIZEFLcG55NllfQ1Z5aV9NU3lNOTRWeU1XbmpkWWtrY2NWdG0zY3pvSUF0WFVHUUE7IEdDPUFqV0lCWU9vVlAtQWZxNmdXd3R4ODBJZjZ5SG42aUJ1RVZIQTFYSGRBS3BSM1lfRDlZdGNrczRIdDZYaGFkWGs3NWR2aHpQNFlPVVMwVW1vRXlxeXh3JyBcICAgLUggJ2RudDogMScgXCAgIC1IICdzZWMtY2gtdWE6ICJDaHJvbWl1bSI7dj0iMTE2IiwgIk5vdClBO0JyYW5kIjt2PSIyNCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2IicgXCAgIC1IICdzZWMtY2gtdWEtYXJjaDogIng4NiInIFwgICAtSCAnc2VjLWNoLXVhLWJpdG5lc3M6ICI2NCInIFwgICAtSCAnc2VjLWNoLXVhLWZ1bGwtdmVyc2lvbjogIjExNi4wLjE5MzguMjkiJyBcICAgLUggJ3NlYy1jaC11YS1mdWxsLXZlcnNpb24tbGlzdDogIkNocm9taXVtIjt2PSIxMTYuMC41ODQ1LjQyIiwgIk5vdClBO0JyYW5kIjt2PSIyNC4wLjAuMCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2LjAuMTkzOC4yOSInIFwgICAtSCAnc2VjLWNoLXVhLW1vYmlsZTogPzAnIFwgICAtSCAnc2VjLWNoLXVhLW1vZGVsOiAiIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm06ICJXaW5kb3dzIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm0tdmVyc2lvbjogIjE1LjAuMCInIFwgICAtSCAnc2VjLWZldGNoLWRlc3Q6IGRvY3VtZW50JyBcICAgLUggJ3NlYy1mZXRjaC1tb2RlOiBuYXZpZ2F0ZScgXCAgIC1IICdzZWMtZmV0Y2gtc2l0ZTogbm9uZScgXCAgIC1IICdzZWMtZmV0Y2gtdXNlcjogPzEnIFwgICAtSCAnc2VjLW1zLWdlYzogQjNGNDdBRDRBMjgzQ0FCMzc0QzA0NTFDNDZBQUZEMTQ3QzZBNERBQ0FGRjZBMUMxM0YzNEIyQzcyQjAyNDQ5NCcgXCAgIC1IICdzZWMtbXMtZ2VjLXZlcnNpb246IDEtMTE2LjAuMTkzOC4yOScgXCAgIC1IICd1cGdyYWRlLWluc2VjdXJlLXJlcXVlc3RzOiAxJyBcICAgLUggJ3VzZXItYWdlbnQ6IE1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS8xMTYuMC4wLjAgU2FmYXJpLzUzNy4zNiBFZGcvMTE2LjAuMC4wJyBcICAgLUggJ3gtY2xpZW50LWRhdGE6IGV5SXhJam9pTWlJc0lqRXdJam9pWENKVE1HZzNSMDVIT1RGMmFEUTFUVVpTVW5aNU5ITjJha1JtTVdkbGFWSktlbk54TmxBM2FVMVdibkYzUFZ3aUlpd2lNaUk2SWpFaUxDSXpJam9pTVNJc0lqUWlPaUl5TVRVNE9EUTVOVE00TWpZNE9UTTVOVEEzSWl3aU5TSTZJbHdpU205R1VXcFBURGszT1M5TWJrUlJabmxDZDJOMU0yRnNPVU4zZVRaVFFtZGFNR05ZTVhCdE9XVk1aejFjSWlJc0lqWWlPaUppWlhSaElpd2lOeUk2SWpFNE1ETTRPRFl5TmpRek5TSXNJamtpT2lKa1pYTnJkRzl3SW4wPScgXCAgIC1IICd4LWVkZ2Utc2hvcHBpbmctZmxhZzogMScgXCAgIC0tY29tcHJlc3NlZA==
```
</details>
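
Since the base64 example above is simply the saved `curl` command encoded, one plausible way to produce your own `BING_HEADER` from a saved command is the following sketch (`header.curl` is a hypothetical file holding the copied command; `-w 0`, which disables line wrapping, is GNU coreutils syntax):

```bash
BING_HEADER=$(base64 -w 0 header.curl)
echo "$BING_HEADER"
```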


## Acknowledgements
- Thanks to [EdgeGPT](https://github.com/acheong08/EdgeGPT) for the proxied-API approach.
- Thanks to [Vercel AI](https://github.com/vercel-labs/ai-chatbot) for the base scaffolding, and to [ChatHub](https://github.com/chathub-dev/chathub) and [go-proxy-bingai](https://github.com/adams549659584/go-proxy-bingai) for portions of the code.


## Q&A and community

<image src="./docs/images/wechat.png" width=240 />

## License

MIT © [LICENSE](https://github.com/weaigc/bingo/blob/main/LICENSE).
spaces/AIFILMS/generate_human_motion/VQ-Trans/dataset/dataset_TM_eval.py
DELETED
@@ -1,217 +0,0 @@
import torch
from torch.utils import data
import numpy as np
from os.path import join as pjoin
import random
import codecs as cs
from tqdm import tqdm

import utils.paramUtil as paramUtil
from torch.utils.data._utils.collate import default_collate


def collate_fn(batch):
    batch.sort(key=lambda x: x[3], reverse=True)
    return default_collate(batch)


'''For use of training text-2-motion generative model'''
class Text2MotionDataset(data.Dataset):
    def __init__(self, dataset_name, is_test, w_vectorizer, feat_bias = 5, max_text_len = 20, unit_length = 4):

        self.max_length = 20
        self.pointer = 0
        self.dataset_name = dataset_name
        self.is_test = is_test
        self.max_text_len = max_text_len
        self.unit_length = unit_length
        self.w_vectorizer = w_vectorizer
        if dataset_name == 't2m':
            self.data_root = './dataset/HumanML3D'
            self.motion_dir = pjoin(self.data_root, 'new_joint_vecs')
            self.text_dir = pjoin(self.data_root, 'texts')
            self.joints_num = 22
            radius = 4
            fps = 20
            self.max_motion_length = 196
            dim_pose = 263
            kinematic_chain = paramUtil.t2m_kinematic_chain
            self.meta_dir = 'checkpoints/t2m/VQVAEV3_CB1024_CMT_H1024_NRES3/meta'
        elif dataset_name == 'kit':
            self.data_root = './dataset/KIT-ML'
            self.motion_dir = pjoin(self.data_root, 'new_joint_vecs')
            self.text_dir = pjoin(self.data_root, 'texts')
            self.joints_num = 21
            radius = 240 * 8
            fps = 12.5
            dim_pose = 251
            self.max_motion_length = 196
            kinematic_chain = paramUtil.kit_kinematic_chain
            self.meta_dir = 'checkpoints/kit/VQVAEV3_CB1024_CMT_H1024_NRES3/meta'

        mean = np.load(pjoin(self.meta_dir, 'mean.npy'))
        std = np.load(pjoin(self.meta_dir, 'std.npy'))

        if is_test:
            split_file = pjoin(self.data_root, 'test.txt')
        else:
            split_file = pjoin(self.data_root, 'val.txt')

        min_motion_len = 40 if self.dataset_name == 't2m' else 24
        # min_motion_len = 64

        joints_num = self.joints_num

        data_dict = {}
        id_list = []
        with cs.open(split_file, 'r') as f:
            for line in f.readlines():
                id_list.append(line.strip())

        new_name_list = []
        length_list = []
        for name in tqdm(id_list):
            try:
                motion = np.load(pjoin(self.motion_dir, name + '.npy'))
                if (len(motion)) < min_motion_len or (len(motion) >= 200):
                    continue
                text_data = []
                flag = False
                with cs.open(pjoin(self.text_dir, name + '.txt')) as f:
                    for line in f.readlines():
                        text_dict = {}
                        line_split = line.strip().split('#')
                        caption = line_split[0]
                        tokens = line_split[1].split(' ')
                        f_tag = float(line_split[2])
                        to_tag = float(line_split[3])
                        f_tag = 0.0 if np.isnan(f_tag) else f_tag
                        to_tag = 0.0 if np.isnan(to_tag) else to_tag

                        text_dict['caption'] = caption
                        text_dict['tokens'] = tokens
                        if f_tag == 0.0 and to_tag == 0.0:
                            flag = True
                            text_data.append(text_dict)
                        else:
                            try:
                                n_motion = motion[int(f_tag*fps) : int(to_tag*fps)]
                                if (len(n_motion)) < min_motion_len or (len(n_motion) >= 200):
                                    continue
                                new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
                                while new_name in data_dict:
                                    new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
                                data_dict[new_name] = {'motion': n_motion,
                                                       'length': len(n_motion),
                                                       'text': [text_dict]}
                                new_name_list.append(new_name)
                                length_list.append(len(n_motion))
                            except:
                                print(line_split)
                                print(line_split[2], line_split[3], f_tag, to_tag, name)
                                # break

                if flag:
                    data_dict[name] = {'motion': motion,
                                       'length': len(motion),
                                       'text': text_data}
                    new_name_list.append(name)
                    length_list.append(len(motion))
            except Exception as e:
                # print(e)
                pass

        name_list, length_list = zip(*sorted(zip(new_name_list, length_list), key=lambda x: x[1]))
        self.mean = mean
        self.std = std
        self.length_arr = np.array(length_list)
        self.data_dict = data_dict
        self.name_list = name_list
        self.reset_max_len(self.max_length)

    def reset_max_len(self, length):
        assert length <= self.max_motion_length
        self.pointer = np.searchsorted(self.length_arr, length)
        print("Pointer Pointing at %d" % self.pointer)
        self.max_length = length

    def inv_transform(self, data):
        return data * self.std + self.mean

    def forward_transform(self, data):
        return (data - self.mean) / self.std

    def __len__(self):
        return len(self.data_dict) - self.pointer

    def __getitem__(self, item):
        idx = self.pointer + item
        name = self.name_list[idx]
        data = self.data_dict[name]
        # data = self.data_dict[self.name_list[idx]]
|
152 |
-
motion, m_length, text_list = data['motion'], data['length'], data['text']
|
153 |
-
# Randomly select a caption
|
154 |
-
text_data = random.choice(text_list)
|
155 |
-
caption, tokens = text_data['caption'], text_data['tokens']
|
156 |
-
|
157 |
-
if len(tokens) < self.max_text_len:
|
158 |
-
# pad with "unk"
|
159 |
-
tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
|
160 |
-
sent_len = len(tokens)
|
161 |
-
tokens = tokens + ['unk/OTHER'] * (self.max_text_len + 2 - sent_len)
|
162 |
-
else:
|
163 |
-
# crop
|
164 |
-
tokens = tokens[:self.max_text_len]
|
165 |
-
tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
|
166 |
-
sent_len = len(tokens)
|
167 |
-
pos_one_hots = []
|
168 |
-
word_embeddings = []
|
169 |
-
for token in tokens:
|
170 |
-
word_emb, pos_oh = self.w_vectorizer[token]
|
171 |
-
pos_one_hots.append(pos_oh[None, :])
|
172 |
-
word_embeddings.append(word_emb[None, :])
|
173 |
-
pos_one_hots = np.concatenate(pos_one_hots, axis=0)
|
174 |
-
word_embeddings = np.concatenate(word_embeddings, axis=0)
|
175 |
-
|
176 |
-
if self.unit_length < 10:
|
177 |
-
coin2 = np.random.choice(['single', 'single', 'double'])
|
178 |
-
else:
|
179 |
-
coin2 = 'single'
|
180 |
-
|
181 |
-
if coin2 == 'double':
|
182 |
-
m_length = (m_length // self.unit_length - 1) * self.unit_length
|
183 |
-
elif coin2 == 'single':
|
184 |
-
m_length = (m_length // self.unit_length) * self.unit_length
|
185 |
-
idx = random.randint(0, len(motion) - m_length)
|
186 |
-
motion = motion[idx:idx+m_length]
|
187 |
-
|
188 |
-
"Z Normalization"
|
189 |
-
motion = (motion - self.mean) / self.std
|
190 |
-
|
191 |
-
if m_length < self.max_motion_length:
|
192 |
-
motion = np.concatenate([motion,
|
193 |
-
np.zeros((self.max_motion_length - m_length, motion.shape[1]))
|
194 |
-
], axis=0)
|
195 |
-
|
196 |
-
return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(tokens), name
|
197 |
-
|
198 |
-
|
199 |
-
|
200 |
-
|
201 |
-
def DATALoader(dataset_name, is_test,
|
202 |
-
batch_size, w_vectorizer,
|
203 |
-
num_workers = 8, unit_length = 4) :
|
204 |
-
|
205 |
-
val_loader = torch.utils.data.DataLoader(Text2MotionDataset(dataset_name, is_test, w_vectorizer, unit_length=unit_length),
|
206 |
-
batch_size,
|
207 |
-
shuffle = True,
|
208 |
-
num_workers=num_workers,
|
209 |
-
collate_fn=collate_fn,
|
210 |
-
drop_last = True)
|
211 |
-
return val_loader
|
212 |
-
|
213 |
-
|
214 |
-
def cycle(iterable):
|
215 |
-
while True:
|
216 |
-
for x in iterable:
|
217 |
-
yield x
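
For orientation, a minimal sketch of how this evaluation loader is typically driven. The `WordVectorizer` import and the GloVe paths are assumptions based on the companion text-to-motion utilities usually shipped with this code, not part of the file above.

```python
# Hypothetical driver for the loader above; WordVectorizer and the glove
# paths are assumed helpers, not defined in this file.
from utils.word_vectorizer import WordVectorizer  # assumed helper module
from dataset.dataset_TM_eval import DATALoader, cycle

w_vectorizer = WordVectorizer('./glove', 'our_vab')  # assumed embedding paths
val_loader = DATALoader('t2m', is_test=True, batch_size=32, w_vectorizer=w_vectorizer)

# cycle() turns the finite loader into an endless stream for evaluation loops.
val_iter = cycle(val_loader)
word_emb, pos_oh, caption, sent_len, motion, m_length, tokens, name = next(val_iter)
```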
spaces/AIGC-Audio/AudioGPT/sound_extraction/utils/wav_io.py
DELETED
@@ -1,23 +0,0 @@
import librosa
import librosa.filters
import math
import numpy as np
import scipy.io.wavfile

def load_wav(path):
    max_length = 32000 * 10
    wav = librosa.core.load(path, sr=32000)[0]
    # truncate clips longer than 10 s
    if len(wav) > max_length:
        wav = wav[0:max_length]

    # pad audio to max length, 10s for AudioCaps
    if len(wav) < max_length:
        # audio = torch.nn.functional.pad(audio, (0, self.max_length - audio.size(1)), 'constant')
        wav = np.pad(wav, (0, max_length - len(wav)), 'constant')
    wav = wav[..., None]
    return wav


def save_wav(wav, path):
    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
    scipy.io.wavfile.write(path, 32000, wav.astype(np.int16))
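
A quick round-trip sketch of the two helpers above; the file names are placeholders and the import path mirrors this file's location in the space.

```python
from sound_extraction.utils.wav_io import load_wav, save_wav  # path as in this space

wav = load_wav('example.wav')          # placeholder input; returns a (320000, 1) float array
print(wav.shape)                       # always exactly 10 s at 32 kHz after truncate/pad
save_wav(wav.squeeze(-1), 'out.wav')   # peak-normalizes to int16 range before writing
```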
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/utils/utils.py
DELETED
@@ -1,171 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)

"""Utility functions."""

import fnmatch
import logging
import os
import sys

try:
    import h5py
except ImportError:
    pass
import numpy as np


def find_files(root_dir, query="*.wav", include_root_dir=True):
    """Find files recursively.

    Args:
        root_dir (str): Root directory to search.
        query (str): Query to find.
        include_root_dir (bool): If False, root_dir name is not included.

    Returns:
        list: List of found filenames.

    """
    files = []
    for root, dirnames, filenames in os.walk(root_dir, followlinks=True):
        for filename in fnmatch.filter(filenames, query):
            files.append(os.path.join(root, filename))
    if not include_root_dir:
        files = [file_.replace(root_dir + "/", "") for file_ in files]

    return files


def read_hdf5(hdf5_name, hdf5_path):
    """Read hdf5 dataset.

    Args:
        hdf5_name (str): Filename of hdf5 file.
        hdf5_path (str): Dataset name in hdf5 file.

    Return:
        any: Dataset values.

    """
    if not os.path.exists(hdf5_name):
        logging.error(f"There is no such hdf5 file ({hdf5_name}).")
        sys.exit(1)

    hdf5_file = h5py.File(hdf5_name, "r")

    if hdf5_path not in hdf5_file:
        logging.error(f"There is no such data in the hdf5 file. ({hdf5_path})")
        sys.exit(1)

    hdf5_data = hdf5_file[hdf5_path][()]
    hdf5_file.close()

    return hdf5_data


def write_hdf5(hdf5_name, hdf5_path, write_data, is_overwrite=True):
    """Write dataset to hdf5.

    Args:
        hdf5_name (str): Hdf5 dataset filename.
        hdf5_path (str): Dataset path in hdf5.
        write_data (ndarray): Data to write.
        is_overwrite (bool): Whether to overwrite dataset.

    """
    # convert to numpy array
    write_data = np.array(write_data)

    # check folder existence
    folder_name, _ = os.path.split(hdf5_name)
    if not os.path.exists(folder_name) and len(folder_name) != 0:
        os.makedirs(folder_name)

    # check hdf5 existence
    if os.path.exists(hdf5_name):
        # if already exists, open with r+ mode
        hdf5_file = h5py.File(hdf5_name, "r+")
        # check dataset existence
        if hdf5_path in hdf5_file:
            if is_overwrite:
                logging.warning("Dataset in hdf5 file already exists. "
                                "recreate dataset in hdf5.")
                hdf5_file.__delitem__(hdf5_path)
            else:
                logging.error("Dataset in hdf5 file already exists. "
                              "if you want to overwrite, please set is_overwrite = True.")
                hdf5_file.close()
                sys.exit(1)
    else:
        # if not exists, open with w mode
        hdf5_file = h5py.File(hdf5_name, "w")

    # write data to hdf5
    hdf5_file.create_dataset(hdf5_path, data=write_data)
    hdf5_file.flush()
    hdf5_file.close()


class HDF5ScpLoader(object):
    """Loader class for a feats.scp file of hdf5 file.

    Examples:
        key1 /some/path/a.h5:feats
        key2 /some/path/b.h5:feats
        key3 /some/path/c.h5:feats
        key4 /some/path/d.h5:feats
        ...
        >>> loader = HDF5ScpLoader("hdf5.scp")
        >>> array = loader["key1"]

        key1 /some/path/a.h5
        key2 /some/path/b.h5
        key3 /some/path/c.h5
        key4 /some/path/d.h5
        ...
        >>> loader = HDF5ScpLoader("hdf5.scp", "feats")
        >>> array = loader["key1"]

    """

    def __init__(self, feats_scp, default_hdf5_path="feats"):
        """Initialize HDF5 scp loader.

        Args:
            feats_scp (str): Kaldi-style feats.scp file with hdf5 format.
            default_hdf5_path (str): Path in hdf5 file. If the scp contains the info, this is not used.

        """
        self.default_hdf5_path = default_hdf5_path
        with open(feats_scp) as f:
            lines = [line.replace("\n", "") for line in f.readlines()]
        self.data = {}
        for line in lines:
            key, value = line.split()
            self.data[key] = value

    def get_path(self, key):
        """Get hdf5 file path for a given key."""
        return self.data[key]

    def __getitem__(self, key):
        """Get ndarray for a given key."""
        p = self.data[key]
        if ":" in p:
            return read_hdf5(*p.split(":"))
        else:
            return read_hdf5(p, self.default_hdf5_path)

    def __len__(self):
        """Return the length of the scp file."""
        return len(self.data)

    def __iter__(self):
        """Return the iterator of the scp file."""
        return iter(self.data)

    def keys(self):
        """Return the keys of the scp file."""
        return self.data.keys()
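
A round-trip sketch of the hdf5 helpers defined above; the file names and scp contents are placeholders.

```python
# Uses write_hdf5, read_hdf5 and HDF5ScpLoader from the module above.
import numpy as np

# write_hdf5 creates the parent directory if needed and overwrites by default.
feats = np.random.randn(100, 80).astype(np.float32)
write_hdf5("dump/utt1.h5", "feats", feats)
assert np.allclose(read_hdf5("dump/utt1.h5", "feats"), feats)

# Kaldi-style scp access: each line maps a key to "<file>.h5:<dataset>".
with open("hdf5.scp", "w") as f:
    f.write("utt1 dump/utt1.h5:feats\n")
loader = HDF5ScpLoader("hdf5.scp")
assert np.allclose(loader["utt1"], feats)
```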
spaces/AIGText/GlyphControl/ldm/models/diffusion/ddim.py
DELETED
@@ -1,337 +0,0 @@
"""SAMPLING ONLY."""

import torch
import numpy as np
from tqdm import tqdm

from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor


class DDIMSampler(object):
    def __init__(self, model, schedule="linear", **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        if type(attr) == torch.Tensor:
            if attr.device != torch.device("cuda"):
                attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    # make schedule for DDIM
    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))

        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                    1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,  # has to come in the same format as the conditioning, e.g. as encoded tokens
               dynamic_threshold=None,
               ucg_schedule=None,
               **kwargs
               ):
        if conditioning is not None:
            if isinstance(conditioning, dict):
                ctmp = conditioning[list(conditioning.keys())[0]]
                while isinstance(ctmp, list):
                    ctmp = ctmp[0]
                cbs = ctmp.shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")

            elif isinstance(conditioning, list):
                for ctmp in conditioning:
                    if ctmp.shape[0] != batch_size:
                        print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")

            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for DDIM sampling is {size}, eta {eta}')

        samples, intermediates = self.ddim_sampling(conditioning, size,
                                                    callback=callback,
                                                    img_callback=img_callback,
                                                    quantize_denoised=quantize_x0,
                                                    mask=mask, x0=x0,
                                                    ddim_use_original_steps=False,
                                                    noise_dropout=noise_dropout,
                                                    temperature=temperature,
                                                    score_corrector=score_corrector,
                                                    corrector_kwargs=corrector_kwargs,
                                                    x_T=x_T,
                                                    log_every_t=log_every_t,
                                                    unconditional_guidance_scale=unconditional_guidance_scale,
                                                    unconditional_conditioning=unconditional_conditioning,
                                                    dynamic_threshold=dynamic_threshold,
                                                    ucg_schedule=ucg_schedule
                                                    )
        return samples, intermediates

    @torch.no_grad()
    def ddim_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
                      ucg_schedule=None):
        device = self.model.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]

        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running DDIM Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)

        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            # print(ts[0])
            if mask is not None:
                assert x0 is not None
                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
                img = img_orig * mask + (1. - mask) * img

            if ucg_schedule is not None:  # schedule for unconditional guidance scale
                assert len(ucg_schedule) == len(time_range)
                unconditional_guidance_scale = ucg_schedule[i]
            # one step in reverse process
            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      dynamic_threshold=dynamic_threshold)
            img, pred_x0 = outs
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)

            if index % log_every_t == 0 or index == total_steps - 1:
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)

        return img, intermediates

    # one step
    @torch.no_grad()
    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None,
                      dynamic_threshold=None):
        b, *_, device = *x.shape, x.device

        if unconditional_conditioning is None or unconditional_guidance_scale == 1.:  # no classifier-free guidance
            model_output = self.model.apply_model(x, t, c)
        else:
            x_in = torch.cat([x] * 2)
            t_in = torch.cat([t] * 2)
            if isinstance(c, dict):
                assert isinstance(unconditional_conditioning, dict)
                c_in = dict()
                for k in c:
                    if isinstance(c[k], list):
                        c_in[k] = [torch.cat([
                            unconditional_conditioning[k][i],
                            c[k][i]]) for i in range(len(c[k]))]
                    else:
                        c_in[k] = torch.cat([
                            unconditional_conditioning[k],
                            c[k]])
            elif isinstance(c, list):
                c_in = list()
                assert isinstance(unconditional_conditioning, list)
                for i in range(len(c)):
                    c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))
            else:  # predict simultaneously for both with condition and without condition
                c_in = torch.cat([unconditional_conditioning, c])
            model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
            model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)
            # s * e(x, t, c) + (1-s) * e(x, t, None)

        if self.model.parameterization == "v":
            e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
        else:
            e_t = model_output

        if score_corrector is not None:
            assert self.model.parameterization == "eps", 'not implemented'
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
        # select parameters corresponding to the currently considered timestep
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)

        # current prediction for x_0
        if self.model.parameterization != "v":
            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
        else:
            pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)

        if quantize_denoised:
            pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)

        if dynamic_threshold is not None:
            raise NotImplementedError()

        # direction pointing to x_t
        dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
        noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
        return x_prev, pred_x0

    @torch.no_grad()
    def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
               unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):
        num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]

        assert t_enc <= num_reference_steps
        num_steps = t_enc

        if use_original_steps:
            alphas_next = self.alphas_cumprod[:num_steps]
            alphas = self.alphas_cumprod_prev[:num_steps]
        else:
            alphas_next = self.ddim_alphas[:num_steps]
            alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])

        x_next = x0
        intermediates = []
        inter_steps = []
        for i in tqdm(range(num_steps), desc='Encoding Image'):
            t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)
            if unconditional_guidance_scale == 1.:
                noise_pred = self.model.apply_model(x_next, t, c)
            else:
                assert unconditional_conditioning is not None
                e_t_uncond, noise_pred = torch.chunk(
                    self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
                                           torch.cat((unconditional_conditioning, c))), 2)
                noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)

            xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
            weighted_noise_pred = alphas_next[i].sqrt() * (
                    (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred
            x_next = xt_weighted + weighted_noise_pred
            if return_intermediates and i % (
                    num_steps // return_intermediates) == 0 and i < num_steps - 1:
                intermediates.append(x_next)
                inter_steps.append(i)
            elif return_intermediates and i >= num_steps - 2:
                intermediates.append(x_next)
                inter_steps.append(i)
            if callback: callback(i)

        out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
        if return_intermediates:
            out.update({'intermediates': intermediates})
        return x_next, out

    @torch.no_grad()
    def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
        # fast, but does not allow for exact reconstruction
        # t serves as an index to gather the correct alphas
        if use_original_steps:
            sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
            sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
        else:
            sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
            sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas

        if noise is None:
            noise = torch.randn_like(x0)
        return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
                extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)

    @torch.no_grad()
    def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
               use_original_steps=False, callback=None):

        timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
        timesteps = timesteps[:t_start]

        time_range = np.flip(timesteps)
        total_steps = timesteps.shape[0]
        print(f"Running DDIM Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
        x_dec = x_latent
        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
            x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
                                          unconditional_guidance_scale=unconditional_guidance_scale,
                                          unconditional_conditioning=unconditional_conditioning)
            if callback: callback(i)
        return x_dec
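
A hypothetical sampling sketch for the class above. Loading the `LatentDiffusion` model and building the conditionings `c`/`uc` are project-specific and only assumed here; nothing below is part of the file itself.

```python
# `model`, `c` (conditioning) and `uc` (unconditional conditioning) are
# assumed to exist already; how they are built depends on the project config.
sampler = DDIMSampler(model)
samples, intermediates = sampler.sample(
    S=50,                            # number of DDIM steps
    batch_size=4,
    shape=(4, 64, 64),               # (C, H, W) of the latent space
    conditioning=c,
    unconditional_guidance_scale=7.5,
    unconditional_conditioning=uc,
    eta=0.0,                         # eta=0 gives deterministic DDIM
)
x_samples = model.decode_first_stage(samples)  # back to image space
```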
spaces/ATang0729/Forecast4Muses/Model/__init__.py
DELETED
File without changes
spaces/Abhilashvj/haystack_QA/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Haystack QA
emoji: 📚
colorFrom: yellow
colorTo: green
sdk: streamlit
sdk_version: 1.15.2
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circularprogress/CircularProgress.js
DELETED
@@ -1,2 +0,0 @@
import CircularProgress from '../../../plugins/circularprogress.js';
export default CircularProgress;
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/op/upfirdn2d.py
DELETED
@@ -1,60 +0,0 @@
import os

import torch
from torch.nn import functional as F


module_path = os.path.dirname(__file__)


def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    out = upfirdn2d_native(
        input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]
    )

    return out


def upfirdn2d_native(
    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
    _, channel, in_h, in_w = input.shape
    input = input.reshape(-1, in_h, in_w, 1)

    _, in_h, in_w, minor = input.shape
    kernel_h, kernel_w = kernel.shape

    out = input.view(-1, in_h, 1, in_w, 1, minor)
    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
    out = out.view(-1, in_h * up_y, in_w * up_x, minor)

    out = F.pad(
        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
    )
    out = out[
        :,
        max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
        :,
    ]

    out = out.permute(0, 3, 1, 2)
    out = out.reshape(
        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
    )
    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    out = F.conv2d(out, w)
    out = out.reshape(
        -1,
        minor,
        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
    )
    out = out.permute(0, 2, 3, 1)
    out = out[:, ::down_y, ::down_x, :]

    out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
    out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1

    return out.view(-1, channel, out_h, out_w)
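
For reference, a sketch of the typical StyleGAN2-style call to the function above: 2x upsampling through a separable [1, 3, 3, 1] blur kernel. The padding arithmetic follows the output-size formulas at the end of `upfirdn2d_native`.

```python
# Uses upfirdn2d from the module above.
import torch

k = torch.tensor([1., 3., 3., 1.])
kernel = k[:, None] * k[None, :]
kernel = kernel / kernel.sum() * 4        # gain of up**2 for upsampling
x = torch.randn(1, 3, 16, 16)
p = kernel.shape[0] - 2                   # kernel size minus the factor (2)
y = upfirdn2d(x, kernel, up=2, pad=((p + 1) // 2 + 1, p // 2))
print(y.shape)                            # torch.Size([1, 3, 32, 32])
```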
spaces/Amrrs/DragGan-Inversion/PTI/models/__init__.py
DELETED
File without changes
spaces/Amrrs/numerizerlit/app.py
DELETED
@@ -1,49 +0,0 @@
# load required libraries

import streamlit as st
import spacy
#import en_core_web_sm
from numerizer import numerize


st.title("Numerizer - Convert *English Numbers* into *Ints* and *Floats*")

@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def load_model():
    """Load a spaCy model."""
    model = spacy.load("en_core_web_sm")
    return model


@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def process_text(text: str):
    """Process a text and create a Doc object."""
    nlp = load_model()
    return nlp(text)

st.markdown("Input Text")

inp_text = st.text_input(label="Add text here", value="Two plus Two equals Four")
#inp_text = 'The Hogwarts Express is at platform nine and three quarters and platform nine and three quarters'

st.write(inp_text)

doc = process_text(inp_text)


numerized_parts = doc._.numerize()

st.markdown("Numerized Sections \n")

st.markdown(numerized_parts)


final_sentence = inp_text

for key in numerized_parts.keys():
    #print(key)
    final_sentence = final_sentence.replace(str(key), numerized_parts[key])

st.write("### Numerized Output Text")

st.write(final_sentence)
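
The same numerizer flow works outside Streamlit; a small standalone sketch (importing `numerizer` registers the `._.numerize` spaCy extension the app above relies on, and the sample sentences are illustrative):

```python
import spacy
from numerizer import numerize  # importing also registers the spaCy extension

nlp = spacy.load("en_core_web_sm")
doc = nlp("The projector weighs twenty five kilograms")
print(doc._.numerize())       # maps spans to numeric strings, e.g. {twenty five: '25'}
print(numerize("forty two"))  # direct string API: '42'
```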
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py
DELETED
@@ -1,185 +0,0 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conversion script for the NCSNPP checkpoints. """

import argparse
import json

import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel


def convert_ncsnpp_checkpoint(checkpoint, config):
    """
    Takes a state dict and a model config, and returns a state dict for the
    corresponding diffusers UNet2DModel.
    """
    new_model_architecture = UNet2DModel(**config)
    new_model_architecture.time_proj.W.data = checkpoint["all_modules.0.W"].data
    new_model_architecture.time_proj.weight.data = checkpoint["all_modules.0.W"].data
    new_model_architecture.time_embedding.linear_1.weight.data = checkpoint["all_modules.1.weight"].data
    new_model_architecture.time_embedding.linear_1.bias.data = checkpoint["all_modules.1.bias"].data

    new_model_architecture.time_embedding.linear_2.weight.data = checkpoint["all_modules.2.weight"].data
    new_model_architecture.time_embedding.linear_2.bias.data = checkpoint["all_modules.2.bias"].data

    new_model_architecture.conv_in.weight.data = checkpoint["all_modules.3.weight"].data
    new_model_architecture.conv_in.bias.data = checkpoint["all_modules.3.bias"].data

    new_model_architecture.conv_norm_out.weight.data = checkpoint[list(checkpoint.keys())[-4]].data
    new_model_architecture.conv_norm_out.bias.data = checkpoint[list(checkpoint.keys())[-3]].data
    new_model_architecture.conv_out.weight.data = checkpoint[list(checkpoint.keys())[-2]].data
    new_model_architecture.conv_out.bias.data = checkpoint[list(checkpoint.keys())[-1]].data

    module_index = 4

    def set_attention_weights(new_layer, old_checkpoint, index):
        new_layer.query.weight.data = old_checkpoint[f"all_modules.{index}.NIN_0.W"].data.T
        new_layer.key.weight.data = old_checkpoint[f"all_modules.{index}.NIN_1.W"].data.T
        new_layer.value.weight.data = old_checkpoint[f"all_modules.{index}.NIN_2.W"].data.T

        new_layer.query.bias.data = old_checkpoint[f"all_modules.{index}.NIN_0.b"].data
        new_layer.key.bias.data = old_checkpoint[f"all_modules.{index}.NIN_1.b"].data
        new_layer.value.bias.data = old_checkpoint[f"all_modules.{index}.NIN_2.b"].data

        new_layer.proj_attn.weight.data = old_checkpoint[f"all_modules.{index}.NIN_3.W"].data.T
        new_layer.proj_attn.bias.data = old_checkpoint[f"all_modules.{index}.NIN_3.b"].data

        new_layer.group_norm.weight.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.weight"].data
        new_layer.group_norm.bias.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.bias"].data

    def set_resnet_weights(new_layer, old_checkpoint, index):
        new_layer.conv1.weight.data = old_checkpoint[f"all_modules.{index}.Conv_0.weight"].data
        new_layer.conv1.bias.data = old_checkpoint[f"all_modules.{index}.Conv_0.bias"].data
        new_layer.norm1.weight.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.weight"].data
        new_layer.norm1.bias.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.bias"].data

        new_layer.conv2.weight.data = old_checkpoint[f"all_modules.{index}.Conv_1.weight"].data
        new_layer.conv2.bias.data = old_checkpoint[f"all_modules.{index}.Conv_1.bias"].data
        new_layer.norm2.weight.data = old_checkpoint[f"all_modules.{index}.GroupNorm_1.weight"].data
        new_layer.norm2.bias.data = old_checkpoint[f"all_modules.{index}.GroupNorm_1.bias"].data

        new_layer.time_emb_proj.weight.data = old_checkpoint[f"all_modules.{index}.Dense_0.weight"].data
        new_layer.time_emb_proj.bias.data = old_checkpoint[f"all_modules.{index}.Dense_0.bias"].data

        if new_layer.in_channels != new_layer.out_channels or new_layer.up or new_layer.down:
            new_layer.conv_shortcut.weight.data = old_checkpoint[f"all_modules.{index}.Conv_2.weight"].data
            new_layer.conv_shortcut.bias.data = old_checkpoint[f"all_modules.{index}.Conv_2.bias"].data

    for i, block in enumerate(new_model_architecture.downsample_blocks):
        has_attentions = hasattr(block, "attentions")
        for j in range(len(block.resnets)):
            set_resnet_weights(block.resnets[j], checkpoint, module_index)
            module_index += 1
            if has_attentions:
                set_attention_weights(block.attentions[j], checkpoint, module_index)
                module_index += 1

        if hasattr(block, "downsamplers") and block.downsamplers is not None:
            set_resnet_weights(block.resnet_down, checkpoint, module_index)
            module_index += 1
            block.skip_conv.weight.data = checkpoint[f"all_modules.{module_index}.Conv_0.weight"].data
            block.skip_conv.bias.data = checkpoint[f"all_modules.{module_index}.Conv_0.bias"].data
            module_index += 1

    set_resnet_weights(new_model_architecture.mid_block.resnets[0], checkpoint, module_index)
    module_index += 1
    set_attention_weights(new_model_architecture.mid_block.attentions[0], checkpoint, module_index)
    module_index += 1
    set_resnet_weights(new_model_architecture.mid_block.resnets[1], checkpoint, module_index)
    module_index += 1

    for i, block in enumerate(new_model_architecture.up_blocks):
        has_attentions = hasattr(block, "attentions")
        for j in range(len(block.resnets)):
            set_resnet_weights(block.resnets[j], checkpoint, module_index)
            module_index += 1
        if has_attentions:
            set_attention_weights(
                block.attentions[0], checkpoint, module_index
            )  # why can there only be a single attention layer for up?
            module_index += 1

        if hasattr(block, "resnet_up") and block.resnet_up is not None:
            block.skip_norm.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data
            block.skip_norm.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data
            module_index += 1
            block.skip_conv.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data
            block.skip_conv.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data
            module_index += 1
            set_resnet_weights(block.resnet_up, checkpoint, module_index)
            module_index += 1

    new_model_architecture.conv_norm_out.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data
    new_model_architecture.conv_norm_out.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data
    module_index += 1
    new_model_architecture.conv_out.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data
    new_model_architecture.conv_out.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data

    return new_model_architecture.state_dict()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default="/Users/arthurzucker/Work/diffusers/ArthurZ/diffusion_pytorch_model.bin",
        type=str,
        required=False,
        help="Path to the checkpoint to convert.",
    )

    parser.add_argument(
        "--config_file",
        default="/Users/arthurzucker/Work/diffusers/ArthurZ/config.json",
        type=str,
        required=False,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument(
        "--dump_path",
        default="/Users/arthurzucker/Work/diffusers/ArthurZ/diffusion_model_new.pt",
        type=str,
        required=False,
        help="Path to the output model.",
    )

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path, map_location="cpu")

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ncsnpp_checkpoint(
        checkpoint,
        config,
    )

    if "sde" in config:
        del config["sde"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = ScoreSdeVeScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
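
A minimal sketch of driving the converter programmatically rather than through argparse; the file names below are placeholders for a real NCSNPP checkpoint and its config.

```python
# Uses convert_ncsnpp_checkpoint and UNet2DModel from the script above.
import json

import torch

checkpoint = torch.load("diffusion_pytorch_model.bin", map_location="cpu")  # placeholder path
with open("config.json") as f:                                              # placeholder path
    config = json.load(f)

state_dict = convert_ncsnpp_checkpoint(checkpoint, config)
config.pop("sde", None)            # UNet2DModel does not accept the "sde" key
model = UNet2DModel(**config)
model.load_state_dict(state_dict)
model.save_pretrained("ncsnpp-diffusers")                                   # placeholder path
```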
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py
DELETED
@@ -1,339 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from diffusers import (
    KandinskyV22CombinedPipeline,
    KandinskyV22Img2ImgCombinedPipeline,
    KandinskyV22InpaintCombinedPipeline,
)
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin
from .test_kandinsky import Dummies
from .test_kandinsky_img2img import Dummies as Img2ImgDummies
from .test_kandinsky_inpaint import Dummies as InpaintDummies
from .test_kandinsky_prior import Dummies as PriorDummies


enable_full_determinism()


class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22CombinedPipeline
    params = [
        "prompt",
    ]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummy = Dummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        prior_dummy = PriorDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(
            {
                "height": 64,
                "width": 64,
            }
        )
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.3013, 0.0471, 0.5176, 0.1817, 0.2566, 0.7076, 0.6712, 0.4421, 0.7503])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)


class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgCombinedPipeline
    params = ["prompt", "image"]
    batch_params = ["prompt", "negative_prompt", "image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummy = Img2ImgDummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        prior_dummy = PriorDummies()
        dummy = Img2ImgDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
        inputs.pop("image_embeds")
        inputs.pop("negative_image_embeds")
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4353, 0.4710, 0.5128, 0.4806, 0.5054, 0.5348, 0.5224, 0.4603, 0.5025])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)


class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintCombinedPipeline
    params = ["prompt", "image", "mask_image"]
    batch_params = ["prompt", "negative_prompt", "image", "mask_image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummy = InpaintDummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        prior_dummy = PriorDummies()
        dummy = InpaintDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
        inputs.pop("image_embeds")
        inputs.pop("negative_image_embeds")
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
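For orientation, a standalone sketch of the component-merge idiom the get_dummy_components methods above rely on: the prior sub-pipeline's parts are re-keyed with a "prior_" prefix so prior and decoder components can feed a single combined-pipeline constructor. The component names below are illustrative placeholders, not the actual dummy modules.

decoder_components = {"unet": "<decoder unet>", "movq": "<movq>"}          # placeholder values
prior_components = {"prior": "<prior>", "image_encoder": "<clip vision>"}  # placeholder values

components = dict(decoder_components)
components.update({f"prior_{k}": v for k, v in prior_components.items()})
print(sorted(components))
# ['movq', 'prior_image_encoder', 'prior_prior', 'unet']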
spaces/Andy1621/uniformer_image_detection/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py
DELETED
@@ -1,58 +0,0 @@
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    pretrained='open-mmlab://regnetx_3.2gf',
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
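A minimal sketch (config path assumed relative to an MMDetection checkout) of how a config like the one above resolves: mmcv's Config.fromfile merges the _base_ chain, and _delete_=True makes the backbone dict replace the inherited ResNet settings rather than merge into them.

from mmcv import Config

cfg = Config.fromfile("configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py")  # path assumed
print(cfg.model.backbone.type)     # 'RegNet': _delete_=True dropped the inherited ResNet keys
print(cfg.model.neck.in_channels)  # [96, 192, 432, 1008]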
spaces/Andy1621/uniformer_image_detection/exp/cascade_mask_rcnn_3x_ms_hybrid_base/config.py
DELETED
@@ -1,142 +0,0 @@
_base_ = [
    '../../configs/_base_/models/cascade_mask_rcnn_uniformer_fpn.py',
    '../../configs/_base_/datasets/coco_instance.py',
    '../../configs/_base_/schedules/schedule_1x.py',
    '../../configs/_base_/default_runtime.py'
]

model = dict(
    backbone=dict(
        embed_dim=[64, 128, 320, 512],
        layers=[5, 8, 20, 7],
        head_dim=64,
        drop_path_rate=0.4,
        use_checkpoint=True,
        checkpoint_num=[0, 0, 20, 0],
        windows=False,
        hybrid=True,
        window_size=14
    ),
    neck=dict(in_channels=[64, 128, 320, 512]),
    roi_head=dict(
        bbox_head=[
            dict(
                type='ConvFCBBoxHead',
                num_shared_convs=4,
                num_shared_fcs=1,
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=False,
                reg_decoded_bbox=True,
                norm_cfg=dict(type='SyncBN', requires_grad=True),
                loss_cls=dict(
                    type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
                loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
            dict(
                type='ConvFCBBoxHead',
                num_shared_convs=4,
                num_shared_fcs=1,
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=False,
                reg_decoded_bbox=True,
                norm_cfg=dict(type='SyncBN', requires_grad=True),
                loss_cls=dict(
                    type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
                loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
            dict(
                type='ConvFCBBoxHead',
                num_shared_convs=4,
                num_shared_fcs=1,
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=False,
                reg_decoded_bbox=True,
                norm_cfg=dict(type='SyncBN', requires_grad=True),
                loss_cls=dict(
                    type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
                loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
        ]))

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [
                 dict(type='Resize',
                      img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                                 (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                                 (736, 1333), (768, 1333), (800, 1333)],
                      multiscale_mode='value',
                      keep_ratio=True)
             ],
             [
                 dict(type='Resize',
                      img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                      multiscale_mode='value',
                      keep_ratio=True),
                 dict(type='RandomCrop',
                      crop_type='absolute_range',
                      crop_size=(384, 600),
                      allow_negative_crop=True),
                 dict(type='Resize',
                      img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                 (576, 1333), (608, 1333), (640, 1333),
                                 (672, 1333), (704, 1333), (736, 1333),
                                 (768, 1333), (800, 1333)],
                      multiscale_mode='value',
                      override=True,
                      keep_ratio=True)
             ]
         ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))

optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
                                                 'relative_position_bias_table': dict(decay_mult=0.),
                                                 'norm': dict(decay_mult=0.)}))
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)

# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
    type="DistOptimizerHook",
    update_interval=1,
    grad_clip=None,
    coalesce=True,
    bucket_size_mb=-1,
    use_fp16=True,
)
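A toy sketch (not MMDetection internals) of what the AutoAugment block in the config above expresses: each training image randomly takes one of two policies, a plain multi-scale resize or a resize-crop-resize chain.

import random

SCALES = [(s, 1333) for s in range(480, 801, 32)]  # 480...800, matching the config

def pick_policy():
    if random.random() < 0.5:
        return [("resize", random.choice(SCALES))]
    return [
        ("resize", random.choice([(400, 1333), (500, 1333), (600, 1333)])),
        ("random_crop", (384, 600)),   # absolute_range crop, then rescale again
        ("resize", random.choice(SCALES)),
    ]

print(pick_policy())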
spaces/Andy1621/uniformer_image_detection/mmdet/utils/optimizer.py
DELETED
@@ -1,33 +0,0 @@
from mmcv.runner import OptimizerHook, HOOKS
try:
    import apex
except:
    print('apex is not installed')


@HOOKS.register_module()
class DistOptimizerHook(OptimizerHook):
    """Optimizer hook for distributed training."""

    def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=-1, use_fp16=False):
        self.grad_clip = grad_clip
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb
        self.update_interval = update_interval
        self.use_fp16 = use_fp16

    def before_run(self, runner):
        runner.optimizer.zero_grad()

    def after_train_iter(self, runner):
        runner.outputs['loss'] /= self.update_interval
        if self.use_fp16:
            with apex.amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            runner.outputs['loss'].backward()
        if self.every_n_iters(runner, self.update_interval):
            if self.grad_clip is not None:
                self.clip_grads(runner.model.parameters())
            runner.optimizer.step()
            runner.optimizer.zero_grad()
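A standalone sketch (plain PyTorch, no mmcv or apex) of the gradient-accumulation scheme DistOptimizerHook implements above: each loss is scaled by 1/update_interval and the optimizer steps every update_interval iterations, approximating an update_interval-times larger batch.

import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
N = 4  # update_interval

opt.zero_grad()  # mirrors before_run
for it, x in enumerate(torch.randn(16, 4).split(1)):
    loss = model(x).sum() / N   # mirrors runner.outputs['loss'] /= update_interval
    loss.backward()             # gradients accumulate across N iterations
    if (it + 1) % N == 0:       # mirrors every_n_iters(runner, N)
        opt.step()
        opt.zero_grad()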
spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
_base_ = [
    '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/optimization/arguments.py
DELETED
@@ -1,197 +0,0 @@
import argparse


def get_arguments() -> argparse.Namespace:
    parser = argparse.ArgumentParser()

    # Inputs
    parser.add_argument(
        "-p", "--prompt", type=str, help="The prompt for the desired editing", required=True
    )
    parser.add_argument(
        "-i", "--init_image", type=str, help="The path to the source image input", required=True
    )
    parser.add_argument(
        "-i2", "--init_image_2", type=str, help="The path to the source image input", required=True
    )

    parser.add_argument("--mask", type=str, help="The path to the mask to edit with", default=None)

    # Diffusion
    parser.add_argument(
        "--skip_timesteps",
        type=int,
        help="How many steps to skip during the diffusion.",
        default=25,
    )
    parser.add_argument(
        "--local_clip_guided_diffusion",
        help="Indicator for using local CLIP guided diffusion (for baseline comparison)",
        action="store_true",
        dest="local_clip_guided_diffusion",
    )
    parser.add_argument(
        "--ddim",
        help="Indicator for using DDIM instead of DDPM",
        action="store_true",
    )

    # For more details read guided-diffusion/guided_diffusion/respace.py
    parser.add_argument(
        "--timestep_respacing",
        type=str,
        help="How to respace the intervals of the diffusion process (number between 1 and 1000).",
        default="100",
    )
    parser.add_argument(
        "--model_output_size",
        type=int,
        help="The resolution of the outputs of the diffusion model",
        default=256,
        choices=[256, 512],
    )

    # Augmentations
    parser.add_argument("--aug_num", type=int, help="The number of augmentation", default=8)

    # Loss
    parser.add_argument(
        "--clip_guidance_lambda",
        type=float,
        help="Controls how much the image should look like the prompt",
        default=1000,
    )
    parser.add_argument(
        "--range_lambda",
        type=float,
        help="Controls how far out of range RGB values are allowed to be",
        default=50,
    )
    parser.add_argument(
        "--lpips_sim_lambda",
        type=float,
        help="The LPIPS similarity to the input image",
        default=1000,
    )
    parser.add_argument(
        "--l2_sim_lambda", type=float, help="The L2 similarity to the input image", default=10000,
    )
    parser.add_argument(
        "--background_preservation_loss",
        help="Indicator for using the background preservation loss",
        action="store_true",
    )

    # Mask
    parser.add_argument(
        "--invert_mask",
        help="Indicator for mask inversion",
        action="store_true",
        dest="invert_mask",
    )
    parser.add_argument(
        "--no_enforce_background",
        help="Indicator disabling the last background enforcement",
        action="store_false",
        dest="enforce_background",
    )

    # Misc
    parser.add_argument("--seed", type=int, help="The random seed", default=404)
    parser.add_argument("--gpu_id", type=int, help="The GPU ID", default=0)
    parser.add_argument("--output_path", type=str, default="output")
    parser.add_argument(
        "-o",
        "--output_file",
        type=str,
        help="The filename to save, must be png",
        default="output.png",
    )
    parser.add_argument("--iterations_num", type=int, help="The number of iterations", default=8)
    parser.add_argument(
        "--batch_size",
        type=int,
        help="The number number if images to sample each diffusion process",
        default=4,
    )
    parser.add_argument(
        "--vid",
        help="Indicator for saving the video of the diffusion process",
        action="store_true",
        dest="save_video",
    )
    parser.add_argument(
        "--export_assets",
        help="Indicator for saving raw assets of the prediction",
        action="store_true",
        dest="export_assets",
    )
    parser.add_argument(
        "--image_guide",
        help="Indicator image or text",
        action="store_true",
        dest="image_guide",
    )
    parser.add_argument(
        "--coarse_to_fine",
        help="Indicator mask from big to small",
        action="store_true",
        dest="coarse_to_fine",
    )
    parser.add_argument(
        "--classifier_scale",
        type=float,
        help="Classifer scale for class guided",
        default=10.,
    )
    parser.add_argument(
        "--y",
        type=int,
        help="Target class for classifier guidence",
        default=0,
    )
    parser.add_argument(
        "--class_cond",
        help="classifer conditioned for diffusion model or not",
        action="store_true",
        dest="class_cond",
    )
    parser.add_argument(
        "--background_complex",
        type=float,
        help="BG complex guidance scale",
        default=0.,
    )
    parser.add_argument(
        "--final_save_root",
        type=str,
        help="Final save root",
        default="validation-generated/generated-with-25-steps-bg/final/",
    )
    parser.add_argument(
        "--hard",
        help="hard or smooth",
        action="store_true",
        dest="hard",
    )
    parser.add_argument(
        "--random_position",
        help="apply random position",
        action="store_true",
        dest="random_position",
    )
    parser.add_argument(
        "--rotate_obj",
        help="apply random rotate to objects",
        action="store_true",
        dest="rotate_obj",
    )
    parser.add_argument(
        "--angle",
        type=int,
        help="angle",
        default=0,
    )
    args = parser.parse_args()
    print(args)
    return args
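A hypothetical invocation of the parser above (entry-point and module names assumed): -p, -i and -i2 are the only required flags; everything else falls back to the defaults shown.

# e.g.  python main.py -p "a red car" -i source.png -i2 source2.png --mask mask.png --ddim
import sys

from arguments import get_arguments  # module name assumed

sys.argv = ["main.py", "-p", "a red car", "-i", "source.png", "-i2", "source2.png"]
args = get_arguments()
assert args.skip_timesteps == 25 and args.seed == 404 and not args.ddim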
spaces/ArtGAN/Diffusion-API/diffusion_webui/diffusion_models/text2img_app.py
DELETED
@@ -1,173 +0,0 @@
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline, DiffusionPipeline

from diffusion_webui.utils.model_list import stable_model_list
from diffusion_webui.utils.scheduler_list import (
    SCHEDULER_MAPPING,
    get_scheduler,
)


class StableDiffusionText2ImageGenerator:
    def __init__(self):
        self.pipe = None

    def load_model(
        self,
        stable_model_path,
        scheduler,
    ):
        if self.pipe is None or self.pipe.model_name != stable_model_path or self.pipe.scheduler_name != scheduler:
            if stable_model_path == "stabilityai/stable-diffusion-xl-base-0.9":
                self.pipe = DiffusionPipeline.from_pretrained(
                    stable_model_path, safety_checker=None, torch_dtype=torch.float16
                )
            else:
                self.pipe = StableDiffusionPipeline.from_pretrained(
                    stable_model_path, safety_checker=None, torch_dtype=torch.float16
                )

            self.pipe = get_scheduler(pipe=self.pipe, scheduler=scheduler)
            self.pipe.to("cuda")
            self.pipe.enable_xformers_memory_efficient_attention()
            self.pipe.model_name = stable_model_path
            self.pipe.scheduler_name = scheduler

        return self.pipe

    def generate_image(
        self,
        stable_model_path: str,
        prompt: str,
        negative_prompt: str,
        num_images_per_prompt: int,
        scheduler: str,
        guidance_scale: int,
        num_inference_step: int,
        height: int,
        width: int,
        seed_generator=0,
    ):
        pipe = self.load_model(
            stable_model_path=stable_model_path,
            scheduler=scheduler,
        )
        if seed_generator == 0:
            random_seed = torch.randint(0, 1000000, (1,))
            generator = torch.manual_seed(random_seed)
        else:
            generator = torch.manual_seed(seed_generator)

        images = pipe(
            prompt=prompt,
            height=height,
            width=width,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            num_inference_steps=num_inference_step,
            guidance_scale=guidance_scale,
            generator=generator,
        ).images

        return images

    def app():
        with gr.Blocks():
            with gr.Row():
                with gr.Column():
                    text2image_prompt = gr.Textbox(
                        lines=1,
                        placeholder="Prompt",
                        show_label=False,
                    )

                    text2image_negative_prompt = gr.Textbox(
                        lines=1,
                        placeholder="Negative Prompt",
                        show_label=False,
                    )
                    with gr.Row():
                        with gr.Column():
                            text2image_model_path = gr.Dropdown(
                                choices=stable_model_list,
                                value=stable_model_list[0],
                                label="Text-Image Model Id",
                            )

                            text2image_guidance_scale = gr.Slider(
                                minimum=0.1,
                                maximum=15,
                                step=0.1,
                                value=7.5,
                                label="Guidance Scale",
                            )

                            text2image_num_inference_step = gr.Slider(
                                minimum=1,
                                maximum=100,
                                step=1,
                                value=50,
                                label="Num Inference Step",
                            )
                            text2image_num_images_per_prompt = gr.Slider(
                                minimum=1,
                                maximum=4,
                                step=1,
                                value=1,
                                label="Number Of Images",
                            )
                    with gr.Row():
                        with gr.Column():
                            text2image_scheduler = gr.Dropdown(
                                choices=list(SCHEDULER_MAPPING.keys()),
                                value=list(SCHEDULER_MAPPING.keys())[0],
                                label="Scheduler",
                            )

                            text2image_height = gr.Slider(
                                minimum=128,
                                maximum=1280,
                                step=32,
                                value=512,
                                label="Image Height",
                            )

                            text2image_width = gr.Slider(
                                minimum=128,
                                maximum=1280,
                                step=32,
                                value=512,
                                label="Image Width",
                            )
                            text2image_seed_generator = gr.Slider(
                                label="Seed(0 for random)",
                                minimum=0,
                                maximum=1000000,
                                value=0,
                            )
                    text2image_predict = gr.Button(value="Generator")

                with gr.Column():
                    output_image = gr.Gallery(
                        label="Generated images",
                        show_label=False,
                        elem_id="gallery",
                    ).style(grid=(1, 2), height=200)

            text2image_predict.click(
                fn=StableDiffusionText2ImageGenerator().generate_image,
                inputs=[
                    text2image_model_path,
                    text2image_prompt,
                    text2image_negative_prompt,
                    text2image_num_images_per_prompt,
                    text2image_scheduler,
                    text2image_guidance_scale,
                    text2image_num_inference_step,
                    text2image_height,
                    text2image_width,
                    text2image_seed_generator,
                ],
                outputs=output_image,
            )
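A minimal sketch (module path assumed from the repo layout, gradio version as pinned by that Space) of serving this tab standalone; app() builds its widgets inside an ambient gr.Blocks context.

import gradio as gr
from diffusion_webui.diffusion_models.text2img_app import (  # import path assumed
    StableDiffusionText2ImageGenerator,
)

with gr.Blocks() as demo:
    StableDiffusionText2ImageGenerator.app()  # app() takes no self, so call it on the class

demo.queue().launch()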
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/candidates.py
DELETED
@@ -1,552 +0,0 @@
import logging
import sys
from typing import TYPE_CHECKING, Any, FrozenSet, Iterable, Optional, Tuple, Union, cast

from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
from pip._vendor.packaging.version import Version

from pip._internal.exceptions import (
    HashError,
    InstallationSubprocessError,
    MetadataInconsistent,
)
from pip._internal.metadata import BaseDistribution
from pip._internal.models.link import Link, links_equivalent
from pip._internal.models.wheel import Wheel
from pip._internal.req.constructors import (
    install_req_from_editable,
    install_req_from_line,
)
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.direct_url_helpers import direct_url_from_link
from pip._internal.utils.misc import normalize_version_info

from .base import Candidate, CandidateVersion, Requirement, format_name

if TYPE_CHECKING:
    from .factory import Factory

logger = logging.getLogger(__name__)

BaseCandidate = Union[
    "AlreadyInstalledCandidate",
    "EditableCandidate",
    "LinkCandidate",
]

# Avoid conflicting with the PyPI package "Python".
REQUIRES_PYTHON_IDENTIFIER = cast(NormalizedName, "<Python from Requires-Python>")


def as_base_candidate(candidate: Candidate) -> Optional[BaseCandidate]:
    """The runtime version of BaseCandidate."""
    base_candidate_classes = (
        AlreadyInstalledCandidate,
        EditableCandidate,
        LinkCandidate,
    )
    if isinstance(candidate, base_candidate_classes):
        return candidate
    return None


def make_install_req_from_link(
    link: Link, template: InstallRequirement
) -> InstallRequirement:
    assert not template.editable, "template is editable"
    if template.req:
        line = str(template.req)
    else:
        line = link.url
    ireq = install_req_from_line(
        line,
        user_supplied=template.user_supplied,
        comes_from=template.comes_from,
        use_pep517=template.use_pep517,
        isolated=template.isolated,
        constraint=template.constraint,
        global_options=template.global_options,
        hash_options=template.hash_options,
        config_settings=template.config_settings,
    )
    ireq.original_link = template.original_link
    ireq.link = link
    ireq.extras = template.extras
    return ireq


def make_install_req_from_editable(
    link: Link, template: InstallRequirement
) -> InstallRequirement:
    assert template.editable, "template not editable"
    ireq = install_req_from_editable(
        link.url,
        user_supplied=template.user_supplied,
        comes_from=template.comes_from,
        use_pep517=template.use_pep517,
        isolated=template.isolated,
        constraint=template.constraint,
        permit_editable_wheels=template.permit_editable_wheels,
        global_options=template.global_options,
        hash_options=template.hash_options,
        config_settings=template.config_settings,
    )
    ireq.extras = template.extras
    return ireq


def _make_install_req_from_dist(
    dist: BaseDistribution, template: InstallRequirement
) -> InstallRequirement:
    if template.req:
        line = str(template.req)
    elif template.link:
        line = f"{dist.canonical_name} @ {template.link.url}"
    else:
        line = f"{dist.canonical_name}=={dist.version}"
    ireq = install_req_from_line(
        line,
        user_supplied=template.user_supplied,
        comes_from=template.comes_from,
        use_pep517=template.use_pep517,
        isolated=template.isolated,
        constraint=template.constraint,
        global_options=template.global_options,
        hash_options=template.hash_options,
        config_settings=template.config_settings,
    )
    ireq.satisfied_by = dist
    return ireq


class _InstallRequirementBackedCandidate(Candidate):
    """A candidate backed by an ``InstallRequirement``.

    This represents a package request with the target not being already
    in the environment, and needs to be fetched and installed. The backing
    ``InstallRequirement`` is responsible for most of the leg work; this
    class exposes appropriate information to the resolver.

    :param link: The link passed to the ``InstallRequirement``. The backing
        ``InstallRequirement`` will use this link to fetch the distribution.
    :param source_link: The link this candidate "originates" from. This is
        different from ``link`` when the link is found in the wheel cache.
        ``link`` would point to the wheel cache, while this points to the
        found remote link (e.g. from pypi.org).
    """

    dist: BaseDistribution
    is_installed = False

    def __init__(
        self,
        link: Link,
        source_link: Link,
        ireq: InstallRequirement,
        factory: "Factory",
        name: Optional[NormalizedName] = None,
        version: Optional[CandidateVersion] = None,
    ) -> None:
        self._link = link
        self._source_link = source_link
        self._factory = factory
        self._ireq = ireq
        self._name = name
        self._version = version
        self.dist = self._prepare()

    def __str__(self) -> str:
        return f"{self.name} {self.version}"

    def __repr__(self) -> str:
        return "{class_name}({link!r})".format(
            class_name=self.__class__.__name__,
            link=str(self._link),
        )

    def __hash__(self) -> int:
        return hash((self.__class__, self._link))

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, self.__class__):
            return links_equivalent(self._link, other._link)
        return False

    @property
    def source_link(self) -> Optional[Link]:
        return self._source_link

    @property
    def project_name(self) -> NormalizedName:
        """The normalised name of the project the candidate refers to"""
        if self._name is None:
            self._name = self.dist.canonical_name
        return self._name

    @property
    def name(self) -> str:
        return self.project_name

    @property
    def version(self) -> CandidateVersion:
        if self._version is None:
            self._version = self.dist.version
        return self._version

    def format_for_error(self) -> str:
        return "{} {} (from {})".format(
            self.name,
            self.version,
            self._link.file_path if self._link.is_file else self._link,
        )

    def _prepare_distribution(self) -> BaseDistribution:
        raise NotImplementedError("Override in subclass")

    def _check_metadata_consistency(self, dist: BaseDistribution) -> None:
        """Check for consistency of project name and version of dist."""
        if self._name is not None and self._name != dist.canonical_name:
            raise MetadataInconsistent(
                self._ireq,
                "name",
                self._name,
                dist.canonical_name,
            )
        if self._version is not None and self._version != dist.version:
            raise MetadataInconsistent(
                self._ireq,
                "version",
                str(self._version),
                str(dist.version),
            )

    def _prepare(self) -> BaseDistribution:
        try:
            dist = self._prepare_distribution()
        except HashError as e:
            # Provide HashError the underlying ireq that caused it. This
            # provides context for the resulting error message to show the
            # offending line to the user.
            e.req = self._ireq
            raise
        except InstallationSubprocessError as exc:
            # The output has been presented already, so don't duplicate it.
            exc.context = "See above for output."
            raise

        self._check_metadata_consistency(dist)
        return dist

    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        requires = self.dist.iter_dependencies() if with_requires else ()
        for r in requires:
            yield self._factory.make_requirement_from_spec(str(r), self._ireq)
        yield self._factory.make_requires_python_requirement(self.dist.requires_python)

    def get_install_requirement(self) -> Optional[InstallRequirement]:
        return self._ireq


class LinkCandidate(_InstallRequirementBackedCandidate):
    is_editable = False

    def __init__(
        self,
        link: Link,
        template: InstallRequirement,
        factory: "Factory",
        name: Optional[NormalizedName] = None,
        version: Optional[CandidateVersion] = None,
    ) -> None:
        source_link = link
        cache_entry = factory.get_wheel_cache_entry(source_link, name)
        if cache_entry is not None:
            logger.debug("Using cached wheel link: %s", cache_entry.link)
            link = cache_entry.link
        ireq = make_install_req_from_link(link, template)
        assert ireq.link == link
        if ireq.link.is_wheel and not ireq.link.is_file:
            wheel = Wheel(ireq.link.filename)
            wheel_name = canonicalize_name(wheel.name)
            assert name == wheel_name, f"{name!r} != {wheel_name!r} for wheel"
            # Version may not be present for PEP 508 direct URLs
            if version is not None:
                wheel_version = Version(wheel.version)
                assert version == wheel_version, "{!r} != {!r} for wheel {}".format(
                    version, wheel_version, name
                )

        if cache_entry is not None:
            assert ireq.link.is_wheel
            assert ireq.link.is_file
            if cache_entry.persistent and template.link is template.original_link:
                ireq.cached_wheel_source_link = source_link
            if cache_entry.origin is not None:
                ireq.download_info = cache_entry.origin
            else:
                # Legacy cache entry that does not have origin.json.
                # download_info may miss the archive_info.hashes field.
                ireq.download_info = direct_url_from_link(
                    source_link, link_is_in_wheel_cache=cache_entry.persistent
                )

        super().__init__(
            link=link,
            source_link=source_link,
            ireq=ireq,
            factory=factory,
            name=name,
            version=version,
        )

    def _prepare_distribution(self) -> BaseDistribution:
        preparer = self._factory.preparer
        return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True)


class EditableCandidate(_InstallRequirementBackedCandidate):
    is_editable = True

    def __init__(
        self,
        link: Link,
        template: InstallRequirement,
        factory: "Factory",
        name: Optional[NormalizedName] = None,
        version: Optional[CandidateVersion] = None,
    ) -> None:
        super().__init__(
            link=link,
            source_link=link,
            ireq=make_install_req_from_editable(link, template),
            factory=factory,
            name=name,
            version=version,
        )

    def _prepare_distribution(self) -> BaseDistribution:
        return self._factory.preparer.prepare_editable_requirement(self._ireq)


class AlreadyInstalledCandidate(Candidate):
    is_installed = True
    source_link = None

    def __init__(
        self,
        dist: BaseDistribution,
        template: InstallRequirement,
        factory: "Factory",
    ) -> None:
        self.dist = dist
        self._ireq = _make_install_req_from_dist(dist, template)
        self._factory = factory

        # This is just logging some messages, so we can do it eagerly.
        # The returned dist would be exactly the same as self.dist because we
        # set satisfied_by in _make_install_req_from_dist.
        # TODO: Supply reason based on force_reinstall and upgrade_strategy.
        skip_reason = "already satisfied"
        factory.preparer.prepare_installed_requirement(self._ireq, skip_reason)

    def __str__(self) -> str:
        return str(self.dist)

    def __repr__(self) -> str:
        return "{class_name}({distribution!r})".format(
            class_name=self.__class__.__name__,
            distribution=self.dist,
        )

    def __hash__(self) -> int:
        return hash((self.__class__, self.name, self.version))

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, self.__class__):
            return self.name == other.name and self.version == other.version
        return False

    @property
    def project_name(self) -> NormalizedName:
        return self.dist.canonical_name

    @property
    def name(self) -> str:
        return self.project_name

    @property
    def version(self) -> CandidateVersion:
        return self.dist.version

    @property
    def is_editable(self) -> bool:
        return self.dist.editable

    def format_for_error(self) -> str:
        return f"{self.name} {self.version} (Installed)"

    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        if not with_requires:
            return
        for r in self.dist.iter_dependencies():
            yield self._factory.make_requirement_from_spec(str(r), self._ireq)

    def get_install_requirement(self) -> Optional[InstallRequirement]:
        return None


class ExtrasCandidate(Candidate):
    """A candidate that has 'extras', indicating additional dependencies.

    Requirements can be for a project with dependencies, something like
    foo[extra]. The extras don't affect the project/version being installed
    directly, but indicate that we need additional dependencies. We model that
    by having an artificial ExtrasCandidate that wraps the "base" candidate.

    The ExtrasCandidate differs from the base in the following ways:

    1. It has a unique name, of the form foo[extra]. This causes the resolver
       to treat it as a separate node in the dependency graph.
    2. When we're getting the candidate's dependencies,
       a) We specify that we want the extra dependencies as well.
       b) We add a dependency on the base candidate.
          See below for why this is needed.
    3. We return None for the underlying InstallRequirement, as the base
       candidate will provide it, and we don't want to end up with duplicates.

    The dependency on the base candidate is needed so that the resolver can't
    decide that it should recommend foo[extra1] version 1.0 and foo[extra2]
    version 2.0. Having those candidates depend on foo=1.0 and foo=2.0
    respectively forces the resolver to recognise that this is a conflict.
    """

    def __init__(
        self,
        base: BaseCandidate,
        extras: FrozenSet[str],
    ) -> None:
        self.base = base
        self.extras = extras

    def __str__(self) -> str:
        name, rest = str(self.base).split(" ", 1)
        return "{}[{}] {}".format(name, ",".join(self.extras), rest)

    def __repr__(self) -> str:
        return "{class_name}(base={base!r}, extras={extras!r})".format(
            class_name=self.__class__.__name__,
            base=self.base,
            extras=self.extras,
        )

    def __hash__(self) -> int:
        return hash((self.base, self.extras))

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, self.__class__):
            return self.base == other.base and self.extras == other.extras
        return False

    @property
    def project_name(self) -> NormalizedName:
        return self.base.project_name

    @property
    def name(self) -> str:
        """The normalised name of the project the candidate refers to"""
        return format_name(self.base.project_name, self.extras)

    @property
    def version(self) -> CandidateVersion:
        return self.base.version

    def format_for_error(self) -> str:
        return "{} [{}]".format(
            self.base.format_for_error(), ", ".join(sorted(self.extras))
        )

    @property
    def is_installed(self) -> bool:
        return self.base.is_installed

    @property
    def is_editable(self) -> bool:
        return self.base.is_editable

    @property
    def source_link(self) -> Optional[Link]:
        return self.base.source_link

    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        factory = self.base._factory

        # Add a dependency on the exact base
        # (See note 2b in the class docstring)
        yield factory.make_requirement_from_candidate(self.base)
        if not with_requires:
            return

        # The user may have specified extras that the candidate doesn't
        # support. We ignore any unsupported extras here.
        valid_extras = self.extras.intersection(self.base.dist.iter_provided_extras())
        invalid_extras = self.extras.difference(self.base.dist.iter_provided_extras())
        for extra in sorted(invalid_extras):
            logger.warning(
                "%s %s does not provide the extra '%s'",
                self.base.name,
                self.version,
                extra,
            )

        for r in self.base.dist.iter_dependencies(valid_extras):
            requirement = factory.make_requirement_from_spec(
                str(r), self.base._ireq, valid_extras
            )
            if requirement:
                yield requirement

    def get_install_requirement(self) -> Optional[InstallRequirement]:
        # We don't return anything here, because we always
        # depend on the base candidate, and we'll get the
        # install requirement from that.
        return None


class RequiresPythonCandidate(Candidate):
    is_installed = False
    source_link = None

    def __init__(self, py_version_info: Optional[Tuple[int, ...]]) -> None:
        if py_version_info is not None:
            version_info = normalize_version_info(py_version_info)
        else:
            version_info = sys.version_info[:3]
        self._version = Version(".".join(str(c) for c in version_info))

    # We don't need to implement __eq__() and __ne__() since there is always
    # only one RequiresPythonCandidate in a resolution, i.e. the host Python.
    # The built-in object.__eq__() and object.__ne__() do exactly what we want.

    def __str__(self) -> str:
        return f"Python {self._version}"

    @property
    def project_name(self) -> NormalizedName:
        return REQUIRES_PYTHON_IDENTIFIER

    @property
    def name(self) -> str:
        return REQUIRES_PYTHON_IDENTIFIER

    @property
    def version(self) -> CandidateVersion:
        return self._version

    def format_for_error(self) -> str:
        return f"Python {self.version}"

    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        return ()

    def get_install_requirement(self) -> Optional[InstallRequirement]:
        return None
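A small sketch of the naming convention the candidates above depend on: packaging's canonicalize_name normalizes project names per PEP 503, and ExtrasCandidate's foo[extra] form (built by format_name) gives each extras set its own node in the resolver graph.

from pip._vendor.packaging.utils import canonicalize_name

base = canonicalize_name("Foo_Bar")                      # 'foo-bar'
extras = frozenset({"ssl", "cli"})
print("{}[{}]".format(base, ",".join(sorted(extras))))   # foo-bar[cli,ssl]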
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/_musllinux.py
DELETED
@@ -1,136 +0,0 @@
"""PEP 656 support.

This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""

import contextlib
import functools
import operator
import os
import re
import struct
import subprocess
import sys
from typing import IO, Iterator, NamedTuple, Optional, Tuple


def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
    return struct.unpack(fmt, f.read(struct.calcsize(fmt)))


def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
    """Detect musl libc location by parsing the Python executable.

    Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
    ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
    """
    f.seek(0)
    try:
        ident = _read_unpacked(f, "16B")
    except struct.error:
        return None
    if ident[:4] != tuple(b"\x7fELF"):  # Invalid magic, not ELF.
        return None
    f.seek(struct.calcsize("HHI"), 1)  # Skip file type, machine, and version.

    try:
        # e_fmt: Format for program header.
        # p_fmt: Format for section header.
        # p_idx: Indexes to find p_type, p_offset, and p_filesz.
        e_fmt, p_fmt, p_idx = {
            1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)),  # 32-bit.
            2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)),  # 64-bit.
        }[ident[4]]
    except KeyError:
        return None
    else:
        p_get = operator.itemgetter(*p_idx)

    # Find the interpreter section and return its content.
    try:
        _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
    except struct.error:
        return None
    for i in range(e_phnum + 1):
        f.seek(e_phoff + e_phentsize * i)
        try:
            p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
        except struct.error:
            return None
        if p_type != 3:  # Not PT_INTERP.
            continue
        f.seek(p_offset)
        interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
        if "musl" not in interpreter:
            return None
        return interpreter
    return None


class _MuslVersion(NamedTuple):
    major: int
    minor: int


def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
    lines = [n for n in (n.strip() for n in output.splitlines()) if n]
    if len(lines) < 2 or lines[0][:4] != "musl":
        return None
    m = re.match(r"Version (\d+)\.(\d+)", lines[1])
    if not m:
        return None
    return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))


@functools.lru_cache()
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
    """Detect currently-running musl runtime version.

    This is done by checking the specified executable's dynamic linking
    information, and invoking the loader to parse its output for a version
    string. If the loader is musl, the output would be something like::

        musl libc (x86_64)
        Version 1.2.2
        Dynamic Program Loader
    """
    with contextlib.ExitStack() as stack:
        try:
            f = stack.enter_context(open(executable, "rb"))
        except OSError:
            return None
        ld = _parse_ld_musl_from_elf(f)
    if not ld:
        return None
    proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
    return _parse_musl_version(proc.stderr)


def platform_tags(arch: str) -> Iterator[str]:
    """Generate musllinux tags compatible to the current platform.

    :param arch: Should be the part of platform tag after the ``linux_``
        prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
        prerequisite for the current platform to be musllinux-compatible.

    :returns: An iterator of compatible musllinux tags.
    """
    sys_musl = _get_musl_version(sys.executable)
    if sys_musl is None:  # Python not dynamically linked against musl.
        return
    for minor in range(sys_musl.minor, -1, -1):
        yield f"musllinux_{sys_musl.major}_{minor}_{arch}"


if __name__ == "__main__":  # pragma: no cover
    import sysconfig

    plat = sysconfig.get_platform()
    assert plat.startswith("linux-"), "not linux"

    print("plat:", plat)
    print("musl:", _get_musl_version(sys.executable))
    print("tags:", end=" ")
    for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
        print(t, end="\n      ")
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_export_torchscript.py
DELETED
@@ -1,296 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import json
-import os
-import random
-import tempfile
-import unittest
-import torch
-from torch import Tensor, nn
-
-from detectron2 import model_zoo
-from detectron2.config import get_cfg
-from detectron2.config.instantiate import dump_dataclass, instantiate
-from detectron2.export import dump_torchscript_IR, scripting_with_instances
-from detectron2.export.flatten import TracingAdapter, flatten_to_tuple
-from detectron2.export.torchscript_patch import patch_builtin_len
-from detectron2.layers import ShapeSpec
-from detectron2.modeling import build_backbone
-from detectron2.modeling.postprocessing import detector_postprocess
-from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead
-from detectron2.structures import Boxes, Instances
-from detectron2.utils.env import TORCH_VERSION
-from detectron2.utils.testing import (
-    assert_instances_allclose,
-    convert_scripted_instances,
-    get_sample_coco_image,
-    random_boxes,
-)
-
-"""
-https://detectron2.readthedocs.io/tutorials/deployment.html
-contains some explanations of this file.
-"""
-
-SLOW_PUBLIC_CPU_TEST = unittest.skipIf(
-    os.environ.get("CI") and not torch.cuda.is_available(),
-    "The test is too slow on CPUs and will be executed on CircleCI's GPU jobs.",
-)
-
-
-class TestScripting(unittest.TestCase):
-    def testMaskRCNNFPN(self):
-        self._test_rcnn_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
-
-    @SLOW_PUBLIC_CPU_TEST
-    def testMaskRCNNC4(self):
-        self._test_rcnn_model("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml")
-
-    def testRetinaNet(self):
-        self._test_retinanet_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml")
-
-    def _test_rcnn_model(self, config_path):
-        model = model_zoo.get(config_path, trained=True)
-        model.eval()
-
-        fields = {
-            "proposal_boxes": Boxes,
-            "objectness_logits": Tensor,
-            "pred_boxes": Boxes,
-            "scores": Tensor,
-            "pred_classes": Tensor,
-            "pred_masks": Tensor,
-        }
-        script_model = scripting_with_instances(model, fields)
-
-        # Test that batch inference with different shapes are supported
-        image = get_sample_coco_image()
-        small_image = nn.functional.interpolate(image, scale_factor=0.5)
-        inputs = [{"image": image}, {"image": small_image}]
-        with torch.no_grad():
-            instance = model.inference(inputs, do_postprocess=False)[0]
-            scripted_instance = script_model.inference(inputs, do_postprocess=False)[0]
-        assert_instances_allclose(instance, scripted_instance)
-
-    def _test_retinanet_model(self, config_path):
-        model = model_zoo.get(config_path, trained=True)
-        model.eval()
-
-        fields = {
-            "pred_boxes": Boxes,
-            "scores": Tensor,
-            "pred_classes": Tensor,
-        }
-        script_model = scripting_with_instances(model, fields)
-
-        img = get_sample_coco_image()
-        inputs = [{"image": img}] * 2
-        with torch.no_grad():
-            instance = model(inputs)[0]["instances"]
-            scripted_instance = convert_scripted_instances(script_model(inputs)[0])
-            scripted_instance = detector_postprocess(scripted_instance, img.shape[1], img.shape[2])
-        assert_instances_allclose(instance, scripted_instance)
-        # Note that the model currently cannot be saved and loaded into a new process:
-        # https://github.com/pytorch/pytorch/issues/46944
-
-
-# TODO: this test requires manifold access, see: T88318502
-class TestTracing(unittest.TestCase):
-    def testMaskRCNNFPN(self):
-        def inference_func(model, image):
-            inputs = [{"image": image}]
-            return model.inference(inputs, do_postprocess=False)[0]
-
-        self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func)
-
-    def testMaskRCNNFPN_with_postproc(self):
-        def inference_func(model, image):
-            inputs = [{"image": image, "height": image.shape[1], "width": image.shape[2]}]
-            return model.inference(inputs, do_postprocess=True)[0]["instances"]
-
-        self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func)
-
-    @SLOW_PUBLIC_CPU_TEST
-    def testMaskRCNNC4(self):
-        def inference_func(model, image):
-            inputs = [{"image": image}]
-            return model.inference(inputs, do_postprocess=False)[0]
-
-        self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml", inference_func)
-
-    @SLOW_PUBLIC_CPU_TEST
-    def testCascadeRCNN(self):
-        def inference_func(model, image):
-            inputs = [{"image": image}]
-            return model.inference(inputs, do_postprocess=False)[0]
-
-        self._test_model("Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml", inference_func)
-
-    # bug fixed by https://github.com/pytorch/pytorch/pull/67734
-    @unittest.skipIf(TORCH_VERSION == (1, 10) and os.environ.get("CI"), "1.10 has bugs.")
-    def testRetinaNet(self):
-        def inference_func(model, image):
-            return model.forward([{"image": image}])[0]["instances"]
-
-        self._test_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml", inference_func)
-
-    def _test_model(self, config_path, inference_func, batch=1):
-        model = model_zoo.get(config_path, trained=True)
-        image = get_sample_coco_image()
-        inputs = tuple(image.clone() for _ in range(batch))
-
-        wrapper = TracingAdapter(model, inputs, inference_func)
-        wrapper.eval()
-        with torch.no_grad():
-            # trace with smaller images, and the trace must still work
-            trace_inputs = tuple(
-                nn.functional.interpolate(image, scale_factor=random.uniform(0.5, 0.7))
-                for _ in range(batch)
-            )
-            traced_model = torch.jit.trace(wrapper, trace_inputs)
-
-            outputs = inference_func(model, *inputs)
-            traced_outputs = wrapper.outputs_schema(traced_model(*inputs))
-        if batch > 1:
-            for output, traced_output in zip(outputs, traced_outputs):
-                assert_instances_allclose(output, traced_output, size_as_tensor=True)
-        else:
-            assert_instances_allclose(outputs, traced_outputs, size_as_tensor=True)
-
-    @SLOW_PUBLIC_CPU_TEST
-    def testMaskRCNNFPN_batched(self):
-        def inference_func(model, image1, image2):
-            inputs = [{"image": image1}, {"image": image2}]
-            return model.inference(inputs, do_postprocess=False)
-
-        self._test_model(
-            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func, batch=2
-        )
-
-    def testKeypointHead(self):
-        class M(nn.Module):
-            def __init__(self):
-                super().__init__()
-                self.model = KRCNNConvDeconvUpsampleHead(
-                    ShapeSpec(channels=4, height=14, width=14), num_keypoints=17, conv_dims=(4,)
-                )
-
-            def forward(self, x, predbox1, predbox2):
-                inst = [
-                    Instances((100, 100), pred_boxes=Boxes(predbox1)),
-                    Instances((100, 100), pred_boxes=Boxes(predbox2)),
-                ]
-                ret = self.model(x, inst)
-                return tuple(x.pred_keypoints for x in ret)
-
-        model = M()
-        model.eval()
-
-        def gen_input(num1, num2):
-            feat = torch.randn((num1 + num2, 4, 14, 14))
-            box1 = random_boxes(num1)
-            box2 = random_boxes(num2)
-            return feat, box1, box2
-
-        with torch.no_grad(), patch_builtin_len():
-            trace = torch.jit.trace(model, gen_input(15, 15), check_trace=False)
-
-            inputs = gen_input(12, 10)
-            trace_outputs = trace(*inputs)
-            true_outputs = model(*inputs)
-        for trace_output, true_output in zip(trace_outputs, true_outputs):
-            self.assertTrue(torch.allclose(trace_output, true_output))
-
-
-class TestTorchscriptUtils(unittest.TestCase):
-    # TODO: add test to dump scripting
-    def test_dump_IR_tracing(self):
-        cfg = get_cfg()
-        cfg.MODEL.RESNETS.DEPTH = 18
-        cfg.MODEL.RESNETS.RES2_OUT_CHANNELS = 64
-
-        class Mod(nn.Module):
-            def forward(self, x):
-                return tuple(self.m(x).values())
-
-        model = Mod()
-        model.m = build_backbone(cfg)
-        model.eval()
-
-        with torch.no_grad():
-            ts_model = torch.jit.trace(model, (torch.rand(2, 3, 224, 224),))
-
-        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
-            dump_torchscript_IR(ts_model, d)
-            # check that the files are created
-            for name in ["model_ts_code", "model_ts_IR", "model_ts_IR_inlined", "model"]:
-                fname = os.path.join(d, name + ".txt")
-                self.assertTrue(os.stat(fname).st_size > 0, fname)
-
-    def test_dump_IR_function(self):
-        @torch.jit.script
-        def gunc(x, y):
-            return x + y
-
-        def func(x, y):
-            return x + y + gunc(x, y)
-
-        ts_model = torch.jit.trace(func, (torch.rand(3), torch.rand(3)))
-        with tempfile.TemporaryDirectory(prefix="detectron2_test") as d:
-            dump_torchscript_IR(ts_model, d)
-            for name in ["model_ts_code", "model_ts_IR", "model_ts_IR_inlined"]:
-                fname = os.path.join(d, name + ".txt")
-                self.assertTrue(os.stat(fname).st_size > 0, fname)
-
-    def test_flatten_basic(self):
-        obj = [3, ([5, 6], {"name": [7, 9], "name2": 3})]
-        res, schema = flatten_to_tuple(obj)
-        self.assertEqual(res, (3, 5, 6, 7, 9, 3))
-        new_obj = schema(res)
-        self.assertEqual(new_obj, obj)
-
-        _, new_schema = flatten_to_tuple(new_obj)
-        self.assertEqual(schema, new_schema)  # test __eq__
-        self._check_schema(schema)
-
-    def _check_schema(self, schema):
-        dumped_schema = dump_dataclass(schema)
-        # Check that the schema is json-serializable
-        # Although in reality you might want to use yaml because it often has many levels
-        json.dumps(dumped_schema)
-
-        # Check that the schema can be deserialized
-        new_schema = instantiate(dumped_schema)
-        self.assertEqual(schema, new_schema)
-
-    def test_flatten_instances_boxes(self):
-        inst = Instances(
-            torch.tensor([5, 8]), pred_masks=torch.tensor([3]), pred_boxes=Boxes(torch.ones((1, 4)))
-        )
-        obj = [3, ([5, 6], inst)]
-        res, schema = flatten_to_tuple(obj)
-        self.assertEqual(res[:3], (3, 5, 6))
-        for r, expected in zip(res[3:], (inst.pred_boxes.tensor, inst.pred_masks, inst.image_size)):
-            self.assertIs(r, expected)
-        new_obj = schema(res)
-        assert_instances_allclose(new_obj[1][1], inst, rtol=0.0, size_as_tensor=True)
-
-        self._check_schema(schema)
-
-    def test_allow_non_tensor(self):
-        data = (torch.tensor([5, 8]), 3)  # contains non-tensor
-
-        class M(nn.Module):
-            def forward(self, input, number):
-                return input
-
-        model = M()
-        with self.assertRaisesRegex(ValueError, "must only contain tensors"):
-            adap = TracingAdapter(model, data, allow_non_tensor=False)
-
-        adap = TracingAdapter(model, data, allow_non_tensor=True)
-        _ = adap(*adap.flattened_inputs)
-
-        newdata = (data[0].clone(),)
-        with self.assertRaisesRegex(ValueError, "cannot generalize"):
-            _ = adap(*newdata)
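
Of the utilities this test exercises, the flatten/schema round trip in test_flatten_basic is the one most useful outside the suite, so here is a brief usage sketch (assuming detectron2 is installed; the nested object is made up):

from detectron2.export.flatten import flatten_to_tuple

obj = [3, ([5, 6], {"name": [7, 9], "name2": 3})]
flat, schema = flatten_to_tuple(obj)   # flat == (3, 5, 6, 7, 9, 3)
assert schema(flat) == obj             # schema is a callable inverse

This round trip is what lets TracingAdapter feed arbitrarily nested detectron2 inputs and outputs through torch.jit.trace, which only understands flat tuples of tensors.
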
spaces/Beasto/Face_To_Anime_Cyclegan/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Face To Anime Cyclegan
-emoji: 🌖
-colorFrom: red
-colorTo: red
-sdk: streamlit
-sdk_version: 1.27.2
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Benson/text-generation/Examples/Descarga De Impacto De Genshin Qooapp.md
DELETED
@@ -1,116 +0,0 @@
-
-<h1>Genshin Impact Qooapp Download: How to Play the Epic MMORPG on Your Android Device</h1>
-<p>If you are a fan of open-world RPGs, you may have heard of Genshin Impact, one of the most popular and acclaimed games of 2020. Genshin Impact is an MMORPG developed by miHoYo Limited, the same company behind the hit anime-style game Honkai Impact 3rd. In this article, we will show you how to download and play Genshin Impact on your Android device using QooApp, a third-party app store that offers a wide range of games from Asia. But first, let's take a look at what Genshin Impact is and why you should play it.</p>
-<h2>genshin impact qooapp download</h2><br /><p><b><b>Download</b> –––––>>> <a href="https://bltlly.com/2v6KAx">https://bltlly.com/2v6KAx</a></b></p><br /><br />
-<h2>What is Genshin Impact?</h2>
-<h3>A brief introduction to the game and its features</h3>
-<p>Genshin Impact is a game set in a fantasy world called Teyvat, where seven nations are ruled by seven gods of different elements. You play as a traveler who has lost their sibling in a mysterious incident, and you set out on a quest to find them and uncover the secrets of this world. Along the way, you will meet various characters who join you as companions, each with their own unique personality, abilities, and story.</p>
-<p>Genshin Impact is a game that offers a great deal of freedom and exploration. You can run, climb, swim, glide, and fly across a vast open world full of stunning scenery, hidden treasures, puzzles, challenges, enemies, and surprises. You can also switch between characters at any time and use their elemental powers to create combos and reactions that help in combat or exploration. You can also customize your characters with different weapons, artifacts, talents, and constellations to suit your playstyle.</p>
-<h3>The benefits of playing Genshin Impact on Android</h3>
-
-<p>Playing Genshin Impact on Android has some advantages over other platforms. For one, you can use touch controls that are intuitive and easy to use. You can also adjust the game settings to optimize performance and battery life according to your device's specifications. In addition, you can use some in-game features that are exclusive to mobile devices, such as taking screenshots or recording videos with a single tap.</p>
-<h2>What is QooApp?</h2>
-<h3>A brief introduction to the app store and its features</h3>
-<p>QooApp is a third-party app store that specializes in games from Asia, especially Japan, Korea, China, and Taiwan. It offers a large selection of games across various genres, such as RPGs, action games, simulation games, card games, and more. You can also find games that are not available on the official Google Play store, such as Genshin Impact, Fate/Grand Order, Honkai Impact 3rd, and more.</p>
-<p></p>
-<p>QooApp is a safe and reliable app store that does not require you to root your device or use a VPN. You can download and install games from QooApp without problems, and you can update them automatically or manually. QooApp also has a user-friendly interface that lets you browse, search, and filter games by category, region, language, rating, and popularity. You can also read game reviews, news, guides, and tips from other users and QooApp staff.</p>
-<h3>The benefits of using QooApp to download Genshin Impact</h3>
-<p>One reason you might want to use QooApp to download Genshin Impact is that it is faster and easier than using the official website. You do not have to go through the hassle of scanning a QR code or entering a verification code to download the game. You can simply search for Genshin Impact on QooApp and tap the download button. QooApp will also notify you when there is a new update for the game, so you can always keep your game up to date.</p>
-
-<h2>How to download and install Genshin Impact from QooApp?</h2>
-<h3>Step 1: Download and install QooApp on your Android device</h3>
-<p>The first step to downloading Genshin Impact from QooApp is to download and install QooApp on your Android device. You can do this by following these simple steps:</p>
-<ul>
-<li>Go to the official QooApp website: <a href="https://www.qoo-app.com/en">https://www.qoo-app.com/en</a></li>
-<li>Tap the "Download" button in the top right corner of the screen.</li>
-<li>A pop-up window will appear asking you to allow QooApp to download files on your device. Tap "Allow".</li>
-<li>The QooApp APK file will start downloading to your device. Once it is done, tap the file to open it.</li>
-<li>A pop-up window will appear asking you to install QooApp on your device. Tap "Install".</li>
-<li>Wait for the installation process to finish. Once it is done, tap "Open" to launch QooApp.</li>
-</ul>
-<h3>Step 2: Search for Genshin Impact on QooApp and tap the download button</h3>
-<p>The next step is to search for Genshin Impact on QooApp and tap the download button. You can do this by following these simple steps:</p>
-<ul>
-<li>Open QooApp on your device and tap the magnifying-glass icon in the top right corner of the screen.</li>
-<li>Type "Genshin Impact" in the search bar and tap the "Search" button.</li>
-<li>You will see a list of results related to Genshin Impact. Tap the one that matches your preferred region and language. For example, if you want to play the global version of Genshin Impact in English, tap the one that says "Genshin Impact (EN)".</li>
-<li>You will see a page with more information about Genshin Impact, such as its description, screenshots, videos, ratings, reviews, and more. Tap the green "Download" button at the bottom of the screen.</li>
-<li>A pop-up window will appear asking you to confirm your download. Tap "OK".</li>
-
-</ul> <h3>Step 3: Wait for the download to finish and tap the install button</h3>
-<p>The third step is to wait for the download to finish and tap the install button. You can do this by following these simple steps:</p>
-<ul>
-<li>Once the Genshin Impact APK file has downloaded, you will see a notification on your device. Tap the notification to open it.</li>
-<li>A pop-up window will appear asking you to install Genshin Impact on your device. Tap "Install".</li>
-<li>Wait for the installation process to finish. It may take a few minutes depending on your device and Internet speed.</li>
-<li>Once the installation is done, you will see a message that says "App installed". Tap "Open" to launch Genshin Impact.</li>
-</ul>
-<h3>Step 4: Launch Genshin Impact and enjoy the game</h3>
-<p>The final step is to launch Genshin Impact and enjoy the game. You can do this by following these simple steps:</p>
-<ul>
-<li>When you launch Genshin Impact for the first time, you will see a splash screen with the game's logo and some loading messages. Wait for the game to load.</li>
-<li>You will see a screen with some terms and conditions. Read them carefully and tap "Accept" if you agree to them.</li>
-<li>You will see a screen with options to link your data across different platforms. You can sign in with your miHoYo account, Facebook account, Twitter account, or Apple ID. You can also play as a guest, but you will not be able to link your data or access some features. Choose the option that suits you best and follow the instructions.</li>
-<li>You will see a screen with options to select the server region and language. Choose the ones that match your preferences and tap "Confirm".</li>
-
-<li>You will see a cinematic intro video that introduces the story and characters of Genshin Impact. You can watch it or skip it by tapping the screen.</li>
-<li>You will see a screen with options to select your character's gender, name, and birthday. Choose the ones that suit you best and tap "Confirm".</li>
-<li>You will see some tutorial messages that explain how to play the game. Follow them and begin your adventure in Teyvat.</li>
-</ul>
-<h2>Tips and tricks for playing Genshin Impact on Android</h2>
-<h3>How to link your data across different platforms</h3>
-<p>As we mentioned before, one of the benefits of playing Genshin Impact on Android is that you can link your data across different platforms using your miHoYo account. This means you can play the same game with the same progress, characters, items, and settings on different devices such as PC, PS4, PS5, iOS, and Android. To do this, follow these simple steps:</p>
-<ul>
-<li>Create a miHoYo account if you do not already have one. You can do this by going to <a href="https://account.mihoyo.com/#/register">https://account.mihoyo.com/#/register</a> and filling in your email address, password, verification code, and nickname.</li>
-<li>Sign in with your miHoYo account on any device where you want to play Genshin Impact. You can do this by going to the in-game settings menu and tapping "Account" > "Sign in".</li>
-<li>Select the same server region and language that you used on your other devices. You can do this by going to the in-game settings menu and tapping "Other" > "Language".</li>
-<li>Enjoy playing Genshin Impact with your data linked across different platforms.</li>
-</ul> <h3>How to optimize the game settings for better performance and battery life</h3>
-
-<ul>
-<li>Go to the in-game settings menu and tap "Graphics".</li>
-<li>You will see a slider that lets you adjust the graphics quality from lowest to highest. You can also tap "Custom" to tune each setting individually, such as render resolution, FPS, anti-aliasing, shadows, visual effects, and more.</li>
-<li>Choose the graphics quality that suits your device's capabilities and your personal preferences. In general, the lower the graphics quality, the better the performance and battery life, but the worse the visuals; the higher the graphics quality, the opposite is true.</li>
-<li>You can also enable or disable some features that can affect performance and battery life, such as auto-adjust graphics, co-op mode optimization, and battery saver mode. You can find these in the settings menu under "Other".</li>
-<li>Save your changes and enjoy playing Genshin Impact with your optimized game settings.</li>
-</ul>
-<h3>How to use the game's features and functions effectively</h3>
-<p>The last benefit of playing Genshin Impact on Android is that you can use some game features and functions that are exclusive to, or more convenient on, mobile devices. These can improve your gaming experience and make your life easier. Here are some examples:</p>
-<ul>
-<li>You can use touch controls to move, attack, interact, switch characters, use elemental skills and bursts, open menus, and more. You can also customize your touch controls by going to the settings menu under "Controls" > "Customize".</li>
-<li>You can use gestures to perform some actions faster or more easily, such as swiping up to open the map, swiping down to open notifications, swiping left or right to switch characters, pinching to zoom in or out, and tapping with two fingers to open the pause menu.</li>
-
-<li>You can use screenshots or videos to capture your gameplay moments and share them with others. You can take screenshots by tapping the camera icon in the top left corner of the screen, record videos by tapping the video icon in the same corner, and edit either by tapping the gallery icon there as well.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Genshin Impact is a game you should try if you like open-world RPGs. It offers plenty of freedom, exploration, adventure, and fun, and you can play it on different platforms, including Android devices. If you want to play Genshin Impact on Android, you may want to use QooApp to download it instead of the official website. QooApp is a third-party app store that offers a faster and easier way to download Genshin Impact, as well as some exclusive content and events only available for certain regions. To download Genshin Impact from QooApp, you only need to follow four simple steps: download and install QooApp on your device, search for Genshin Impact on QooApp and tap the download button, wait for the download to finish and tap the install button, and launch Genshin Impact and enjoy the game. You can also optimize your game settings for better performance and battery life, and use some game features and functions that are exclusive to or convenient on mobile devices.</p>
-<p>If you are interested in playing Genshin Impact on Android devices using QooApp, do not hesitate any longer. Download QooApp now and start your journey in Teyvat today!</p>
-<h2>Frequently asked questions</h2>
-<h3>Is Genshin Impact free to play?</h3>
-<p>Yes, Genshin Impact is free to play. You do not have to pay anything to download or play it. However, it has some optional in-game purchases that can help you progress faster or obtain more items.</p>
-<h3>Is Genshin Impact safe to play?</h3>
-
-<h3>Is it legal to use QooApp?</h3>
-<p>Yes, QooApp is legal to use. It does not violate any laws or regulations prohibiting the distribution or consumption of games from different regions, and it does not modify or hack the games it offers. However, you should always check the terms and conditions of the games you download from QooApp and make sure you do not violate any rules or agreements they have.</p>
-<h3>Can I play Genshin Impact with my friends?</h3>
-<p>Yes, you can play Genshin Impact with your friends. Genshin Impact has a co-op mode that lets you team up with up to three other players to explore the world, complete quests, fight enemies, and more. You can also join or create a friends list that lets you chat, send gifts, and invite each other to co-op mode. To access co-op mode or the friends list, you must first reach Adventure Rank 16.</p>
-<h3>What are the minimum requirements for playing Genshin Impact on Android devices?</h3>
-<p>The minimum requirements for playing Genshin Impact on Android devices are as follows:</p>
-<table>
-<tr>
-<th>OS</th>
-<th>RAM</th>
-<th>CPU</th>
-<th>GPU</th>
-<th>Storage</th>
-</tr>
-<tr>
-<td>Android 7.0 or higher</td>
-<td>3 GB or more</td>
-<td>64-bit Arm v8a device</td>
-<td>Supports OpenGL ES 3.1 or higher</td>
-<td>8 GB or more</td>
-</tr>
-</table></p>
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_emoji_codes.py
DELETED
The diff for this file is too large to render.
See raw diff
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py
DELETED
@@ -1,314 +0,0 @@
-"""
-This module provides a pool manager that uses Google App Engine's
-`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
-
-Example usage::
-
-    from pip._vendor.urllib3 import PoolManager
-    from pip._vendor.urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
-
-    if is_appengine_sandbox():
-        # AppEngineManager uses AppEngine's URLFetch API behind the scenes
-        http = AppEngineManager()
-    else:
-        # PoolManager uses a socket-level API behind the scenes
-        http = PoolManager()
-
-    r = http.request('GET', 'https://google.com/')
-
-There are `limitations <https://cloud.google.com/appengine/docs/python/\
-urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
-the best choice for your application. There are three options for using
-urllib3 on Google App Engine:
-
-1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
-   cost-effective in many circumstances as long as your usage is within the
-   limitations.
-2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
-   Sockets also have `limitations and restrictions
-   <https://cloud.google.com/appengine/docs/python/sockets/\
-   #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
-   To use sockets, be sure to specify the following in your ``app.yaml``::
-
-       env_variables:
-           GAE_USE_SOCKETS_HTTPLIB : 'true'
-
-3. If you are using `App Engine Flexible
-<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
-:class:`PoolManager` without any configuration or special environment variables.
-"""
-
-from __future__ import absolute_import
-
-import io
-import logging
-import warnings
-
-from ..exceptions import (
-    HTTPError,
-    HTTPWarning,
-    MaxRetryError,
-    ProtocolError,
-    SSLError,
-    TimeoutError,
-)
-from ..packages.six.moves.urllib.parse import urljoin
-from ..request import RequestMethods
-from ..response import HTTPResponse
-from ..util.retry import Retry
-from ..util.timeout import Timeout
-from . import _appengine_environ
-
-try:
-    from google.appengine.api import urlfetch
-except ImportError:
-    urlfetch = None
-
-
-log = logging.getLogger(__name__)
-
-
-class AppEnginePlatformWarning(HTTPWarning):
-    pass
-
-
-class AppEnginePlatformError(HTTPError):
-    pass
-
-
-class AppEngineManager(RequestMethods):
-    """
-    Connection manager for Google App Engine sandbox applications.
-
-    This manager uses the URLFetch service directly instead of using the
-    emulated httplib, and is subject to URLFetch limitations as described in
-    the App Engine documentation `here
-    <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
-
-    Notably it will raise an :class:`AppEnginePlatformError` if:
-        * URLFetch is not available.
-        * If you attempt to use this on App Engine Flexible, as full socket
-          support is available.
-        * If a request size is more than 10 megabytes.
-        * If a response size is more than 32 megabytes.
-        * If you use an unsupported request method such as OPTIONS.
-
-    Beyond those cases, it will raise normal urllib3 errors.
-    """
-
-    def __init__(
-        self,
-        headers=None,
-        retries=None,
-        validate_certificate=True,
-        urlfetch_retries=True,
-    ):
-        if not urlfetch:
-            raise AppEnginePlatformError(
-                "URLFetch is not available in this environment."
-            )
-
-        warnings.warn(
-            "urllib3 is using URLFetch on Google App Engine sandbox instead "
-            "of sockets. To use sockets directly instead of URLFetch see "
-            "https://urllib3.readthedocs.io/en/1.26.x/reference/urllib3.contrib.html.",
-            AppEnginePlatformWarning,
-        )
-
-        RequestMethods.__init__(self, headers)
-        self.validate_certificate = validate_certificate
-        self.urlfetch_retries = urlfetch_retries
-
-        self.retries = retries or Retry.DEFAULT
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        # Return False to re-raise any potential exceptions
-        return False
-
-    def urlopen(
-        self,
-        method,
-        url,
-        body=None,
-        headers=None,
-        retries=None,
-        redirect=True,
-        timeout=Timeout.DEFAULT_TIMEOUT,
-        **response_kw
-    ):
-
-        retries = self._get_retries(retries, redirect)
-
-        try:
-            follow_redirects = redirect and retries.redirect != 0 and retries.total
-            response = urlfetch.fetch(
-                url,
-                payload=body,
-                method=method,
-                headers=headers or {},
-                allow_truncated=False,
-                follow_redirects=self.urlfetch_retries and follow_redirects,
-                deadline=self._get_absolute_timeout(timeout),
-                validate_certificate=self.validate_certificate,
-            )
-        except urlfetch.DeadlineExceededError as e:
-            raise TimeoutError(self, e)
-
-        except urlfetch.InvalidURLError as e:
-            if "too large" in str(e):
-                raise AppEnginePlatformError(
-                    "URLFetch request too large, URLFetch only "
-                    "supports requests up to 10mb in size.",
-                    e,
-                )
-            raise ProtocolError(e)
-
-        except urlfetch.DownloadError as e:
-            if "Too many redirects" in str(e):
-                raise MaxRetryError(self, url, reason=e)
-            raise ProtocolError(e)
-
-        except urlfetch.ResponseTooLargeError as e:
-            raise AppEnginePlatformError(
-                "URLFetch response too large, URLFetch only supports"
-                "responses up to 32mb in size.",
-                e,
-            )
-
-        except urlfetch.SSLCertificateError as e:
-            raise SSLError(e)
-
-        except urlfetch.InvalidMethodError as e:
-            raise AppEnginePlatformError(
-                "URLFetch does not support method: %s" % method, e
-            )
-
-        http_response = self._urlfetch_response_to_http_response(
-            response, retries=retries, **response_kw
-        )
-
-        # Handle redirect?
-        redirect_location = redirect and http_response.get_redirect_location()
-        if redirect_location:
-            # Check for redirect response
-            if self.urlfetch_retries and retries.raise_on_redirect:
-                raise MaxRetryError(self, url, "too many redirects")
-            else:
-                if http_response.status == 303:
-                    method = "GET"
-
-                try:
-                    retries = retries.increment(
-                        method, url, response=http_response, _pool=self
-                    )
-                except MaxRetryError:
-                    if retries.raise_on_redirect:
-                        raise MaxRetryError(self, url, "too many redirects")
-                    return http_response
-
-                retries.sleep_for_retry(http_response)
-                log.debug("Redirecting %s -> %s", url, redirect_location)
-                redirect_url = urljoin(url, redirect_location)
-                return self.urlopen(
-                    method,
-                    redirect_url,
-                    body,
-                    headers,
-                    retries=retries,
-                    redirect=redirect,
-                    timeout=timeout,
-                    **response_kw
-                )
-
-        # Check if we should retry the HTTP response.
-        has_retry_after = bool(http_response.headers.get("Retry-After"))
-        if retries.is_retry(method, http_response.status, has_retry_after):
-            retries = retries.increment(method, url, response=http_response, _pool=self)
-            log.debug("Retry: %s", url)
-            retries.sleep(http_response)
-            return self.urlopen(
-                method,
-                url,
-                body=body,
-                headers=headers,
-                retries=retries,
-                redirect=redirect,
-                timeout=timeout,
-                **response_kw
-            )
-
-        return http_response
-
-    def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
-
-        if is_prod_appengine():
-            # Production GAE handles deflate encoding automatically, but does
-            # not remove the encoding header.
-            content_encoding = urlfetch_resp.headers.get("content-encoding")
-
-            if content_encoding == "deflate":
-                del urlfetch_resp.headers["content-encoding"]
-
-        transfer_encoding = urlfetch_resp.headers.get("transfer-encoding")
-        # We have a full response's content,
-        # so let's make sure we don't report ourselves as chunked data.
-        if transfer_encoding == "chunked":
-            encodings = transfer_encoding.split(",")
-            encodings.remove("chunked")
-            urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings)
-
-        original_response = HTTPResponse(
-            # In order for decoding to work, we must present the content as
-            # a file-like object.
-            body=io.BytesIO(urlfetch_resp.content),
-            msg=urlfetch_resp.header_msg,
-            headers=urlfetch_resp.headers,
-            status=urlfetch_resp.status_code,
-            **response_kw
-        )
-
-        return HTTPResponse(
-            body=io.BytesIO(urlfetch_resp.content),
-            headers=urlfetch_resp.headers,
-            status=urlfetch_resp.status_code,
-            original_response=original_response,
-            **response_kw
-        )
-
-    def _get_absolute_timeout(self, timeout):
-        if timeout is Timeout.DEFAULT_TIMEOUT:
-            return None  # Defer to URLFetch's default.
-        if isinstance(timeout, Timeout):
-            if timeout._read is not None or timeout._connect is not None:
-                warnings.warn(
-                    "URLFetch does not support granular timeout settings, "
-                    "reverting to total or default URLFetch timeout.",
-                    AppEnginePlatformWarning,
-                )
-            return timeout.total
-        return timeout
-
-    def _get_retries(self, retries, redirect):
-        if not isinstance(retries, Retry):
-            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
-
-        if retries.connect or retries.read or retries.redirect:
-            warnings.warn(
-                "URLFetch only supports total retries and does not "
-                "recognize connect, read, or redirect retry parameters.",
-                AppEnginePlatformWarning,
-            )
-
-        return retries
-
-
-# Alias methods from _appengine_environ to maintain public API interface.
-
-is_appengine = _appengine_environ.is_appengine
-is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
-is_local_appengine = _appengine_environ.is_local_appengine
-is_prod_appengine = _appengine_environ.is_prod_appengine
-is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
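
A short usage sketch following the pattern in the module docstring above. It assumes code running inside a first-generation App Engine standard sandbox, where google.appengine.api.urlfetch is importable; anywhere else the constructor raises AppEnginePlatformError.

from pip._vendor.urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
from pip._vendor.urllib3.util.timeout import Timeout

if is_appengine_sandbox():
    # URLFetch only honours one total deadline, so granular timeouts are ignored.
    http = AppEngineManager(retries=2, urlfetch_retries=False)
    r = http.urlopen("GET", "https://example.com/", timeout=Timeout(total=10))
    print(r.status, len(r.data))
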
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/errors.py
DELETED
@@ -1,127 +0,0 @@
-"""distutils.errors
-
-Provides exceptions used by the Distutils modules. Note that Distutils
-modules may raise standard exceptions; in particular, SystemExit is
-usually raised for errors that are obviously the end-user's fault
-(eg. bad command-line arguments).
-
-This module is safe to use in "from ... import *" mode; it only exports
-symbols whose names start with "Distutils" and end with "Error"."""
-
-
-class DistutilsError(Exception):
-    """The root of all Distutils evil."""
-
-    pass
-
-
-class DistutilsModuleError(DistutilsError):
-    """Unable to load an expected module, or to find an expected class
-    within some module (in particular, command modules and classes)."""
-
-    pass
-
-
-class DistutilsClassError(DistutilsError):
-    """Some command class (or possibly distribution class, if anyone
-    feels a need to subclass Distribution) is found not to be holding
-    up its end of the bargain, ie. implementing some part of the
-    "command "interface."""
-
-    pass
-
-
-class DistutilsGetoptError(DistutilsError):
-    """The option table provided to 'fancy_getopt()' is bogus."""
-
-    pass
-
-
-class DistutilsArgError(DistutilsError):
-    """Raised by fancy_getopt in response to getopt.error -- ie. an
-    error in the command line usage."""
-
-    pass
-
-
-class DistutilsFileError(DistutilsError):
-    """Any problems in the filesystem: expected file not found, etc.
-    Typically this is for problems that we detect before OSError
-    could be raised."""
-
-    pass
-
-
-class DistutilsOptionError(DistutilsError):
-    """Syntactic/semantic errors in command options, such as use of
-    mutually conflicting options, or inconsistent options,
-    badly-spelled values, etc. No distinction is made between option
-    values originating in the setup script, the command line, config
-    files, or what-have-you -- but if we *know* something originated in
-    the setup script, we'll raise DistutilsSetupError instead."""
-
-    pass
-
-
-class DistutilsSetupError(DistutilsError):
-    """For errors that can be definitely blamed on the setup script,
-    such as invalid keyword arguments to 'setup()'."""
-
-    pass
-
-
-class DistutilsPlatformError(DistutilsError):
-    """We don't know how to do something on the current platform (but
-    we do know how to do it on some platform) -- eg. trying to compile
-    C files on a platform not supported by a CCompiler subclass."""
-
-    pass
-
-
-class DistutilsExecError(DistutilsError):
-    """Any problems executing an external program (such as the C
-    compiler, when compiling C files)."""
-
-    pass
-
-
-class DistutilsInternalError(DistutilsError):
-    """Internal inconsistencies or impossibilities (obviously, this
-    should never be seen if the code is working!)."""
-
-    pass
-
-
-class DistutilsTemplateError(DistutilsError):
-    """Syntax error in a file list template."""
-
-
-class DistutilsByteCompileError(DistutilsError):
-    """Byte compile error."""
-
-
-# Exception classes used by the CCompiler implementation classes
-class CCompilerError(Exception):
-    """Some compile/link operation failed."""
-
-
-class PreprocessError(CCompilerError):
-    """Failure to preprocess one or more C/C++ files."""
-
-
-class CompileError(CCompilerError):
-    """Failure to compile one or more C/C++ source files."""
-
-
-class LibError(CCompilerError):
-    """Failure to create a static library from one or more C/C++ object
-    files."""
-
-
-class LinkError(CCompilerError):
-    """Failure to link one or more C/C++ object files into an executable
-    or shared library file."""
-
-
-class UnknownFileError(CCompilerError):
-    """Attempt to process an unknown file type."""
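
A small sketch of how this hierarchy is meant to be consumed. Note that CCompilerError derives from Exception directly, not from DistutilsError, so the two families must be caught separately:

from distutils.errors import CCompilerError, CompileError, DistutilsError

try:
    raise CompileError("error: invalid use of incomplete type")
except CCompilerError as exc:
    # CompileError, LinkError, LibError, PreprocessError all land here
    print("compiler step failed:", exc)
except DistutilsError:
    # setup-script and option errors would land here instead
    raise
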
spaces/Billyosoro/ESRGAN/realesrgan/weights/README.md
DELETED
@@ -1,3 +0,0 @@
-# Weights
-
-Put the downloaded weights to this folder.
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/backbone/__init__.py
DELETED
@@ -1,8 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from .build import build_backbone, BACKBONE_REGISTRY  # noqa F401 isort:skip
-
-from .backbone import Backbone
-from .fpn import FPN
-from .resnet import ResNet, ResNetBlockBase, build_resnet_backbone, make_stage
-
-# TODO can expose more resnet blocks after careful consideration
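
For context, a brief sketch of how these re-exports are normally used (assuming detectron2 is installed): build_backbone looks up cfg.MODEL.BACKBONE.NAME in BACKBONE_REGISTRY and returns a Backbone whose per-feature output shapes downstream heads consume.

from detectron2.config import get_cfg
from detectron2.modeling import build_backbone

cfg = get_cfg()                 # default BACKBONE.NAME is "build_resnet_backbone"
backbone = build_backbone(cfg)  # registry lookup happens inside
print(backbone.output_shape())  # e.g. {"res4": ShapeSpec(channels=1024, stride=16, ...)}
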
spaces/CVPR/DualStyleGAN/images/README.md
DELETED
@@ -1,6 +0,0 @@
-These images are freely-usable ones from [Unsplash](https://unsplash.com/).
-
-- https://unsplash.com/photos/rDEOVtE7vOs
-- https://unsplash.com/photos/et_78QkMMQs
-- https://unsplash.com/photos/ILip77SbmOE
-- https://unsplash.com/photos/95UF6LXe-Lo
spaces/CVPR/LIVE/pybind11/tests/test_embed/test_interpreter.py
DELETED
@@ -1,10 +0,0 @@
-# -*- coding: utf-8 -*-
-from widget_module import Widget
-
-
-class DerivedWidget(Widget):
-    def __init__(self, message):
-        super(DerivedWidget, self).__init__(message)
-
-    def the_answer(self):
-        return 42
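
This module only makes sense inside pybind11's embedded-interpreter test: widget_module is registered by the C++ test binary, not installed from PyPI. A hypothetical sketch of how the embedding side drives it, treating the class as a plain Python object:

# Executed by the embedded interpreter once widget_module is registered:
from test_interpreter import DerivedWidget  # the module above

w = DerivedWidget("hello")   # forwards the message to the C++ Widget base
assert w.the_answer() == 42  # the C++ side can call this override via a trampoline
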
spaces/Chandrasekahar2k/KVCSekharGenAIBot/app.py
DELETED
@@ -1,34 +0,0 @@
-import os
-import gradio as gr
-from langchain.chat_models import ChatOpenAI
-from langchain import LLMChain, PromptTemplate
-from langchain.memory import ConversationBufferMemory
-
-OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
-
-template = """Hello, I'm Chandra Sekhar, your personal assistant, and I'm here to answer all of your questions and clarify any doubts you may have.
-{chat_history}
-User: {user_message}
-Chatbot:"""
-
-prompt = PromptTemplate(
-    input_variables=["chat_history", "user_message"], template=template
-)
-
-memory = ConversationBufferMemory(memory_key="chat_history")
-
-llm_chain = LLMChain(
-    llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
-    prompt=prompt,
-    verbose=True,
-    memory=memory,
-)
-
-def get_text_response(user_message, history):
-    response = llm_chain.predict(user_message=user_message)
-    return response
-
-demo = gr.ChatInterface(get_text_response)
-
-if __name__ == "__main__":
-    demo.launch()  # To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
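
A minimal sketch of the piece doing the real work in this app: ConversationBufferMemory stores each exchange and re-injects it through the {chat_history} placeholder on the next call, which is what makes llm_chain.predict conversational rather than stateless.

from langchain.memory import ConversationBufferMemory

mem = ConversationBufferMemory(memory_key="chat_history")
mem.save_context({"input": "Hi"}, {"output": "Hello! How can I help?"})
# This string is substituted for {chat_history} in the prompt template:
print(mem.load_memory_variables({})["chat_history"])
# -> Human: Hi
#    AI: Hello! How can I help?
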
spaces/ChrisPreston/diff-svc_minato_aqua/utils/pl_utils.py
DELETED
@@ -1,1625 +0,0 @@
|
|
1 |
-
import matplotlib
|
2 |
-
from torch.nn import DataParallel
|
3 |
-
from torch.nn.parallel import DistributedDataParallel
|
4 |
-
|
5 |
-
matplotlib.use('Agg')
|
6 |
-
import glob
|
7 |
-
import itertools
|
8 |
-
import subprocess
|
9 |
-
import threading
|
10 |
-
import traceback
|
11 |
-
|
12 |
-
from pytorch_lightning.callbacks import GradientAccumulationScheduler
|
13 |
-
from pytorch_lightning.callbacks import ModelCheckpoint
|
14 |
-
|
15 |
-
from functools import wraps
|
16 |
-
from torch.cuda._utils import _get_device_index
|
17 |
-
import numpy as np
|
18 |
-
import torch.optim
|
19 |
-
import torch.utils.data
|
20 |
-
import copy
|
21 |
-
import logging
|
22 |
-
import os
|
23 |
-
import re
|
24 |
-
import sys
|
25 |
-
import torch
|
26 |
-
import torch.distributed as dist
|
27 |
-
import torch.multiprocessing as mp
|
28 |
-
import tqdm
|
29 |
-
from torch.optim.optimizer import Optimizer
|
30 |
-
|
31 |
-
|
32 |
-
def get_a_var(obj): # pragma: no cover
|
33 |
-
if isinstance(obj, torch.Tensor):
|
34 |
-
return obj
|
35 |
-
|
36 |
-
if isinstance(obj, list) or isinstance(obj, tuple):
|
37 |
-
for result in map(get_a_var, obj):
|
38 |
-
if isinstance(result, torch.Tensor):
|
39 |
-
return result
|
40 |
-
if isinstance(obj, dict):
|
41 |
-
for result in map(get_a_var, obj.items()):
|
42 |
-
if isinstance(result, torch.Tensor):
|
43 |
-
return result
|
44 |
-
return None
|
45 |
-
|
46 |
-
|
47 |
-
def data_loader(fn):
|
48 |
-
"""
|
49 |
-
Decorator to make any fx with this use the lazy property
|
50 |
-
:param fn:
|
51 |
-
:return:
|
52 |
-
"""
|
53 |
-
|
54 |
-
wraps(fn)
|
55 |
-
attr_name = '_lazy_' + fn.__name__
|
56 |
-
|
57 |
-
def _get_data_loader(self):
|
58 |
-
try:
|
59 |
-
value = getattr(self, attr_name)
|
60 |
-
except AttributeError:
|
61 |
-
try:
|
62 |
-
value = fn(self) # Lazy evaluation, done only once.
|
63 |
-
if (
|
64 |
-
value is not None and
|
65 |
-
not isinstance(value, list) and
|
66 |
-
fn.__name__ in ['test_dataloader', 'val_dataloader']
|
67 |
-
):
|
68 |
-
value = [value]
|
69 |
-
except AttributeError as e:
|
70 |
-
# Guard against AttributeError suppression. (Issue #142)
|
71 |
-
traceback.print_exc()
|
72 |
-
error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e)
|
73 |
-
raise RuntimeError(error) from e
|
74 |
-
setattr(self, attr_name, value) # Memoize evaluation.
|
75 |
-
return value
|
76 |
-
|
77 |
-
return _get_data_loader
|
78 |
-
|
79 |
-
|
80 |
-
def parallel_apply(modules, inputs, kwargs_tup=None, devices=None):  # pragma: no cover
    r"""Applies each `module` in :attr:`modules` in parallel on arguments
    contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
    on each of :attr:`devices`.

    Args:
        modules (list of Module): modules to be parallelized
        inputs (list of tensor or tuple): inputs to the modules
        kwargs_tup (tuple of dict, optional): keyword arguments for the modules
        devices (list of int or torch.device): CUDA devices

    :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
    :attr:`devices` (if given) should all have same length. Moreover, each
    element of :attr:`inputs` can either be a single object as the only argument
    to a module, or a collection of positional arguments.
    """
    assert len(modules) == len(inputs)
    if kwargs_tup is not None:
        assert len(modules) == len(kwargs_tup)
    else:
        kwargs_tup = ({},) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:
        devices = [None] * len(modules)
    devices = list(map(lambda x: _get_device_index(x, True), devices))
    lock = threading.Lock()
    results = {}
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input, kwargs, device=None):
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # this also avoids accidental slicing of `input` if it is a Tensor
                if not isinstance(input, (list, tuple)):
                    input = (input,)

                # ---------------
                # CHANGE: route to the step method matching the module's state
                if module.training:
                    output = module.training_step(*input, **kwargs)
                elif module.testing:
                    output = module.test_step(*input, **kwargs)
                else:
                    output = module.validation_step(*input, **kwargs)
                # ---------------

            with lock:
                results[i] = output
        except Exception as e:
            with lock:
                results[i] = e

    # make sure each module knows what training state it's in...
    # fixes weird bug where copies are out of sync
    root_m = modules[0]
    for m in modules[1:]:
        m.training = root_m.training
        m.testing = root_m.testing

    if len(modules) > 1:
        threads = [threading.Thread(target=_worker,
                                    args=(i, module, input, kwargs, device))
                   for i, (module, input, kwargs, device) in
                   enumerate(zip(modules, inputs, kwargs_tup, devices))]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])

    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, Exception):
            raise output
        outputs.append(output)
    return outputs

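
# --- Illustrative sketch (not part of the original file) ---------------------
# The patched parallel_apply above routes to training_step/validation_step
# instead of forward(). Toy module is hypothetical; requires >= 2 CUDA devices.
def _example_parallel_apply():
    class _Step(torch.nn.Module):
        testing = False

        def training_step(self, x):
            return x * 2

        def validation_step(self, x):
            return x + 1

    if torch.cuda.device_count() < 2:
        return None
    m1 = _Step().cuda(0).train()
    m2 = _Step().cuda(1).train()
    xs = [torch.ones(1, device='cuda:0'), 3 * torch.ones(1, device='cuda:1')]
    return parallel_apply([m1, m2], xs, devices=[0, 1])  # [tensor([2.]), tensor([6.])]
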
def _find_tensors(obj):  # pragma: no cover
    r"""
    Recursively find all tensors contained in the specified object.
    """
    if isinstance(obj, torch.Tensor):
        return [obj]
    if isinstance(obj, (list, tuple)):
        return itertools.chain(*map(_find_tensors, obj))
    if isinstance(obj, dict):
        return itertools.chain(*map(_find_tensors, obj.values()))
    return []


class DDP(DistributedDataParallel):
    """
    Override forward so calls are routed to training_step / validation_step /
    test_step as appropriate.
    """

    def parallel_apply(self, replicas, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def forward(self, *inputs, **kwargs):  # pragma: no cover
        self._sync_params()
        if self.device_ids:
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            if len(self.device_ids) == 1:
                # --------------
                # LIGHTNING MOD
                # --------------
                # normal
                # output = self.module(*inputs[0], **kwargs[0])
                # lightning
                if self.module.training:
                    output = self.module.training_step(*inputs[0], **kwargs[0])
                elif self.module.testing:
                    output = self.module.test_step(*inputs[0], **kwargs[0])
                else:
                    output = self.module.validation_step(*inputs[0], **kwargs[0])
            else:
                outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            # normal
            output = self.module(*inputs, **kwargs)

        if torch.is_grad_enabled():
            # We'll return the output object verbatim since it is a freeform
            # object. We need to find any tensors in this object, though,
            # because we need to figure out which parameters were used during
            # this forward pass, to ensure we short circuit reduction for any
            # unused parameters. Only if `find_unused_parameters` is set.
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        return output

class DP(DataParallel):
    """
    Override forward so calls are routed to training_step / validation_step /
    test_step as appropriate.
    """

    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)

        for t in itertools.chain(self.module.parameters(), self.module.buffers()):
            if t.device != self.src_device_obj:
                raise RuntimeError("module must have its parameters and buffers "
                                   "on device {} (device_ids[0]) but found one of "
                                   "them on device: {}".format(self.src_device_obj, t.device))

        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            # lightning
            if self.module.training:
                return self.module.training_step(*inputs[0], **kwargs[0])
            elif self.module.testing:
                return self.module.test_step(*inputs[0], **kwargs[0])
            else:
                return self.module.validation_step(*inputs[0], **kwargs[0])

        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def parallel_apply(self, replicas, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

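
# --- Illustrative sketch (not part of the original file) ---------------------
# Contract assumed by DP/DDP above: the wrapped module carries a `testing`
# flag plus training_step/validation_step/test_step. Toy model is
# hypothetical; requires a CUDA device.
def _example_dp_routing():
    class _Model(torch.nn.Module):
        testing = False

        def training_step(self, x):
            return {'loss': x.sum()}

        def validation_step(self, x):
            return {'val_loss': x.sum()}

        def test_step(self, x):
            return {'test_loss': x.sum()}

    if not torch.cuda.is_available():
        return None
    model = _Model().cuda(0).train()
    wrapped = DP(model, device_ids=[0])
    return wrapped(torch.ones(2, device='cuda:0'))  # -> {'loss': tensor(2.)}
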
class GradientAccumulationScheduler:
    def __init__(self, scheduling: dict):
        if scheduling == {}:  # empty dict error
            raise TypeError("Empty dict cannot be interpreted correctly")

        for key in scheduling.keys():
            if not isinstance(key, int) or not isinstance(scheduling[key], int):
                raise TypeError("All epochs and accumulation factors must be integers")

        minimal_epoch = min(scheduling.keys())
        if minimal_epoch < 1:
            msg = f"Epochs are indexed from 1; epoch {minimal_epoch} cannot be interpreted correctly"
            raise IndexError(msg)
        elif minimal_epoch != 1:  # if user didn't define the first epoch's accumulation factor
            scheduling.update({1: 1})

        self.scheduling = scheduling
        self.epochs = sorted(scheduling.keys())

    def on_epoch_begin(self, epoch, trainer):
        epoch += 1  # indexing epochs from 1
        for i in reversed(range(len(self.epochs))):
            if epoch >= self.epochs[i]:
                trainer.accumulate_grad_batches = self.scheduling.get(self.epochs[i])
                break

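
# --- Illustrative sketch (not part of the original file) ---------------------
# The scheduling dict maps "from epoch N (1-indexed)" to an accumulation
# factor. A minimal stand-in trainer object is used here for demonstration.
def _example_accumulation_schedule():
    class _Trainer:
        accumulate_grad_batches = 1

    # accumulate 1 batch on epochs 1-4, 4 batches on epochs 5-9, 8 afterwards
    scheduler = GradientAccumulationScheduler({5: 4, 10: 8})
    trainer = _Trainer()
    history = []
    for epoch in range(12):  # the trainer passes 0-indexed epochs
        scheduler.on_epoch_begin(epoch, trainer)
        history.append(trainer.accumulate_grad_batches)
    return history  # [1, 1, 1, 1, 4, 4, 4, 4, 4, 8, 8, 8]
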
class LatestModelCheckpoint(ModelCheckpoint):
    def __init__(self, filepath, monitor='val_loss', verbose=0, num_ckpt_keep=5,
                 save_weights_only=False, mode='auto', period=1, prefix='model', save_best=True):
        super(ModelCheckpoint, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        os.makedirs(filepath, exist_ok=True)
        self.num_ckpt_keep = num_ckpt_keep
        self.save_best = save_best
        self.save_weights_only = save_weights_only
        self.period = period
        self.epochs_since_last_check = 0
        self.prefix = prefix
        self.best_k_models = {}
        # {filename: monitor}
        self.kth_best_model = ''
        self.save_top_k = 1
        self.task = None
        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.inf
            self.mode = 'min'
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.inf
            self.mode = 'max'
        else:
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
                self.best = -np.inf
                self.mode = 'max'
            else:
                self.monitor_op = np.less
                self.best = np.inf
                self.mode = 'min'
        if os.path.exists(f'{self.filepath}/best_valid.npy'):
            self.best = np.load(f'{self.filepath}/best_valid.npy')[0]

    def get_all_ckpts(self):
        return sorted(glob.glob(f'{self.filepath}/{self.prefix}_ckpt_steps_*.ckpt'),
                      key=lambda x: -int(re.findall(r'.*steps_(\d+)\.ckpt', x)[0]))

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.epochs_since_last_check += 1
        best_filepath = f'{self.filepath}/{self.prefix}_ckpt_best.pt'
        if self.epochs_since_last_check >= self.period:
            self.epochs_since_last_check = 0
            filepath = f'{self.filepath}/{self.prefix}_ckpt_steps_{self.task.global_step}.ckpt'
            if self.verbose > 0:
                logging.info(f'Epoch {epoch:05d}@{self.task.global_step}: saving model to {filepath}')
            self._save_model(filepath)
            for old_ckpt in self.get_all_ckpts()[self.num_ckpt_keep:]:
                # TODO: test filesystem calls
                os.remove(old_ckpt)
                if self.verbose > 0:
                    logging.info(f'Deleted ckpt: {os.path.basename(old_ckpt)}')
            current = logs.get(self.monitor)
            if current is not None and self.save_best:
                if self.monitor_op(current, self.best):
                    self.best = current
                    if self.verbose > 0:
                        logging.info(
                            f'Epoch {epoch:05d}@{self.task.global_step}: {self.monitor} reached'
                            f' {current:0.5f} (best {self.best:0.5f}), saving model to'
                            f' {best_filepath} as top 1')
                    self._save_model(best_filepath)
                    np.save(f'{self.filepath}/best_valid.npy', [self.best])

    def _save_model(self, path):
        # `save_function` is attached by the trainer (see BaseTrainer.__init__)
        return self.save_function(path)

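
# --- Illustrative sketch (not part of the original file) ---------------------
# Typical wiring of the checkpoint callback; the path and values below are
# hypothetical. The trainer later attaches `save_function` and `task` itself.
def _example_checkpoint_callback():
    ckpt = LatestModelCheckpoint(
        filepath='checkpoints/my_exp',  # directory; created if missing
        monitor='val_loss',             # compared with np.less under mode='auto'
        num_ckpt_keep=5,                # keep the 5 most recent step ckpts
        period=1,                       # check every epoch
        prefix='model',
    )
    ckpt.save_function = lambda path: None  # stand-in for Trainer.save_checkpoint
    return ckpt
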
class BaseTrainer:
    def __init__(
            self,
            logger=True,
            checkpoint_callback=True,
            default_save_path=None,
            gradient_clip_val=0,
            process_position=0,
            gpus=-1,
            log_gpu_memory=None,
            show_progress_bar=True,
            track_grad_norm=-1,
            check_val_every_n_epoch=1,
            accumulate_grad_batches=1,
            max_updates=1000,
            min_epochs=1,
            val_check_interval=1.0,
            log_save_interval=100,
            row_log_interval=10,
            print_nan_grads=False,
            weights_summary='full',
            num_sanity_val_steps=5,
            resume_from_checkpoint=None,
    ):
        self.log_gpu_memory = log_gpu_memory
        self.gradient_clip_val = gradient_clip_val
        self.check_val_every_n_epoch = check_val_every_n_epoch
        self.track_grad_norm = track_grad_norm
        self.on_gpu = bool(gpus) and torch.cuda.is_available()
        self.process_position = process_position
        self.weights_summary = weights_summary
        self.max_updates = max_updates
        self.min_epochs = min_epochs
        self.num_sanity_val_steps = num_sanity_val_steps
        self.print_nan_grads = print_nan_grads
        self.resume_from_checkpoint = resume_from_checkpoint
        self.default_save_path = default_save_path

        # training bookkeeping
        self.total_batch_idx = 0
        self.running_loss = []
        self.avg_loss = 0
        self.batch_idx = 0
        self.tqdm_metrics = {}
        self.callback_metrics = {}
        self.num_val_batches = 0
        self.num_training_batches = 0
        self.num_test_batches = 0
        self.get_train_dataloader = None
        self.get_test_dataloaders = None
        self.get_val_dataloaders = None
        self.is_iterable_train_dataloader = False

        # training state
        self.model = None
        self.testing = False
        self.disable_validation = False
        self.lr_schedulers = []
        self.optimizers = None
        self.global_step = 0
        self.current_epoch = 0
        self.total_batches = 0

        # configure checkpoint callback
        # NOTE: a ModelCheckpoint-like instance is expected here; the default
        # `True` would fail on the attribute assignments below.
        self.checkpoint_callback = checkpoint_callback
        self.checkpoint_callback.save_function = self.save_checkpoint
        self.weights_save_path = self.checkpoint_callback.filepath

        # accumulated grads
        self.configure_accumulated_gradients(accumulate_grad_batches)

        # GPUs are selected via CUDA_VISIBLE_DEVICES (empty means CPU)
        self.data_parallel_device_ids = [
            int(x) for x in os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",") if x != '']
        if len(self.data_parallel_device_ids) == 0:
            self.root_gpu = None
            self.on_gpu = False
        else:
            self.root_gpu = self.data_parallel_device_ids[0]
            self.on_gpu = True

        # distributed backend choice
        self.use_ddp = False
        self.use_dp = False
        self.single_gpu = False
        self.distributed_backend = 'ddp' if self.num_gpus > 0 else 'dp'
        self.set_distributed_mode(self.distributed_backend)

        self.proc_rank = 0
        self.world_size = 1
        self.node_rank = 0

        # can't init progress bar here because starting a new process
        # means the progress_bar won't survive pickling
        self.show_progress_bar = show_progress_bar

        # logging
        self.log_save_interval = log_save_interval
        self.val_check_interval = val_check_interval
        # NOTE: a logger object exposing `rank`, `log_metrics`, `save` and
        # `finalize` is expected; the default `True` is only a placeholder.
        self.logger = logger
        self.logger.rank = 0
        self.row_log_interval = row_log_interval

    @property
    def num_gpus(self):
        gpus = self.data_parallel_device_ids
        if gpus is None:
            return 0
        return len(gpus)

    @property
    def data_parallel(self):
        return self.use_dp or self.use_ddp

    def get_model(self):
        is_dp_module = isinstance(self.model, (DDP, DP))
        model = self.model.module if is_dp_module else self.model
        return model

    # -----------------------------
    # MODEL TRAINING
    # -----------------------------
    def fit(self, model):
        if self.use_ddp:
            mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,))
        else:
            model.svc_model = model.build_model()
            if not self.testing:
                self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
            if self.use_dp:
                model.cuda(self.root_gpu)
                model = DP(model, device_ids=self.data_parallel_device_ids)
            elif self.single_gpu:
                model.cuda(self.root_gpu)
            self.run_pretrain_routine(model)
        return 1

    def init_optimizers(self, optimizers):
        # single optimizer
        if isinstance(optimizers, Optimizer):
            return [optimizers], []

        # two lists: [optimizers], [lr_schedulers]
        if len(optimizers) == 2 and isinstance(optimizers[0], list):
            optimizers, lr_schedulers = optimizers
            return optimizers, lr_schedulers

        # single list or tuple of optimizers
        if isinstance(optimizers, (list, tuple)):
            return optimizers, []

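    # --- Illustrative sketch (not part of the original file) -----------------
    # Return shapes accepted by init_optimizers above; `model` is any module
    # with parameters (hypothetical here).
    @staticmethod
    def _example_configure_optimizers(model):
        opt = torch.optim.Adam(model.parameters(), lr=1e-3)
        sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)
        return [opt], [sched]  # also accepted: `opt` alone, or a list/tuple of optimizers
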
    def run_pretrain_routine(self, model):
        """Sanity check a few things before starting actual training.

        :param model:
        """
        ref_model = model
        if self.data_parallel:
            ref_model = model.module

        # give model convenience properties
        ref_model.trainer = self

        # set local properties on the model
        self.copy_trainer_model_properties(ref_model)

        # link up experiment object
        if self.logger is not None:
            ref_model.logger = self.logger
            self.logger.save()

        if self.use_ddp:
            dist.barrier()

        # set up checkpoint callback
        # self.configure_checkpoint_callback()

        # transfer data loaders from model
        self.get_dataloaders(ref_model)

        # track model now.
        # if cluster resets state, the model will update with the saved weights
        self.model = model

        # restore training and model before hpc call
        self.restore_weights(model)

        # when testing is requested, only run test and return
        if self.testing:
            self.run_evaluation(test=True)
            return

        # check if we should run validation during training
        self.disable_validation = self.num_val_batches == 0

        # run tiny validation (if validation defined)
        # to make sure program won't crash during val
        ref_model.on_sanity_check_start()
        ref_model.on_train_start()
        if not self.disable_validation and self.num_sanity_val_steps > 0:
            # init progress bars for validation sanity check
            pbar = tqdm.tqdm(desc='Validation sanity check',
                             total=self.num_sanity_val_steps * len(self.get_val_dataloaders()),
                             leave=False, position=2 * self.process_position,
                             disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch')
            self.main_progress_bar = pbar
            # dummy validation progress bar
            self.val_progress_bar = tqdm.tqdm(disable=True)

            self.evaluate(model, self.get_val_dataloaders(), self.num_sanity_val_steps, self.testing)

            # close progress bars
            self.main_progress_bar.close()
            self.val_progress_bar.close()

        # init progress bar
        pbar = tqdm.tqdm(leave=True, position=2 * self.process_position,
                         disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
                         file=sys.stdout)
        self.main_progress_bar = pbar

        # clear cache before training
        if self.on_gpu:
            torch.cuda.empty_cache()

        # CORE TRAINING LOOP
        self.train()

    def test(self, model):
        self.testing = True
        self.fit(model)

    @property
    def training_tqdm_dict(self):
        tqdm_dict = {
            'step': '{}'.format(self.global_step),
        }
        tqdm_dict.update(self.tqdm_metrics)
        return tqdm_dict

    # --------------------
    # restore ckpt
    # --------------------
    def restore_weights(self, model):
        """
        Restore weights. If `resume_from_checkpoint` is set, restore from that
        path; otherwise look for the most recent checkpoint of this experiment.
        :param model:
        :return:
        """
        # clear cache before restore
        if self.on_gpu:
            torch.cuda.empty_cache()

        if self.resume_from_checkpoint is not None:
            self.restore(self.resume_from_checkpoint, on_gpu=self.on_gpu)
        else:
            # restore weights if same exp version
            self.restore_state_if_checkpoint_exists(model)

        # wait for all models to restore weights
        if self.use_ddp:
            # wait for all processes to catch up
            dist.barrier()

        # clear cache after restore
        if self.on_gpu:
            torch.cuda.empty_cache()

    def restore_state_if_checkpoint_exists(self, model):
        did_restore = False

        # do nothing if there's no dir or callback
        no_ckpt_callback = (self.checkpoint_callback is None) or (not self.checkpoint_callback)
        if no_ckpt_callback or not os.path.exists(self.checkpoint_callback.filepath):
            return did_restore

        # restore trainer state and model if there is a weight for this experiment
        last_steps = -1
        last_ckpt_name = None

        # find the checkpoint with the highest step count
        checkpoints = os.listdir(self.checkpoint_callback.filepath)
        for name in checkpoints:
            if '.ckpt' in name and not name.endswith('part'):
                if 'steps_' in name:
                    steps = name.split('steps_')[1]
                    steps = int(re.sub('[^0-9]', '', steps))

                    if steps > last_steps:
                        last_steps = steps
                        last_ckpt_name = name

        # restore last checkpoint
        if last_ckpt_name is not None:
            last_ckpt_path = os.path.join(self.checkpoint_callback.filepath, last_ckpt_name)
            self.restore(last_ckpt_path, self.on_gpu)
            logging.info(f'model and trainer restored from checkpoint: {last_ckpt_path}')
            did_restore = True

        return did_restore

    def restore(self, checkpoint_path, on_gpu):
        checkpoint = torch.load(checkpoint_path, map_location='cpu')

        # load model state
        model = self.get_model()

        # load the state_dict on the model automatically
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        if on_gpu:
            model.cuda(self.root_gpu)
        # load training state (affects trainer only)
        self.restore_training_state(checkpoint)
        model.global_step = self.global_step
        del checkpoint

        try:
            if dist.is_initialized() and dist.get_rank() > 0:
                return
        except Exception as e:
            print(e)
            return

    def restore_training_state(self, checkpoint):
        """
        Restore trainer state.
        Model will get its chance to update.
        :param checkpoint:
        :return:
        """
        if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
            # restore checkpoint meta information (best monitored value, etc.)
            self.checkpoint_callback.best = checkpoint['checkpoint_callback_best']

        self.global_step = checkpoint['global_step']
        self.current_epoch = checkpoint['epoch']

        if self.testing:
            return

        # restore the optimizers
        optimizer_states = checkpoint['optimizer_states']
        for optimizer, opt_state in zip(self.optimizers, optimizer_states):
            if optimizer is None:
                return
            optimizer.load_state_dict(opt_state)

            # move optimizer state to GPU one tensor at a time
            # avoids OOM
            if self.root_gpu is not None:
                for state in optimizer.state.values():
                    for k, v in state.items():
                        if isinstance(v, torch.Tensor):
                            state[k] = v.cuda(self.root_gpu)

        # restore the lr schedulers
        lr_schedulers = checkpoint['lr_schedulers']
        for scheduler, lrs_state in zip(self.lr_schedulers, lr_schedulers):
            scheduler.load_state_dict(lrs_state)

    # --------------------
    # MODEL SAVE CHECKPOINT
    # --------------------
    def _atomic_save(self, checkpoint, filepath):
        """Saves a checkpoint atomically, avoiding the creation of incomplete checkpoints.

        This writes a temporary checkpoint with a ``.part`` suffix, then renames
        it onto the final location once saving is finished.

        Args:
            checkpoint (object): The object to save.
                Built to be used with the ``dump_checkpoint`` method, but can deal with anything which ``torch.save``
                accepts.
            filepath (str|pathlib.Path): The path to which the checkpoint will be saved.
                This points to the file that the checkpoint will be stored in.
        """
        tmp_path = str(filepath) + ".part"
        torch.save(checkpoint, tmp_path)
        os.replace(tmp_path, filepath)

    def save_checkpoint(self, filepath):
        checkpoint = self.dump_checkpoint()
        self._atomic_save(checkpoint, filepath)

    def dump_checkpoint(self):
        checkpoint = {
            'epoch': self.current_epoch,
            'global_step': self.global_step
        }

        if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
            checkpoint['checkpoint_callback_best'] = self.checkpoint_callback.best

        # save optimizers
        optimizer_states = []
        for optimizer in self.optimizers:
            if optimizer is not None:
                optimizer_states.append(optimizer.state_dict())

        checkpoint['optimizer_states'] = optimizer_states

        # save lr schedulers
        lr_schedulers = []
        for scheduler in self.lr_schedulers:
            lr_schedulers.append(scheduler.state_dict())

        checkpoint['lr_schedulers'] = lr_schedulers

        # add the hparams and state_dict from the model
        model = self.get_model()
        checkpoint['state_dict'] = model.state_dict()
        # give the model a chance to add a few things
        model.on_save_checkpoint(checkpoint)

        return checkpoint

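    # --- Illustrative sketch (not part of the original file) -----------------
    # Shape of the dict produced by dump_checkpoint / consumed by
    # restore_training_state above (values here are placeholders).
    @staticmethod
    def _example_checkpoint_layout():
        return {
            'epoch': 12,
            'global_step': 48000,
            'checkpoint_callback_best': 0.137,  # best monitored value so far
            'optimizer_states': [],             # one state_dict per optimizer
            'lr_schedulers': [],                # one state_dict per scheduler
            'state_dict': {},                   # model weights
        }
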
    def copy_trainer_model_properties(self, model):
        if isinstance(model, (DP, DDP)):
            ref_model = model.module
        else:
            ref_model = model

        for m in [model, ref_model]:
            m.trainer = self
            m.on_gpu = self.on_gpu
            m.use_dp = self.use_dp
            m.use_ddp = self.use_ddp
            m.testing = self.testing
            m.single_gpu = self.single_gpu

    def transfer_batch_to_gpu(self, batch, gpu_id):
        # base case: object can be directly moved using `cuda` or `to`
        if callable(getattr(batch, 'cuda', None)):
            return batch.cuda(gpu_id, non_blocking=True)

        if callable(getattr(batch, 'to', None)):
            return batch.to(torch.device('cuda', gpu_id), non_blocking=True)

        # when list
        if isinstance(batch, list):
            for i, x in enumerate(batch):
                batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
            return batch

        # when tuple
        if isinstance(batch, tuple):
            batch = list(batch)
            for i, x in enumerate(batch):
                batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
            return tuple(batch)

        # when dict
        if isinstance(batch, dict):
            for k, v in batch.items():
                batch[k] = self.transfer_batch_to_gpu(v, gpu_id)

            return batch

        # nothing matches, return the value as is without transform
        return batch

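    # --- Illustrative sketch (not part of the original file) -----------------
    # transfer_batch_to_gpu recurses through lists/tuples/dicts; a nested
    # batch like the hypothetical one below is moved leaf by leaf, while
    # non-tensor leaves (e.g. strings) are returned unchanged.
    def _example_transfer_nested_batch(self):
        batch = {
            'mel': torch.zeros(4, 80, 100),
            'meta': [torch.arange(4), ('utt_001', 'utt_002')],
        }
        if not torch.cuda.is_available():
            return batch
        return self.transfer_batch_to_gpu(batch, gpu_id=0)  # tensors now on cuda:0
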
    def set_distributed_mode(self, distributed_backend):
        # skip for CPU
        if self.num_gpus == 0:
            return

        # single GPU: run without dp/ddp wrappers
        if self.num_gpus == 1:
            self.single_gpu = True
            self.use_dp = False
            self.use_ddp = False
            self.root_gpu = 0
            self.data_parallel_device_ids = [0]
        else:
            if distributed_backend is not None:
                self.use_dp = distributed_backend == 'dp'
                self.use_ddp = distributed_backend == 'ddp'
            else:
                self.use_dp = True
                self.use_ddp = False

        logging.info(f'gpu available: {torch.cuda.is_available()}, used: {self.on_gpu}')

    def ddp_train(self, gpu_idx, model):
        """
        Entry point into a DDP subprocess (one per GPU).
        :param gpu_idx:
        :param model:
        :return:
        """
        # default to node rank 0
        self.node_rank = 0

        # show progress bar only on progress_rank 0
        self.show_progress_bar = self.show_progress_bar and self.node_rank == 0 and gpu_idx == 0

        # determine which process we are and world size
        if self.use_ddp:
            self.proc_rank = self.node_rank * self.num_gpus + gpu_idx
            self.world_size = self.num_gpus

        # let the exp know the rank to avoid overwriting logs
        if self.logger is not None:
            self.logger.rank = self.proc_rank

        # set up server using proc 0's ip address
        # try to init for 20 times at max in case ports are taken
        # where to store ip_table
        model.trainer = self
        model.init_ddp_connection(self.proc_rank, self.world_size)

        # CHOOSE OPTIMIZER
        # allow for lr schedulers as well
        model.svc_model = model.build_model()
        if not self.testing:
            self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())

        # MODEL
        # copy model to each gpu
        if self.distributed_backend == 'ddp':
            torch.cuda.set_device(gpu_idx)
            model.cuda(gpu_idx)

        # set model properties before going into wrapper
        self.copy_trainer_model_properties(model)

        # override root GPU
        self.root_gpu = gpu_idx

        if self.distributed_backend == 'ddp':
            device_ids = [gpu_idx]
        else:
            device_ids = None

        # allow user to configure ddp
        model = model.configure_ddp(model, device_ids)

        # continue training routine
        self.run_pretrain_routine(model)

    def resolve_root_node_address(self, root_node):
        if '[' in root_node:
            name = root_node.split('[')[0]
            number = root_node.split(',')[0]
            if '-' in number:
                number = number.split('-')[0]

            number = re.sub('[^0-9]', '', number)
            root_node = name + number

        return root_node

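    # --- Illustrative sketch (not part of the original file) -----------------
    # resolve_root_node_address collapses a SLURM-style node range to the
    # first host, e.g.:
    #
    #     self.resolve_root_node_address('node[12-19,22]')  -> 'node12'
    #     self.resolve_root_node_address('gpu07')           -> 'gpu07'
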
    def log_metrics(self, metrics, grad_norm_dic, step=None):
        """Logs the metric dict passed in.

        :param metrics:
        :param grad_norm_dic:
        """
        # metrics added by Lightning for convenience
        metrics['epoch'] = self.current_epoch

        # add norms
        metrics.update(grad_norm_dic)

        # turn all tensors to scalars
        scalar_metrics = self.metrics_to_scalars(metrics)

        step = step if step is not None else self.global_step
        # log actual metrics
        if self.proc_rank == 0 and self.logger is not None:
            self.logger.log_metrics(scalar_metrics, step=step)
            self.logger.save()

    def add_tqdm_metrics(self, metrics):
        for k, v in metrics.items():
            if isinstance(v, torch.Tensor):
                v = v.item()

            self.tqdm_metrics[k] = v

    def metrics_to_scalars(self, metrics):
        new_metrics = {}
        for k, v in metrics.items():
            if isinstance(v, torch.Tensor):
                v = v.item()

            if isinstance(v, dict):
                v = self.metrics_to_scalars(v)

            new_metrics[k] = v

        return new_metrics

    def process_output(self, output, train=False):
        """Reduces output according to the training mode.

        Separates loss from logging and tqdm metrics.
        :param output:
        :return:
        """
        # ---------------
        # EXTRACT CALLBACK KEYS
        # ---------------
        # all keys not progress_bar or log are candidates for callbacks
        callback_metrics = {}
        for k, v in output.items():
            if k not in ['progress_bar', 'log', 'hiddens']:
                callback_metrics[k] = v

        if train and self.use_dp:
            num_gpus = self.num_gpus
            callback_metrics = self.reduce_distributed_output(callback_metrics, num_gpus)

        for k, v in callback_metrics.items():
            if isinstance(v, torch.Tensor):
                callback_metrics[k] = v.item()

        # ---------------
        # EXTRACT PROGRESS BAR KEYS
        # ---------------
        try:
            progress_output = output['progress_bar']

            # reduce progress metrics for tqdm when using dp
            if train and self.use_dp:
                num_gpus = self.num_gpus
                progress_output = self.reduce_distributed_output(progress_output, num_gpus)

            progress_bar_metrics = progress_output
        except Exception:
            progress_bar_metrics = {}

        # ---------------
        # EXTRACT LOGGING KEYS
        # ---------------
        # extract metrics to log to experiment
        try:
            log_output = output['log']

            # reduce log metrics when using dp
            if train and self.use_dp:
                num_gpus = self.num_gpus
                log_output = self.reduce_distributed_output(log_output, num_gpus)

            log_metrics = log_output
        except Exception:
            log_metrics = {}

        # ---------------
        # EXTRACT LOSS
        # ---------------
        # if the output dict doesn't have the keyword loss
        # then assume the output itself is the loss if it is a scalar
        loss = None
        if train:
            try:
                loss = output['loss']
            except Exception:
                if isinstance(output, torch.Tensor):
                    loss = output
                else:
                    raise RuntimeError(
                        'No `loss` value in the dictionary returned from `model.training_step()`.'
                    )

            # when using dp need to reduce the loss
            if self.use_dp:
                loss = self.reduce_distributed_output(loss, self.num_gpus)

        # ---------------
        # EXTRACT HIDDEN
        # ---------------
        hiddens = output.get('hiddens')

        # use every metric passed in as a candidate for callback
        callback_metrics.update(progress_bar_metrics)
        callback_metrics.update(log_metrics)

        # convert tensors to python scalars
        for k, v in callback_metrics.items():
            if isinstance(v, torch.Tensor):
                callback_metrics[k] = v.item()

        return loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens

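    # --- Illustrative sketch (not part of the original file) -----------------
    # The dict shape process_output expects from training_step; keys other
    # than 'loss', 'progress_bar', 'log' and 'hiddens' become callback metrics.
    @staticmethod
    def _example_training_step_output(loss, lr):
        return {
            'loss': loss,                 # required during training
            'progress_bar': {'lr': lr},   # shown on the tqdm bar
            'log': {'train/loss': loss},  # sent to the logger
        }
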
    def reduce_distributed_output(self, output, num_gpus):
        if num_gpus <= 1:
            return output

        # when using DP, we get one output per gpu
        # average outputs and return
        if isinstance(output, torch.Tensor):
            return output.mean()

        for k, v in output.items():
            # recurse on nested dicts
            if isinstance(output[k], dict):
                output[k] = self.reduce_distributed_output(output[k], num_gpus)

            # do nothing when there's a scalar
            elif isinstance(output[k], torch.Tensor) and output[k].dim() == 0:
                pass

            # reduce only metrics that have the same number of gpus
            elif output[k].size(0) == num_gpus:
                reduced = torch.mean(output[k])
                output[k] = reduced
        return output

    def clip_gradients(self):
        if self.gradient_clip_val > 0:
            model = self.get_model()
            torch.nn.utils.clip_grad_norm_(model.parameters(), self.gradient_clip_val)

    def print_nan_gradients(self):
        model = self.get_model()
        for param in model.parameters():
            if (param.grad is not None) and torch.isnan(param.grad.float()).any():
                logging.info('NaN gradient: %s %s', param, param.grad)

    def configure_accumulated_gradients(self, accumulate_grad_batches):
        self.accumulate_grad_batches = None

        if isinstance(accumulate_grad_batches, dict):
            self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches)
        elif isinstance(accumulate_grad_batches, int):
            schedule = {1: accumulate_grad_batches}
            self.accumulation_scheduler = GradientAccumulationScheduler(schedule)
        else:
            raise TypeError("Gradient accumulation supports only int and dict types")

    def get_dataloaders(self, model):
        if not self.testing:
            self.init_train_dataloader(model)
            self.init_val_dataloader(model)
        else:
            self.init_test_dataloader(model)

        if self.use_ddp:
            dist.barrier()
            if not self.testing:
                self.get_train_dataloader()
                self.get_val_dataloaders()
            else:
                self.get_test_dataloaders()

    def init_train_dataloader(self, model):
        self.first_epoch = True
        self.get_train_dataloader = model.train_dataloader
        if isinstance(self.get_train_dataloader(), torch.utils.data.DataLoader):
            self.num_training_batches = len(self.get_train_dataloader())
            self.num_training_batches = int(self.num_training_batches)
        else:
            self.num_training_batches = float('inf')
            self.is_iterable_train_dataloader = True
        if isinstance(self.val_check_interval, int):
            self.val_check_batch = self.val_check_interval
        else:
            self._percent_range_check('val_check_interval')
            self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
            self.val_check_batch = max(1, self.val_check_batch)

    def init_val_dataloader(self, model):
        self.get_val_dataloaders = model.val_dataloader
        self.num_val_batches = 0
        if self.get_val_dataloaders() is not None:
            if isinstance(self.get_val_dataloaders()[0], torch.utils.data.DataLoader):
                self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())
                self.num_val_batches = int(self.num_val_batches)
            else:
                self.num_val_batches = float('inf')

    def init_test_dataloader(self, model):
        self.get_test_dataloaders = model.test_dataloader
        if self.get_test_dataloaders() is not None:
            if isinstance(self.get_test_dataloaders()[0], torch.utils.data.DataLoader):
                self.num_test_batches = sum(len(dataloader) for dataloader in self.get_test_dataloaders())
                self.num_test_batches = int(self.num_test_batches)
            else:
                self.num_test_batches = float('inf')

    def evaluate(self, model, dataloaders, max_batches, test=False):
        """Run evaluation code.

        :param model: PT model
        :param dataloaders: list of PT dataloaders
        :param max_batches: Scalar
        :param test: boolean
        :return:
        """
        # enable eval mode
        model.zero_grad()
        model.eval()

        # copy properties for forward overrides
        self.copy_trainer_model_properties(model)

        # disable gradients to save memory
        torch.set_grad_enabled(False)

        if test:
            self.get_model().test_start()
        # bookkeeping
        outputs = []

        # run evaluation
        for dataloader_idx, dataloader in enumerate(dataloaders):
            dl_outputs = []
            for batch_idx, batch in enumerate(dataloader):

                if batch is None:  # pragma: no cover
                    continue

                # stop short when on fast_dev_run (sets max_batch=1)
                if batch_idx >= max_batches:
                    break

                # -----------------
                # RUN EVALUATION STEP
                # -----------------
                output = self.evaluation_forward(model,
                                                 batch,
                                                 batch_idx,
                                                 dataloader_idx,
                                                 test)

                # track outputs for collation
                dl_outputs.append(output)

                # batch done
                if test:
                    self.test_progress_bar.update(1)
                else:
                    self.val_progress_bar.update(1)
            outputs.append(dl_outputs)

        # with a single dataloader don't pass an array
        if len(dataloaders) == 1:
            outputs = outputs[0]

        # give model a chance to do something with the outputs (if the method is defined)
        model = self.get_model()
        if test:
            eval_results = model.test_end(outputs)
        else:
            eval_results = model.validation_end(outputs)

        # put model back into train mode
        model.train()

        # re-enable gradients
        torch.set_grad_enabled(True)

        return eval_results

    def run_evaluation(self, test=False):
        # when testing make sure user defined a test step
        model = self.get_model()
        model.on_pre_performance_check()

        # select dataloaders
        if test:
            dataloaders = self.get_test_dataloaders()
            max_batches = self.num_test_batches
        else:
            # val
            dataloaders = self.get_val_dataloaders()
            max_batches = self.num_val_batches

        # init validation or test progress bar
        # main progress bar will already be closed when testing so initial position is free
        position = 2 * self.process_position + (not test)
        desc = 'Testing' if test else 'Validating'
        pbar = tqdm.tqdm(desc=desc, total=max_batches, leave=test, position=position,
                         disable=not self.show_progress_bar, dynamic_ncols=True,
                         unit='batch', file=sys.stdout)
        setattr(self, f'{"test" if test else "val"}_progress_bar', pbar)

        # run evaluation
        eval_results = self.evaluate(self.model,
                                     dataloaders,
                                     max_batches,
                                     test)
        if eval_results is not None:
            _, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(
                eval_results)

            # add metrics to prog bar
            self.add_tqdm_metrics(prog_bar_metrics)

            # log metrics
            self.log_metrics(log_metrics, {})

            # track metrics for callbacks
            self.callback_metrics.update(callback_metrics)

        # hook
        model.on_post_performance_check()

        # add model specific metrics
        tqdm_metrics = self.training_tqdm_dict
        if not test:
            self.main_progress_bar.set_postfix(**tqdm_metrics)

        # close progress bar
        if test:
            self.test_progress_bar.close()
        else:
            self.val_progress_bar.close()

        # model checkpointing
        if self.proc_rank == 0 and self.checkpoint_callback is not None and not test:
            self.checkpoint_callback.on_epoch_end(epoch=self.current_epoch,
                                                  logs=self.callback_metrics)

    def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test=False):
        # make dataloader_idx arg in validation_step optional
        args = [batch, batch_idx]
        if test and len(self.get_test_dataloaders()) > 1:
            args.append(dataloader_idx)

        elif not test and len(self.get_val_dataloaders()) > 1:
            args.append(dataloader_idx)

        # handle DP, DDP forward
        if self.use_ddp or self.use_dp:
            output = model(*args)
            return output

        # single GPU
        if self.single_gpu:
            # for single GPU put inputs on gpu manually
            root_gpu = 0
            if isinstance(self.data_parallel_device_ids, list):
                root_gpu = self.data_parallel_device_ids[0]
            batch = self.transfer_batch_to_gpu(batch, root_gpu)
            args[0] = batch

        # CPU or single GPU
        if test:
            output = model.test_step(*args)
        else:
            output = model.validation_step(*args)

        return output

    def train(self):
        model = self.get_model()
        # run all epochs
        for epoch in range(self.current_epoch, 1000000):
            # set seed for distributed sampler (enables shuffling for each epoch)
            if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):
                self.get_train_dataloader().sampler.set_epoch(epoch)

            # get model
            model = self.get_model()

            # update training progress in trainer and model
            model.current_epoch = epoch
            self.current_epoch = epoch

            total_val_batches = 0
            if not self.disable_validation:
                # val can be checked multiple times per epoch
                is_val_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
                val_checks_per_epoch = self.num_training_batches // self.val_check_batch
                val_checks_per_epoch = val_checks_per_epoch if is_val_epoch else 0
                total_val_batches = self.num_val_batches * val_checks_per_epoch

            # total batches includes multiple val checks
            self.total_batches = self.num_training_batches + total_val_batches
            self.batch_loss_value = 0  # accumulated grads

            if self.is_iterable_train_dataloader:
                # for iterable train loader, the progress bar never ends
                num_iterations = None
            else:
                num_iterations = self.total_batches

            # reset progress bar
            # .reset() doesn't work on a disabled progress bar so we should check
            desc = f'Epoch {epoch + 1}' if not self.is_iterable_train_dataloader else ''
            self.main_progress_bar.set_description(desc)

            # change the accumulation factor according to the accumulation_scheduler
            self.accumulation_scheduler.on_epoch_begin(epoch, self)

            # -----------------
            # RUN TNG EPOCH
            # -----------------
            self.run_training_epoch()

            # update LR schedulers
            if self.lr_schedulers is not None:
                for lr_scheduler in self.lr_schedulers:
                    lr_scheduler.step(epoch=self.current_epoch)

        self.main_progress_bar.close()

        model.on_train_end()

        if self.logger is not None:
            self.logger.finalize("success")

    def run_training_epoch(self):
        # before epoch hook
        if self.is_function_implemented('on_epoch_start'):
            model = self.get_model()
            model.on_epoch_start()

        # run epoch
        for batch_idx, batch in enumerate(self.get_train_dataloader()):
            # stop epoch if we limited the number of training batches
            if batch_idx >= self.num_training_batches:
                break

            self.batch_idx = batch_idx

            model = self.get_model()
            model.global_step = self.global_step

            # ---------------
            # RUN TRAIN STEP
            # ---------------
            output = self.run_training_batch(batch, batch_idx)
            batch_result, grad_norm_dic, batch_step_metrics = output

            # when returning -1 from train_step, we end epoch early
            early_stop_epoch = batch_result == -1

            # ---------------
            # RUN VAL STEP
            # ---------------
            should_check_val = (
                    not self.disable_validation and self.global_step % self.val_check_batch == 0
                    and not self.first_epoch)
            self.first_epoch = False

            if should_check_val:
                self.run_evaluation(test=self.testing)

            # when logs should be saved
            should_save_log = (batch_idx + 1) % self.log_save_interval == 0 or early_stop_epoch
            if should_save_log:
                if self.proc_rank == 0 and self.logger is not None:
                    self.logger.save()

            # when metrics should be logged
            should_log_metrics = batch_idx % self.row_log_interval == 0 or early_stop_epoch
            if should_log_metrics:
                # log user-requested information to the logger
                self.log_metrics(batch_step_metrics, grad_norm_dic)

            self.global_step += 1
            self.total_batch_idx += 1

            # end epoch early
            # stop when the flag is changed or we've gone past the amount
            # requested in the batches
            if early_stop_epoch:
                break
            if self.global_step > self.max_updates:
                print("| Training end..")
                sys.exit(0)

        # epoch end hook
        if self.is_function_implemented('on_epoch_end'):
            model = self.get_model()
            model.on_epoch_end()

    def run_training_batch(self, batch, batch_idx):
        # track grad norms
        grad_norm_dic = {}

        # track all metrics for callbacks
        all_callback_metrics = []

        # track metrics to log
        all_log_metrics = []

        if batch is None:
            return 0, grad_norm_dic, {}

        # hook
        if self.is_function_implemented('on_batch_start'):
            model_ref = self.get_model()
            response = model_ref.on_batch_start(batch)

            if response == -1:
                return -1, grad_norm_dic, {}

        splits = [batch]
        self.hiddens = None
        for split_idx, split_batch in enumerate(splits):
            self.split_idx = split_idx

            # call training_step once per optimizer
            for opt_idx, optimizer in enumerate(self.optimizers):
                if optimizer is None:
                    continue
                # make sure only the gradients of the current optimizer's parameters are calculated
                # in the training step to prevent dangling gradients in multiple-optimizer setups.
                if len(self.optimizers) > 1:
                    for param in self.get_model().parameters():
                        param.requires_grad = False
                    for group in optimizer.param_groups:
                        for param in group['params']:
                            param.requires_grad = True

                # wrap the forward step in a closure so second order methods work
                def optimizer_closure():
                    # forward pass
                    output = self.training_forward(
                        split_batch, batch_idx, opt_idx, self.hiddens)

                    closure_loss = output[0]
                    progress_bar_metrics = output[1]
                    log_metrics = output[2]
                    callback_metrics = output[3]
                    self.hiddens = output[4]
                    if closure_loss is None:
                        return None

                    # accumulate loss
                    # (if accumulate_grad_batches = 1 no effect)
                    closure_loss = closure_loss / self.accumulate_grad_batches

                    # backward pass
                    model_ref = self.get_model()
                    if closure_loss.requires_grad:
                        model_ref.backward(closure_loss, optimizer)

                    # track metrics for callbacks
                    all_callback_metrics.append(callback_metrics)

                    # track progress bar metrics
                    self.add_tqdm_metrics(progress_bar_metrics)
                    all_log_metrics.append(log_metrics)

                    # insert after-step hook
                    if self.is_function_implemented('on_after_backward'):
                        model_ref = self.get_model()
                        model_ref.on_after_backward()

                    return closure_loss

                # calculate loss
                loss = optimizer_closure()
                if loss is None:
                    continue

                # nan grads
                if self.print_nan_grads:
                    self.print_nan_gradients()

                # track total loss for logging (avoid mem leaks)
                self.batch_loss_value += loss.item()

                # gradient update with accumulated gradients
                if (self.batch_idx + 1) % self.accumulate_grad_batches == 0:

                    # track gradient norms when requested
                    if batch_idx % self.row_log_interval == 0:
                        if self.track_grad_norm > 0:
                            model = self.get_model()
                            grad_norm_dic = model.grad_norm(
                                self.track_grad_norm)

                    # clip gradients
                    self.clip_gradients()

                    # calls .step(), .zero_grad()
                    # override function to modify this behavior
                    model = self.get_model()
                    model.optimizer_step(self.current_epoch, batch_idx, optimizer, opt_idx)

                    # calculate running loss for display
                    self.running_loss.append(self.batch_loss_value)
                    self.batch_loss_value = 0
                    self.avg_loss = np.mean(self.running_loss[-100:])

        # activate batch end hook
        if self.is_function_implemented('on_batch_end'):
            model = self.get_model()
            model.on_batch_end()

        # update progress bar
        self.main_progress_bar.update(1)
        self.main_progress_bar.set_postfix(**self.training_tqdm_dict)

        # collapse all metrics into one dict
        all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}

        # track all metrics for callbacks
        self.callback_metrics.update({k: v for d in all_callback_metrics for k, v in d.items()})

        return 0, grad_norm_dic, all_log_metrics

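    # --- Illustrative sketch (not part of the original file) -----------------
    # Gradient accumulation as implemented above: the closure divides the loss
    # by accumulate_grad_batches and optimizer_step fires on every
    # accumulate_grad_batches-th batch, so the effective batch size is
    # loader_batch_size * accumulate_grad_batches.
    @staticmethod
    def _example_accumulation_arithmetic(loader_batch_size=16, accumulate_grad_batches=4):
        def optimizer_steps(num_batches):
            return num_batches // accumulate_grad_batches

        effective_batch_size = loader_batch_size * accumulate_grad_batches  # 64
        return effective_batch_size, optimizer_steps(1000)  # (64, 250)
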
    def training_forward(self, batch, batch_idx, opt_idx, hiddens):
        """
        Handle forward for each training case (distributed, single gpu, etc...)
        :param batch:
        :param batch_idx:
        :return:
        """
        # ---------------
        # FORWARD
        # ---------------
        # enable not needing to add opt_idx to training_step
        args = [batch, batch_idx, opt_idx]

        # distributed forward
        if self.use_ddp or self.use_dp:
            output = self.model(*args)
        # single GPU forward
        elif self.single_gpu:
            gpu_id = 0
            if isinstance(self.data_parallel_device_ids, list):
                gpu_id = self.data_parallel_device_ids[0]
            batch = self.transfer_batch_to_gpu(copy.copy(batch), gpu_id)
            args[0] = batch
            output = self.model.training_step(*args)
        # CPU forward
        else:
            output = self.model.training_step(*args)

        # allow any mode to define training_end
        model_ref = self.get_model()
        output_ = model_ref.training_end(output)
        if output_ is not None:
            output = output_

        # format and reduce outputs accordingly
        output = self.process_output(output, train=True)

        return output

    # ---------------
    # Utils
    # ---------------
def is_function_implemented(self, f_name):
|
1614 |
-
model = self.get_model()
|
1615 |
-
f_op = getattr(model, f_name, None)
|
1616 |
-
return callable(f_op)
|
1617 |
-
|
1618 |
-
def _percent_range_check(self, name):
|
1619 |
-
value = getattr(self, name)
|
1620 |
-
msg = f"`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}."
|
1621 |
-
if name == "val_check_interval":
|
1622 |
-
msg += " If you want to disable validation set `val_percent_check` to 0.0 instead."
|
1623 |
-
|
1624 |
-
if not 0. <= value <= 1.:
|
1625 |
-
raise ValueError(msg)
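The deleted trainer excerpt above is a standard gradient-accumulation loop: per-batch losses are summed, and clipping, the optimizer step, and the gradient zeroing only run every `accumulate_grad_batches` batches. Below is a minimal standalone sketch of the same pattern in plain PyTorch; `model`, `optimizer`, `loader`, `loss_fn`, and the two constants are illustrative placeholders, not names from the original file.

import torch

ACCUMULATE_GRAD_BATCHES = 4  # hypothetical stand-in for self.accumulate_grad_batches
MAX_GRAD_NORM = 1.0          # hypothetical clipping threshold

def train_one_epoch(model, optimizer, loader, loss_fn):
    batch_loss_value = 0.0
    running_loss = []
    for batch_idx, (x, y) in enumerate(loader):
        # scale so the accumulated gradient matches one large-batch step
        loss = loss_fn(model(x), y) / ACCUMULATE_GRAD_BATCHES
        loss.backward()
        batch_loss_value += loss.item()

        # mirrors the deleted `(self.batch_idx + 1) % self.accumulate_grad_batches == 0` check
        if (batch_idx + 1) % ACCUMULATE_GRAD_BATCHES == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), MAX_GRAD_NORM)
            optimizer.step()       # the trainer delegated this to model.optimizer_step()
            optimizer.zero_grad()

            # keep a running loss for display, as the trainer did
            running_loss.append(batch_loss_value)
            batch_loss_value = 0.0
    return running_loss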
spaces/CoreyMorris/MMLU-by-task-Leaderboard/moral_app.py
DELETED
@@ -1,248 +0,0 @@
-import streamlit as st
-import pandas as pd
-import plotly.express as px
-from result_data_processor import ResultDataProcessor
-import matplotlib.pyplot as plt
-import numpy as np
-import plotly.graph_objects as go
-from plotting_utils import plot_top_n, create_radar_chart_unfilled, create_line_chart, create_plot
-
-st.set_page_config(layout="wide")
-
-def find_top_differences_table(df, target_model, closest_models, num_differences=10, exclude_columns=['Parameters', 'organization']):
-    # Calculate the absolute differences for each task between the target model and the closest models
-    new_df = df.drop(columns=exclude_columns)
-    differences = new_df.loc[closest_models].sub(new_df.loc[target_model]).abs()
-    # Unstack the differences and sort by the largest absolute difference
-    top_differences = differences.unstack().nlargest(num_differences)
-    # Convert the top differences to a DataFrame for display
-    top_differences_table = pd.DataFrame({
-        'Task': [idx[0] for idx in top_differences.index],
-        'Difference': top_differences.values
-    })
-    # Ensure that only unique tasks are returned
-    unique_top_differences_tasks = list(set(top_differences_table['Task'].tolist()))
-    return top_differences_table, unique_top_differences_tasks
-
-
-
-# Main Application
-
-data_provider = ResultDataProcessor()
-
-st.title('Why are large language models so bad at the moral scenarios task?')
-st.markdown("""
-Here I am to answer the question: Why are large language models so bad at the moral scenarios task?
-Sub questions:
-- Are the models actually bad at moral reasoning ?
-- Is it the structure of the task that is the causing the poor performance ?
-- Are there other tasks with questions in a similar structure ?
-- How do models perform when the structure of the task is changed ?
-""")
-
-filters = st.checkbox('Select Models and/or Evaluations')
-
-# Initialize selected columns with "Parameters" and "MMLU_average" if filters are checked
-selected_columns = ['Parameters', 'MMLU_average'] if filters else data_provider.data.columns.tolist()
-
-# Initialize selected models as empty if filters are checked
-selected_models = [] if filters else data_provider.data.index.tolist()
-
-if filters:
-    # Create multi-select for columns with default selection
-    selected_columns = st.multiselect(
-        'Select Columns',
-        data_provider.data.columns.tolist(),
-        default=selected_columns
-    )
-
-    # Create multi-select for models without default selection
-    selected_models = st.multiselect(
-        'Select Models',
-        data_provider.data.index.tolist()
-    )
-
-# Get the filtered data
-filtered_data = data_provider.get_data(selected_models)
-
-# sort the table by the MMLU_average column
-filtered_data = filtered_data.sort_values(by=['MMLU_average'], ascending=False)
-
-# Select box for filtering by Parameters
-parameter_threshold = st.selectbox(
-    'Filter by Parameters (Less Than or Equal To):',
-    options=[3, 7, 13, 35, 'No threshold'],
-    index=4,  # Set the default selected option to 'No threshold'
-    format_func=lambda x: f"{x}" if isinstance(x, int) else x
-)
-
-# Filter the DataFrame based on the selected parameter threshold if not 'No threshold'
-if isinstance(parameter_threshold, int):
-    filtered_data = filtered_data[filtered_data['Parameters'] <= parameter_threshold]
-
-
-# Search box
-search_query = st.text_input("Filter by Model Name:", "")
-
-# Filter the DataFrame based on the search query in the index (model name)
-if search_query:
-    filtered_data = filtered_data[filtered_data.index.str.contains(search_query, case=False)]
-
-
-# Search box for columns
-column_search_query = st.text_input("Filter by Column/Task Name:", "")
-
-# Get the columns that contain the search query
-matching_columns = [col for col in filtered_data.columns if column_search_query.lower() in col.lower()]
-
-# # Display the DataFrame with only the matching columns
-# st.markdown("## Sortable Results")
-# st.dataframe(filtered_data[matching_columns])
-
-
-# CSV download
-
-filtered_data.index.name = "Model Name"
-
-csv = filtered_data.to_csv(index=True)
-st.download_button(
-    label="Download data as CSV",
-    data=csv,
-    file_name="model_evaluation_results.csv",
-    mime="text/csv",
-)
-
-
-# Moral Scenarios section
-st.markdown("## Why are large language models so bad at the moral scenarios task?")
-st.markdown("### The structure of the task is odd")
-
-# - Are the models actually bad at moral reasoning ?
-# - Is it the structure of the task that is the causing the poor performance ?
-# - Are there other tasks with questions in a similar structure ?
-# - How do models perform when the structure of the task is changed ?
-st.markdown("### Moral Scenarios Performance")
-def show_random_moral_scenarios_question():
-    moral_scenarios_data = pd.read_csv('moral_scenarios_questions.csv')
-    random_question = moral_scenarios_data.sample()
-    expander = st.expander("Show a random moral scenarios question")
-    expander.write(random_question['query'].values[0])
-
-show_random_moral_scenarios_question()
-
-st.write("""
-While smaller models can perform well at many tasks, the model size threshold for decent performance on moral scenarios is much higher.
-There are no models with less than 13 billion parameters with performance much better than random chance. Further investigation into other capabilities that emerge at 13 billion parameters could help
-identify capabilities that are important for moral reasoning.
-""")
-
-fig = create_plot(filtered_data, 'Parameters', 'MMLU_moral_scenarios', title="Impact of Parameter Count on Accuracy for Moral Scenarios")
-st.plotly_chart(fig)
-st.write()
-
-
-
-fig = create_plot(filtered_data, 'MMLU_average', 'MMLU_moral_scenarios')
-st.plotly_chart(fig)
-
-
-
-
-
-
-
-
-# Custom scatter plots
-st.header('Custom scatter plots')
-st.write("""
-The scatter plot is useful to identify models that outperform or underperform on a particular task in relation to their size or overall performance.
-Identifying these models is a first step to better understand what training strategies result in better performance on a particular task.
-""")
-st.markdown("***The dashed red line indicates random chance accuracy of 0.25 as the MMLU evaluation is multiple choice with 4 response options.***")
-# add a line separating the writing
-st.markdown("***")
-st.write("As expected, there is a strong positive relationship between the number of parameters and average performance on the MMLU evaluation.")
-
-selected_x_column = st.selectbox('Select x-axis', filtered_data.columns.tolist(), index=0)
-selected_y_column = st.selectbox('Select y-axis', filtered_data.columns.tolist(), index=3)
-
-if selected_x_column != selected_y_column:  # Avoid creating a plot with the same column on both axes
-    fig = create_plot(filtered_data, selected_x_column, selected_y_column)
-    st.plotly_chart(fig)
-else:
-    st.write("Please select different columns for the x and y axes.")
-
-
-
-
-# end of custom scatter plots
-
-# Section to select a model and display radar and line charts
-st.header("Compare a Selected Model to the 5 Models Closest in MMLU Average Performance")
-st.write("""
-This comparison highlights the nuances in model performance across different tasks.
-While the overall MMLU average score provides a general understanding of a model's capabilities,
-examining the closest models reveals variations in performance on individual tasks.
-Such an analysis can uncover specific strengths and weaknesses and guide further exploration and improvement.
-""")
-
-default_model_name = "GPT-JT-6B-v0"
-
-default_model_index = filtered_data.index.tolist().index(default_model_name) if default_model_name in filtered_data.index else 0
-selected_model_name = st.selectbox("Select a Model:", filtered_data.index.tolist(), index=default_model_index)
-
-# Get the closest 5 models with unique indices
-closest_models_diffs = filtered_data['MMLU_average'].sub(filtered_data.loc[selected_model_name, 'MMLU_average']).abs()
-closest_models = closest_models_diffs.nsmallest(5, keep='first').index.drop_duplicates().tolist()
-
-
-# Find the top 10 tasks with the largest differences and convert to a DataFrame
-top_differences_table, top_differences_tasks = find_top_differences_table(filtered_data, selected_model_name, closest_models)
-
-# Display the DataFrame for the closest models and the top differences tasks
-st.dataframe(filtered_data.loc[closest_models, top_differences_tasks])
-
-# # Display the table in the Streamlit app
-# st.markdown("## Top Differences")
-# st.dataframe(top_differences_table)
-
-# Create a radar chart for the tasks with the largest differences
-fig_radar_top_differences = create_radar_chart_unfilled(filtered_data, closest_models, top_differences_tasks)
-
-# Display the radar chart
-st.plotly_chart(fig_radar_top_differences)
-
-
-st.markdown("## Notable findings and plots")
-
-st.markdown('### Abstract Algebra Performance')
-st.write("Small models showed surprisingly strong performance on the abstract algebra task. A 6 Billion parameter model is tied for the best performance on this task and there are a number of other small models in the top 10.")
-plot_top_n(filtered_data, 'MMLU_abstract_algebra', 10)
-
-fig = create_plot(filtered_data, 'Parameters', 'MMLU_abstract_algebra')
-st.plotly_chart(fig)
-
-
-
-
-
-
-st.markdown("***Thank you to hugging face for running the evaluations and supplying the data as well as the original authors of the evaluations.***")
-
-st.markdown("""
-# Citation
-
-1. Corey Morris (2023). *Exploring the Characteristics of Large Language Models: An Interactive Portal for Analyzing 700+ Open Source Models Across 57 Diverse Evaluation Tasks*. [link](https://huggingface.co/spaces/CoreyMorris/MMLU-by-task-Leaderboard)
-
-2. Edward Beeching, Clémentine Fourrier, Nathan Habib, Sheon Han, Nathan Lambert, Nazneen Rajani, Omar Sanseviero, Lewis Tunstall, Thomas Wolf. (2023). *Open LLM Leaderboard*. Hugging Face. [link](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
-
-3. Gao, Leo et al. (2021). *A framework for few-shot language model evaluation*. Zenodo. [link](https://doi.org/10.5281/zenodo.5371628)
-
-4. Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, Oyvind Tafjord. (2018). *Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge*. arXiv. [link](https://arxiv.org/abs/1803.05457)
-
-5. Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, Yejin Choi. (2019). *HellaSwag: Can a Machine Really Finish Your Sentence?*. arXiv. [link](https://arxiv.org/abs/1905.07830)
-
-6. Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, Jacob Steinhardt. (2021). *Measuring Massive Multitask Language Understanding*. arXiv. [link](https://arxiv.org/abs/2009.03300)
-
-7. Stephanie Lin, Jacob Hilton, Owain Evans. (2022). *TruthfulQA: Measuring How Models Mimic Human Falsehoods*. arXiv. [link](https://arxiv.org/abs/2109.07958)
-""")
spaces/DaleChen/AutoGPT/tests/__init__.py
DELETED
File without changes
spaces/DaleChen/AutoGPT/ui/app.py
DELETED
@@ -1,145 +0,0 @@
-import gradio as gr
-import utils
-from api import AutoAPI, get_openai_api_key
-import os, shutil
-import json
-
-FILE_DIR = os.path.dirname(os.path.abspath(__file__))
-OUTPUT_DIR = os.path.join(os.path.dirname(FILE_DIR), "auto_gpt_workspace")
-if not os.path.exists(OUTPUT_DIR):
-    os.mkdir(OUTPUT_DIR)
-
-CSS = """
-#chatbot {font-family: monospace;}
-#files .generating {display: none;}
-#files .min {min-height: 0px;}
-"""
-
-with gr.Blocks(css=CSS) as app:
-    with gr.Column() as setup_pane:
-        gr.Markdown(f"""# Auto-GPT
-        1. Duplicate this Space: <a href="https://huggingface.co/spaces/{os.getenv('SPACE_ID')}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a> This will **NOT** work without duplication!
-        2. Enter your <a href="https://platform.openai.com/account/api-keys">OpenAI API Key</a> below.
-        """)
-        with gr.Row():
-            open_ai_key = gr.Textbox(
-                value=get_openai_api_key(),
-                label="OpenAI API Key",
-                type="password",
-            )
-        gr.Markdown(
-            "3. Fill the values below, then click 'Start'. There are example values you can load at the bottom of this page."
-        )
-        with gr.Row():
-            ai_name = gr.Textbox(label="AI Name", placeholder="e.g. Entrepreneur-GPT")
-            ai_role = gr.Textbox(
-                label="AI Role",
-                placeholder="e.g. an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.",
-            )
-        top_5_goals = gr.Dataframe(
-            row_count=(5, "fixed"),
-            col_count=(1, "fixed"),
-            headers=["AI Goals - Enter up to 5"],
-            type="array"
-        )
-        start_btn = gr.Button("Start", variant="primary")
-        with open(os.path.join(FILE_DIR, "examples.json"), "r") as f:
-            example_values = json.load(f)
-        gr.Examples(
-            example_values,
-            [ai_name, ai_role, top_5_goals],
-        )
-    with gr.Column(visible=False) as main_pane:
-        with gr.Row():
-            with gr.Column(scale=2):
-                chatbot = gr.Chatbot(elem_id="chatbot")
-                with gr.Row():
-                    yes_btn = gr.Button("Yes", variant="primary", interactive=False)
-                    consecutive_yes = gr.Slider(
-                        1, 10, 1, step=1, label="Consecutive Yes", interactive=False
-                    )
-                custom_response = gr.Textbox(
-                    label="Custom Response",
-                    placeholder="Press 'Enter' to Submit.",
-                    interactive=False,
-                )
-            with gr.Column(scale=1):
-                gr.HTML(
-                    lambda: f"""
-                    Generated Files
-                    <pre><code style='overflow-x: auto'>{utils.format_directory(OUTPUT_DIR)}</pre></code>
-                    """, every=3, elem_id="files"
-                )
-                download_btn = gr.Button("Download All Files")
-
-    chat_history = gr.State([[None, None]])
-    api = gr.State(None)
-
-    def start(open_ai_key, ai_name, ai_role, top_5_goals):
-        auto_api = AutoAPI(open_ai_key, ai_name, ai_role, top_5_goals)
-        return gr.Column.update(visible=False), gr.Column.update(visible=True), auto_api
-
-    def bot_response(chat, api):
-        messages = []
-        for message in api.get_chatbot_response():
-            messages.append(message)
-            chat[-1][1] = "\n".join(messages) + "..."
-            yield chat
-        chat[-1][1] = "\n".join(messages)
-        yield chat
-
-    def send_message(count, chat, api, message="Y"):
-        if message != "Y":
-            count = 1
-        for i in range(count):
-            chat.append([message, None])
-            yield chat, count - i
-            api.send_message(message)
-            for updated_chat in bot_response(chat, api):
-                yield updated_chat, count - i
-
-    def activate_inputs():
-        return {
-            yes_btn: gr.Button.update(interactive=True),
-            consecutive_yes: gr.Slider.update(interactive=True),
-            custom_response: gr.Textbox.update(interactive=True),
-        }
-
-    def deactivate_inputs():
-        return {
-            yes_btn: gr.Button.update(interactive=False),
-            consecutive_yes: gr.Slider.update(interactive=False),
-            custom_response: gr.Textbox.update(interactive=False),
-        }
-
-    start_btn.click(
-        start,
-        [open_ai_key, ai_name, ai_role, top_5_goals],
-        [setup_pane, main_pane, api],
-    ).then(bot_response, [chat_history, api], chatbot).then(
-        activate_inputs, None, [yes_btn, consecutive_yes, custom_response]
-    )
-
-    yes_btn.click(
-        deactivate_inputs, None, [yes_btn, consecutive_yes, custom_response]
-    ).then(
-        send_message, [consecutive_yes, chat_history, api], [chatbot, consecutive_yes]
-    ).then(
-        activate_inputs, None, [yes_btn, consecutive_yes, custom_response]
-    )
-    custom_response.submit(
-        deactivate_inputs, None, [yes_btn, consecutive_yes, custom_response]
-    ).then(
-        send_message,
-        [consecutive_yes, chat_history, api, custom_response],
-        [chatbot, consecutive_yes],
-    ).then(
-        activate_inputs, None, [yes_btn, consecutive_yes, custom_response]
-    )
-
-    def download_all_files():
-        shutil.make_archive("outputs", "zip", OUTPUT_DIR)
-
-    download_btn.click(download_all_files).then(None, _js=utils.DOWNLOAD_OUTPUTS_JS)
-
-app.queue(concurrency_count=20).launch(file_directories=[OUTPUT_DIR])
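The wiring in the deleted app.py leans on Gradio 3.x event chaining: each handler disables the inputs, runs the slow step, then re-enables them via .click(...).then(...). A stripped-down sketch of that pattern (the components and functions here are toy examples, not part of the original Space):

import time
import gradio as gr

with gr.Blocks() as demo:
    btn = gr.Button("Run")
    out = gr.Textbox(label="Result")

    def lock():
        # returning a component update toggles interactivity (Gradio 3.x API)
        return gr.Button.update(interactive=False)

    def work():
        time.sleep(1)  # stand-in for the slow agent call
        return "done"

    def unlock():
        return gr.Button.update(interactive=True)

    # each .then() runs only after the previous step finishes
    btn.click(lock, None, btn).then(work, None, out).then(unlock, None, btn)

demo.queue().launch()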
spaces/Dimentian/LLMs-Stable-Vicuna-13B/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: LLMs Stable Vicuna 13B
-emoji: ⚡
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.28.3
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DpNaze/webui-docker/on_start.sh
DELETED
@@ -1,124 +0,0 @@
-#!/bin/bash
-set -euo pipefail
-
-function download-model() {
-  local _option=$1
-  local _filename=$2
-  local _url=$3
-  local _dir
-
-  ! [ $# -eq 3 ] && (echo "usage: "; for o in checkpoint lora vae control-net embedding; do echo "  \$ download-model --$o <filename> <url>"; done) || true
-  [ $# -eq 0 ] && return 0 || ! [ $# -eq 3 ] && (echo ""; echo "error - invalid number of arguments (expected 3, received $#)"; echo -n "\$ download-model $1"; (for arg in "${@: 2}"; do echo -n " \"${arg//\"/\\\"}\""; done) && echo "") && return 1 || true
-
-  case ${_option,,} in
-    --checkpoint) _dir="/app/stable-diffusion-webui/models/Stable-diffusion";;
-    --lora) _dir="/app/stable-diffusion-webui/models/Lora";;
-    --vae) _dir="/app/stable-diffusion-webui/models/VAE";;
-    --control-net) _dir="/app/stable-diffusion-webui/models/ControlNet";;
-    --embedding) _dir="/app/stable-diffusion-webui/embeddings";;
-
-    *) echo "error - unknown first argument: '$1' (valid options are --checkpoint, --lora, --vae, --control-net or --embedding):"; echo "\$ download-model $1 \"$2\" \"$3\""; return 1;;
-  esac
-
-  echo "\$ download-model $_option \"$2\" \"$3\"" ; echo ""
-  aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $_url -d $_dir -o $_filename && echo ""
-}
-
-## ----------------------------
-
-## Adds a header to the webui on Hugging Face Spaces.
-## sed -i -e '/demo:/r /app/stable-diffusion-webui/header_patch.py' /app/stable-diffusion-webui/modules/ui.py
-
-## ----------------------------
-
-## Installing less models if $IS_SHARED_UI environment variable is set.
-if [ ${IS_SHARED_UI:-0} != 0 ]; then
-  download-model --checkpoint "v1-5-pruned-emaonly.safetensors" "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/39593d5650112b4cc580433f6b0435385882d819/v1-5-pruned-emaonly.safetensors"
-  download-model --checkpoint "v1-5-pruned-emaonly.yaml" "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/39593d5650112b4cc580433f6b0435385882d819/v1-inference.yaml"
-  download-model --control-net "cldm_v15.yaml" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/cldm_v15.yaml"
-  download-model --control-net "control_canny-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_canny-fp16.safetensors"
-  download-model --control-net "control_depth-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_depth-fp16.safetensors"
-  download-model --control-net "control_normal-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_normal-fp16.safetensors"
-  download-model --control-net "control_openpose-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_openpose-fp16.safetensors"
-  download-model --control-net "control_scribble-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_scribble-fp16.safetensors"
-  download-model --checkpoint "AtoZovyaRPGArtistTools15_sd15V1.safetensors" "https://civitai.com/api/download/models/10185"
-  download-model --embedding "bad_prompt_version2.pt" "https://huggingface.co/datasets/Nerfgun3/bad_prompt/resolve/72fd9d6011c2ba87b5847b7e45e6603917e3cbed/bad_prompt_version2.pt"
-  sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /app/stable-diffusion-webui/modules/ui.py
-  sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /app/stable-diffusion-webui/modules/ui.py
-  sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /app/stable-diffusion-webui/modules/ui.py
-  sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /app/stable-diffusion-webui/modules/ui.py
-  rm -rf /app/stable-diffusion-webui/scripts /app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui /app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser /app/stable-diffusion-webui/extensions/sd-civitai-browser /app/stable-diffusion-webui/extensions/sd-webui-additional-networks
-  cp -f shared-config.json config.json
-  cp -f shared-ui-config.json ui-config.json
-  exit 0
-fi
-## End of lightweight installation for $IS_SHARED_UI setup.
-
-## ----------------------------
-## env $IS_SHARED_UI is not set
-## ----------------------------
-
-## ----------------------------
-
-## LoRA (low-rank adaptation) · epi_noiseoffset v2:
-
-## ----------------------------
-
-## VAE (variational autoencoder):
-## MSE: Smoother images
-download-model --vae "vae-ft-mse-840000-ema-pruned.safetensors" "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors"
-## EMA: Sharper images
-download-model --vae "vae-ft-ema-560000-ema-pruned.safetensors" "https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.safetensors"
-## Unknown
-download-model --vae "Grapefruit.vae.pt" "https://huggingface.co/iZELX1/Grapefruit/resolve/main/Grapefruit.vae.pt"
-## Anime
-download-model --vae "kl-f8-anime.ckpt" "https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/vae/kl-f8-anime.ckpt"
-
-## ----------------------------
-
-## ControlNet · Pre-extracted models:
-#download-model --control-net "cldm_v15.yaml" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/cldm_v15.yaml"
-#download-model --control-net "cldm_v21.yaml" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/cldm_v21.yaml"
-#download-model --control-net "control_canny-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_canny-fp16.safetensors"
-#download-model --control-net "control_depth-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_depth-fp16.safetensors"
-#download-model --control-net "control_hed-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_hed-fp16.safetensors"
-#download-model --control-net "control_normal-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_normal-fp16.safetensors"
-#download-model --control-net "control_openpose-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_openpose-fp16.safetensors"
-#download-model --control-net "control_scribble-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_scribble-fp16.safetensors"
-
-## ----------------------------
-
-## Embedding
-## Bad-hands-5
-download-model --embedding "bad-hands-5.pt" "https://huggingface.co/yesyeahvh/bad-hands-5/resolve/main/bad-hands-5.pt"
-## FastNegative
-download-model --embedding "FastNegativeEmbedding.pt" "https://civitai.com/api/download/models/76712"
-
-## ----------------------------
-
-## Checkpoints:
-## Anything
-download-model --checkpoint "anything-v3-vae-swapped.ckpt" "https://huggingface.co/ckpt/anything-v3-vae-swapped/resolve/main/anything-v3-vae-swapped.ckpt"
-
-## ----------------------------
-
-## Add additional models that you want to install on startup. Replace URL and FILENAME from the examples below with your values.
-
-## Usage:
-## download-model --checkpoint <filename> <url>
-## download-model --lora <filename> <url>
-## download-model --vae <filename> <url>
-## download-model --control-net <filename> <url>
-## download-model --embedding <filename> <url>
-
-## ----------------------------
-
-
-## Checkpoint · Example:
-# download-model --checkpoint "FILENAME" "URL"
-
-## LORA (low-rank adaptation) · Example:
-# download-model --lora "FILENAME" "URL"
-
-## VAE (variational autoencoder) · Example:
-# download-model --vae "FILENAME" "URL"