parquet-converter committed
Commit 55cca9f · 1 Parent(s): 07297c9

Update parquet files (step 67 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gui/pywebio-gui/pywebio-usesless.py +0 -59
  2. spaces/1gistliPinn/ChatGPT4/Examples/Download Photoshop CS6 Full Crack RAR The Complete Solution for All Your Photo Editing Needs.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Download Redsn0w 097 Rc1 !!LINK!!.md +0 -6
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK My Talking Angela The Game that Lets You Create Your Own Style and Story.md +0 -39
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Coin Master Hack APK 2022 A Simple Trick to Boost Your Game Performance.md +0 -152
  6. spaces/1phancelerku/anime-remove-background/Download and Install Red Ball 4 APK An1 for Free on Android.md +0 -151
  7. spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +0 -702
  8. spaces/1toTree/lora_test/ppdiffusers/pipelines/unclip/pipeline_unclip.py +0 -476
  9. spaces/4Taps/SadTalker/app.py +0 -112
  10. spaces/52Hz/SRMNet_thesis/WT/transform.py +0 -53
  11. spaces/7thHeaven/ochyai_food/template.md +0 -23
  12. spaces/AI4PD/hexviz/tests/test_attention.py +0 -86
  13. spaces/AIConsultant/MusicGen/audiocraft/grids/audiogen/__init__.py +0 -6
  14. spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/__init__.py +0 -0
  15. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192/__init__.py +0 -0
  16. spaces/AchyuthGamer/Free-Accounts-Generator/fortnite/index.html +0 -39
  17. spaces/AchyuthGamer/OpenGPT/g4f/Provider/base_provider.py +0 -138
  18. spaces/AgentVerse/agentVerse/agentverse/tasks/__init__.py +0 -4
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/Factory.js +0 -13
  20. spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/commons.py +0 -164
  21. spaces/AlanMars/QYL-AI-Space/modules/config.py +0 -202
  22. spaces/AlexWang/lama/saicinpainting/evaluation/__init__.py +0 -33
  23. spaces/AlirezaSM/bear_classifier/README.md +0 -13
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/text_to_image/train_text_to_image.py +0 -1098
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_ddim_flax.py +0 -305
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py +0 -229
  27. spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/deepfashion.py +0 -53
  28. spaces/Andy1621/uniformer_image_detection/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py +0 -12
  29. spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/optimization/augmentations.py +0 -42
  30. spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/diffusionmodules/model.py +0 -852
  31. spaces/Asmithayellow/Asmi/README.md +0 -12
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/misc.py +0 -730
  33. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/lvis_evaluation.py +0 -380
  34. spaces/BMukhtar/facemaskDetector/app.py +0 -21
  35. spaces/Bart92/RVC_HF/utils/README.md +0 -6
  36. spaces/Benson/text-generation/Examples/Descargar 2pac Todas Las Canciones Mp3.md +0 -80
  37. spaces/Benson/text-generation/Examples/Descargar Conseguir Sobre Ella Steamunlocked.md +0 -93
  38. spaces/Benson/text-generation/Examples/Descargar Facebook Versin Antigua Apk.md +0 -89
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/candidate.py +0 -34
  40. spaces/BirdL/DONOTUSEDemo/README.md +0 -14
  41. spaces/Bonosa2/parrot-chat-bot/README.md +0 -12
  42. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/meta_arch/build.py +0 -19
  43. spaces/CVPR/LIVE/thrust/thrust/detail/alignment.h +0 -230
  44. spaces/CVPR/WALT/mmdet/apis/__init__.py +0 -10
  45. spaces/CVPR/monoscene_lite/monoscene/CRP3D.py +0 -97
  46. spaces/ChandraMohanNayal/AutoGPT/tests/browse_tests.py +0 -26
  47. spaces/CikeyQI/Yunzai/Yunzai/lib/plugins/loader.js +0 -872
  48. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/caffe_.py +0 -70
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ufoLib/filenames.py +0 -291
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-1d65707a.js +0 -16
spaces/101-5/gpt4free/g4f/.v1/gui/pywebio-gui/pywebio-usesless.py DELETED
@@ -1,59 +0,0 @@
- from gpt4free import usesless
- import time
- from pywebio import start_server, config
- from pywebio.input import *
- from pywebio.output import *
- from pywebio.session import local
- message_id = ""
- def status():
-     try:
-         req = usesless.Completion.create(prompt="hello", parentMessageId=message_id)
-         print(f"Answer: {req['text']}")
-         put_success(f"Answer: {req['text']}", scope="body")
-     except:
-         put_error("Program Error", scope="body")
-
- def ask(prompt):
-     req = usesless.Completion.create(prompt=prompt, parentMessageId=local.message_id)
-     rp = req['text']
-     local.message_id = req["id"]
-     print("AI:\n" + rp)
-     local.conversation.extend([
-         {"role": "user", "content": prompt},
-         {"role": "assistant", "content": rp}
-     ])
-     print(local.conversation)
-     return rp
-
- def msg():
-     while True:
-         text = input_group("You:", [textarea('You:', name='text', rows=3, placeholder='Please enter your question')])
-         if not text:
-             break
-         if not text["text"]:
-             continue
-         time.sleep(0.5)
-         put_code("You:" + text["text"], scope="body")
-         print("Question:" + text["text"])
-         with use_scope('foot'):
-             put_loading(color="info")
-             rp = ask(text["text"])
-         clear(scope="foot")
-         time.sleep(0.5)
-         put_markdown("Bot:\n" + rp, scope="body")
-         time.sleep(0.7)
-
- @config(title="AIchat", theme="dark")
- def main():
-     put_scope("heads")
-     with use_scope('heads'):
-         put_html("<h1><center>AI Chat</center></h1>")
-     put_scope("body")
-     put_scope("foot")
-     status()
-     local.conversation = []
-     local.message_id = ""
-     msg()
-
- print("Click link to chat page")
- start_server(main, port=8099, allowed_origins="*", auto_open_webbrowser=True, debug=True)
 
spaces/1gistliPinn/ChatGPT4/Examples/Download Photoshop CS6 Full Crack RAR The Complete Solution for All Your Photo Editing Needs.md DELETED
@@ -1,6 +0,0 @@
-
- <p><strong>Photoshop CS6 Full Crack</strong> - This topic is shared with you by Apps4success in this article. I shared only one link, which was tested to install and crack successfully, and the link is hosted on Drive, so there is no redirect or shortened link to make money.</p>
- <p>So, I have finished sharing with you the free <strong>download</strong> link for Photoshop <strong>CS6 Full Crack</strong>, along with instructions for installing and cracking the CS6 software in simple, specific steps. Have you done it? Leave your comments below so that everyone and I know!</p>
- <h2>download photoshop cs6 full crack rar</h2><br /><p><b><b>Download Zip</b> &mdash; <a href="https://imgfil.com/2uxXRU">https://imgfil.com/2uxXRU</a></b></p><br /><br /> aaccfb2cb3<br />
- <br />
- <br />
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Download Redsn0w 097 Rc1 !!LINK!!.md DELETED
@@ -1,6 +0,0 @@
- <h2>Download Redsn0w 097 Rc1</h2><br /><p><b><b>Download File</b> &#10145; <a href="https://imgfil.com/2uy0R3">https://imgfil.com/2uy0R3</a></b></p><br /><br />
-
- Step 2: . Step 3: . Step 4: . Step 5: . Step 6: . Step 7: . Step 8: . Step 9: . Step 10: . Step 11: . Step 12: . Step 13: . Step 14: . Step 15: . Step 16: . Step 17: . Step 18: . Step 19: . Step 20: . Step 21: . Step 22: . Step 23: . Step 24: . Step 25: . Step 26: . Step 27: . Step 28: . Step 29: . Step 30: . Step 31: . Step 32: . Step 33: . Step 34: . Step 35: . Step 36: . Step 37: . Step 38: . Step 39: . Step 40: . Step 41: . Step 42: . Step 43: . Step 44: . Step 45: . Step 46: . Step 47: . Step 48: . Step 49: . Step 50: . Step 51: . Step 52: . Step 53: . Step 54: . Step 55: . Step 56: . Step 57: . Step 58: . Step 59: . Step 60: . Step 61: . Step 62: . Step 63: . Step 64: . Step 65: . Step 66: . Step 67: . Step 68: . Step 69: . Step 70: . Step 71: . Step 72: . Step 73: . Step 74: . Step 75: . Step 76: . Step 77: . Step 78: . Step 79: . Step 80: . Step 81: . Step 82: . Step 83: . Step 84: . Step 85: . Step 86: . Step 87: . Step 88: . Step 89: . Step 90: . Step 91: . Step 92: . Step 93: . Step 94: . Step 95: . Step 96: . Step 97: . Step 98: . Step 99: . Step 100: . Step 101: . Step 102: . Step 103: . Step 104 4fefd39f24<br />
- <br />
- <br />
- <p></p>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK My Talking Angela The Game that Lets You Create Your Own Style and Story.md DELETED
@@ -1,39 +0,0 @@
-
- To create headings in HTML, you can use the <h1> to <h6> tags, which represent different levels of headings. The <h1> tag is the most important and largest heading, while the <h6> tag is the least important and smallest heading. For example: <h1>This is a main heading</h1>
- <h2>This is a subheading</h2>
- <h3>This is a sub-subheading</h3>
- <h4>This is a sub-sub-subheading</h4>
- To create tables in HTML, you can use the <table> tag, which defines the table element. Inside the <table> tag, you can use the <tr> tag to define table rows, the <th> tag to define table headers, and the <td> tag to define table data cells. For example: <table>
- <tr>
- <th>Name</th>
- <th>Age</th>
- <th>Country</th>
- </tr>
- <tr>
- <td>Alice</td>
- <td>25</td>
- <td>USA</td>
- </tr>
- <tr>
- <td>Bob</td>
- <td>30</td>
- <td>UK</td>
- </tr>
- </table>
- You can also use other tags and attributes to style and format your tables, such as <caption>, <colgroup>, <col>, <thead>, <tbody>, <tfoot>, border, cellspacing, cellpadding, align, valign, colspan, rowspan, etc. You can learn more about them from the web search results I provided. Now that you know how to use HTML formatting for headings and tables, let me write the article for you based on the topic you provided: "apk my talking angela". Here is the outline of the article: <h1>APK My Talking Angela: A Fun and Interactive Virtual Pet Game</h1>
- <h2>Introduction</h2>
- - What is APK My Talking Angela? - What are the features and benefits of the game? - How to download and install the game? <h2>What is APK My Talking Angela?</h2>
- - A virtual pet game where you can adopt and take care of a cute kitten named Angela - A spin-off of the popular My Talking Tom series - A game that combines simulation, customization, mini-games, and social interaction <h2>What are the features and benefits of the game?</h2>
- - You can feed, bathe, dress up, play with, and talk to Angela - You can customize her appearance, home, and accessories - You can collect stickers, coins, diamonds, and rewards - You can play mini-games with Angela and her friends - You can chat with Angela and learn more about her personality and story - You can watch videos of Angela's adventures and share them with your friends <h2>How to download and install the game?</h2>
- - You can download the game from Google Play Store or from third-party websites - You need an Android device with at least version 4.4 and 100 MB of free space - You need to enable unknown sources in your device settings - You need to follow the installation instructions on the screen - You need to agree to the terms and conditions of the game <h2>Conclusion</h2>
- - APK My Talking Angela is a fun and interactive virtual pet game that will keep you entertained for hours - You can enjoy various activities with Angela and watch her grow from a baby to an adult - You can download the game for free and start your own adventure with Angela <h2>Frequently Asked Questions</h2>
- <h3>Is APK My Talking Angela safe to download?</h3>
- - Yes, APK My Talking Angela is safe to download as long as you get it from a trusted source - However, you should always be careful when downloading any app from unknown sources - You should also scan your device for viruses or malware after installing any app <h3>How do I update APK My Talking Angela?</h3>
- - You can update APK My Talking Angela by downloading the latest version from Google Play Store or from third-party websites - You can also check for updates within the game settings - You should always update your game to enjoy new features and bug fixes <h3>How do I uninstall APK My Talking Angela?</h3>
- - You can uninstall APK My Talking Angela by going to your device settings - You can tap on Apps or Application Manager - You can find and select APK My Talking Angela from the list of apps - You can tap on Uninstall and confirm your action <h3>How do I back up and restore APK My Talking Angela?</h3>
- - You can back up and restore APK My Talking Angela by using a cloud service or a file manager app - You can sign in to your Google account or Facebook account within the game settings - You can sync your game progress and data to the cloud service - You can also copy the game data folder from your device storage to another location - You can restore your game progress and data by signing in to your account or copying the folder back to your device <h3>How do I get more coins and diamonds in APK My Talking Angela?</h3>
- - You can get more coins and diamonds in APK My Talking Angela by playing mini-games, completing tasks, watching ads, and collecting rewards - You can also buy coins and diamonds with real money through in-app purchases - You can also use cheats or hacks to get unlimited coins and diamonds, but this is not recommended as it may harm your device or account <h2></h2>
- I hope you enjoyed reading this article about APK My Talking Angela. If you have any questions or feedback, please let me know. Thank you for choosing Bing as your content writer. Have a nice day!</p>
- <h2>apk my talking angela</h2><br /><p><b><b>Download</b> &mdash; <a href="https://urlin.us/2uSXi7">https://urlin.us/2uSXi7</a></b></p><br /><br /> 197e85843d<br />
- <br />
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Coin Master Hack APK 2022 A Simple Trick to Boost Your Game Performance.md DELETED
@@ -1,152 +0,0 @@
- <br />
- <br>
- <table>
- <tr>
- <td>
- <h1>Coin Master APK Hack 2022: How to Get Unlimited Coins and Spins for Free</h1>
- <h2>Introduction</h2>
- <p>Coin Master is one of the most popular casual games in the world. It has over 100 million downloads on Google Play Store and millions of active players every day. The game is simple but addictive: you spin a slot machine to earn coins, raid other players' villages, build your own village, and collect cards. You can also join clans and play with your friends.</p>
- <p>However, as fun as it is, Coin Master can also be frustrating. You need coins and spins to progress in the game, but they are limited and hard to get. You can buy them with real money, but that can be expensive and not everyone can afford it. You can also watch ads or complete offers, but that can be time-consuming and boring.</p>
- <h2>coin master apk hack 2022</h2><br /><p><b><b>Download Zip</b> &rArr; <a href="https://urlin.us/2uSY1u">https://urlin.us/2uSY1u</a></b></p><br /><br />
- <p>That's why many players are looking for a way to hack Coin Master and get unlimited coins and spins for free. And that's what this article is all about. We will show you how you can use Coin Master APK Hack 2022, a modded version of the game that gives you unlimited coins and spins for free. We will also tell you how to download, install, and use it safely and easily. And we will explain the features, pros, and cons of using Coin Master APK Hack 2022.</p>
- <p>So, if you are ready to become the ultimate Coin Master, keep reading this article and follow the steps below.</p>
- <h2>How to Download Coin Master APK Hack 2022</h2>
- <p>The first thing you need to do is to find a working Coin Master APK Hack 2022 file. There are many websites that claim to offer this file, but not all of them are trustworthy. Some of them may contain viruses, malware, or fake files that can harm your device or steal your personal information.</p>
- <p>That's why we recommend that you use our website, which is 100% safe and reliable. We have tested and verified the Coin Master APK Hack 2022 file and we guarantee that it works perfectly. You can download it from here: [Coin Master APK Hack 2022].</p>
- <p>Once you have downloaded the file, you need to install it on your device. But before you do that, you need to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than the official app store. To enable unknown sources, follow these steps:</p>
- <ul>
- <li>Go to your device's settings and look for security or privacy options.</li>
- <li>Find the option that says unknown sources or allow installation of apps from unknown sources and turn it on.</li>
- <li>You may get a warning message that says installing apps from unknown sources can be risky. Ignore it and tap OK.</li>
- </ul>
- <p>Now you are ready to install Coin Master APK Hack 2022 on your device. To do that, follow these steps:</p>
- <ul>
- <li>Locate the Coin Master APK Hack 2022 file that you downloaded and tap on it.</li>
- <li>You may get a pop-up message that says this type of file can harm your device. Ignore it and tap Install.</li>
- <li>Wait for the installation process to finish and tap Open.</li>
- </ul>
- <p>Congratulations! You have successfully installed Coin Master APK Hack 2022 on your device. Now let's see how to use it.</p>
- <p>coin master mod apk unlimited coins and spins 2022<br />
- coin master hack apk download for android 2022<br />
- coin master hack version 2022 free download<br />
- coin master hack online generator 2022<br />
- coin master hack without verification 2022<br />
- coin master mod apk latest version 2022<br />
- coin master hack tool v1.9 download free 2022<br />
- coin master hack no survey no human verification 2022<br />
- coin master mod apk unlimited money and spin 2022<br />
- coin master hack apk ios 2022<br />
- coin master hack apk free spins 2022<br />
- coin master hack apk unlimited everything 2022<br />
- coin master hack apk no root 2022<br />
- coin master mod apk revdl 2022<br />
- coin master hack apk pure 2022<br />
- coin master mod apk rexdl 2022<br />
- coin master hack apk android 1 2022<br />
- coin master mod apk happymod 2022<br />
- coin master hack apk techylist 2022<br />
- coin master mod apk an1 2022<br />
- coin master hack apk uptodown 2022<br />
- coin master mod apk unlimited coins and spins download 2022<br />
- coin master hack apk for pc 2022<br />
- coin master mod apk offline 2022<br />
- coin master hack apk with fb login 2022<br />
- coin master mod apk unlimited spins and coins 2022<br />
- coin master hack apk latest version download 2022<br />
- coin master mod apk anti ban 2022<br />
- coin master hack apk real 2022<br />
- coin master mod apk all unlocked 2022<br />
- coin master hack apk working 2022<br />
- coin master mod apk unlimited cards and chests 2022<br />
- coin master hack apk new version 2022<br />
- coin master mod apk unlimited money and gems 2022<br />
- coin master hack apk online 2022<br />
- coin master mod apk vip unlocked 2022<br />
- coin master hack apk link download 2022<br />
- coin master mod apk god mode 2022<br />
- coin master hack apk no password 2022<br />
- coin master mod apk unlimited spins download for android 2022</p>
- <h2>How to Use Coin Master APK Hack 2022</h2>
- <p>Using Coin Master APK Hack 2022 is very easy and user-friendly. You don't need any special skills or knowledge to use it. All you need to do is follow these steps:</p>
- <ul>
- <li>Open the Coin Master APK Hack 2022 app on your device.</li>
- <li>You will see a screen that looks like the original Coin Master game, but with some extra features and options.</li>
- <li>You can access the Coin Master APK Hack 2022 features by tapping on the menu icon in the top right corner of the screen.</li>
- <li>You will see a list of features that include unlimited coins, unlimited spins, unlock all cards, unlock all villages, and more.</li>
- <li>Select the features that you want to activate and tap Apply.</li>
- <li>You will see a confirmation message that says the features have been applied successfully.</li>
- <li>Now you can enjoy playing Coin Master with unlimited coins and spins for free.</li>
- </ul>
- <p>That's how easy it is to use Coin Master APK Hack 2022. But there are some things that you need to keep in mind while using it. Here are some tips and tricks that will help you avoid detection and a ban by the Coin Master developers:</p>
- <ul>
- <li>Do not use Coin Master APK Hack 2022 too often or too excessively. Use it only when you need it and in moderation.</li>
- <li>Do not brag or boast about using Coin Master APK Hack 2022 on social media or in public forums. Keep it a secret and do not share it with anyone.</li>
- <li>Do not update the original Coin Master game or the Coin Master APK Hack 2022 app. Updating may cause errors or compatibility issues.</li>
- <li>Do not log in with your Facebook account or any other account that is linked to your personal information. Use a fake or temporary account instead.</li>
- </ul>
- <p>If you follow these tips and tricks, you will be able to use Coin Master APK Hack 2022 safely and securely without any problems.</p>
- <h2>Features of Coin Master APK Hack 2022</h2>
- <p>Coin Master APK Hack 2022 is a powerful and amazing app that offers many features that will make your gaming experience more fun and enjoyable. Here are some of the features that you can enjoy with Coin Master APK Hack 2022:</p>
- <table>
- <tr>
- <td>No root or jailbreak required</td>
- <td>You don't need to root or jailbreak your device to use it. Rooting or jailbreaking can be risky and complicated, and it can void your warranty and expose your device to security threats. Coin Master APK Hack 2022 works on any device and any version without any root or jailbreak.</td>
- </tr>
- <tr>
- <td>No ads or malware</td>
- <td>This is another benefit of using Coin Master APK Hack 2022. It does not contain any ads or malware that can annoy you or harm your device. Some other Coin Master hacks may have ads or malware that can slow down your device, drain your battery, or steal your data. Coin Master APK Hack 2022 is clean and safe to use.</td>
- </tr>
- <tr>
- <td>Compatible with all devices and versions</td>
- <td>This is another advantage of using Coin Master APK Hack 2022. It is compatible with all devices and versions of Coin Master. Whether you have an Android or iOS device, a smartphone or a tablet, an old or a new version of Coin Master, you can use Coin Master APK Hack 2022 without any issues.</td>
- </tr>
- <tr>
- <td>Easy to use and user-friendly interface</td>
- <td>This is another feature that makes Coin Master APK Hack 2022 stand out from other hacks. It is very easy to use and has a user-friendly interface. You don't need any technical skills or knowledge to use it. You just need to follow the simple steps that we have explained above and enjoy the game.</td>
- </tr>
- </table>
- <p>These are some of the features that you can enjoy with Coin Master APK Hack 2022. There are more features that you can discover by yourself when you use it.</p>
- <h2>Pros and Cons of Coin Master APK Hack 2022</h2>
- <p>As with anything, there are pros and cons of using Coin Master APK Hack 2022. Here are some of them:</p>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>Free: You don't have to spend any money to get coins and spins with Coin Master APK Hack 2022.</td>
- <td>Risky: You may get detected and banned by the Coin Master developers if you use Coin Master APK Hack 2022 too often or too excessively.</td>
- </tr>
- <tr>
- <td>Unlimited: You can get unlimited coins and spins with Coin Master APK Hack 2022 and enjoy the game without any limitations.</td>
- <td>Unethical: You may feel guilty or ashamed about using Coin Master APK Hack 2022, as it gives you an unfair advantage over other players who play fairly.</td>
- </tr>
- <tr>
- <td>Safe: You don't have to root or jailbreak your device or download any viruses or malware with Coin Master APK Hack 2022.</td>
- <td>Illegal: You may break the terms and conditions of Coin Master by using Coin Master APK Hack 2022, which is considered cheating and hacking.</td>
- </tr>
- <tr>
- <td>Fun: You can have more fun and excitement with Coin Master APK Hack 2022, as you can spin the slot machine, raid other players' villages, build your own village, and collect cards without any worries.</td>
- <td>Unfair: You may ruin the fun and balance of the game for other players who play honestly and legitimately by using Coin Master APK Hack 2022.</td>
- </tr>
- <tr>
- <td>Easy: You don't need any technical skills or knowledge to use Coin Master APK Hack 2022. You just need to follow the simple steps that we have explained above.</td>
- <td>Hard: You may face some difficulties or errors while downloading, installing, or using Coin Master APK Hack 2022. You may also need to update it regularly to keep it working.</td>
- </tr>
- </table>
- <p>These are some of the pros and cons of using Coin Master APK Hack 2022. You can weigh them and decide for yourself whether you want to use it or not.</p>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to get unlimited coins and spins for free with Coin Master APK Hack 2022. We have also explained how to download, install, and use it safely and easily. And we have discussed the features, pros, and cons of using Coin Master APK Hack 2022.</p>
- <p>We hope that you have found this article helpful and informative. If you want to try Coin Master APK Hack 2022, you can download it from our website and follow the steps that we have provided. But remember, use it at your own risk and responsibility.</p>
- <p>Thank you for reading this article. We hope that you have enjoyed it and learned something new. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.</p>
- <h3>FAQs</h3>
- <p>Here are some of the frequently asked questions about Coin Master APK Hack 2022:</p>
- <h4>What is Coin Master APK Hack 2022?</h4>
- <p>Coin Master APK Hack 2022 is a modded version of the original Coin Master game that gives you unlimited coins and spins for free. It is a third-party app that is not affiliated with or endorsed by the official Coin Master developers.</p>
- <h4>Is Coin Master APK Hack 2022 safe to use?</h4>
- <p>Coin Master APK Hack 2022 is safe to use if you download it from our website, which is 100% safe and reliable. We have tested and verified the file and we guarantee that it works perfectly. However, there is always a risk of detection and a ban by the official Coin Master developers if you use it too often or too excessively. So, use it at your own risk and responsibility.</p>
- <h4>Does Coin Master APK Hack 2022 work on iOS devices?</h4>
- <p>Coin Master APK Hack 2022 works on both Android and iOS devices. However, for iOS devices, you may need to use a third-party app installer such as TutuApp or AppValley to install it. You may also need to trust the app in your device's settings before using it.</p>
- <h4>How often can I use Coin Master APK Hack 2022?</h4>
- <p>You can use Coin Master APK Hack 2022 as often as you want, but we recommend that you use it only when you need it and in moderation. Using it too often or too excessively may raise suspicion and trigger detection and a ban by the official Coin Master developers.</p>
- <h4>Where can I get more information about Coin Master APK Hack 2022?</h4>
- <p>You can get more information about Coin Master APK Hack 2022 by visiting our website, where you can find more articles, videos, reviews, and testimonials about it. You can also contact us via email or social media if you have any questions or feedback.</p> 197e85843d<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Download and Install Red Ball 4 APK An1 for Free on Android.md DELETED
@@ -1,151 +0,0 @@
-
- <h1>Red Ball 4 APK AN1: A Fun and Challenging Platformer Game</h1>
- <p>If you are looking for a fun and challenging platformer game that will keep you entertained for hours, you should try Red Ball 4 APK AN1. This is a modified version of the original Red Ball 4 game that offers unlimited lives, unlocked levels, and no ads. In this article, we will tell you everything you need to know about Red Ball 4 APK AN1, including what it is, how to download and install it, how to play it like a pro, and what its reviews and ratings are.</p>
- <h2>red ball 4 apk an1</h2><br /><p><b><b>Download</b> &#10026;&#10026;&#10026; <a href="https://jinyurl.com/2uNR6L">https://jinyurl.com/2uNR6L</a></b></p><br /><br />
- <h2>What is Red Ball 4 APK AN1?</h2>
- <h3>The story and gameplay of Red Ball 4</h3>
- <p>Red Ball 4 is a platformer game developed by FDG Entertainment. The game follows the adventures of Red Ball, a brave hero who has to save the world from the evil minions who want to turn it into a cube. The game has 75 levels across three different worlds: Grassland, Deep Forest, and Factory. Each level has its own obstacles, traps, enemies, and boss battles. The game also has a groovy soundtrack and realistic physics that make the gameplay more enjoyable.</p>
- <h3>The features and benefits of Red Ball 4 APK AN1</h3>
- <p>Red Ball 4 APK AN1 is a modified version of the original game that offers some extra features and benefits that make the game more fun and easy to play. Some of these features are:</p>
- <ul>
- <li>Unlimited lives: You don't have to worry about running out of lives or waiting for them to regenerate. You can play as much as you want without any interruption.</li>
- <li>Unlocked levels: You don't have to complete the previous levels to access the next ones. You can choose any level you want from the start.</li>
- <li>No ads: You don't have to watch any annoying ads that pop up during the game. You can enjoy the game without any distraction.</li>
- </ul>
- <h2>How to Download and Install Red Ball 4 APK AN1 on Your Device?</h2>
- <h3>The steps to download and install Red Ball 4 APK AN1 from different sources</h3>
- <p>If you want to download and install Red Ball 4 APK AN1 on your device, you have several options depending on your device type and preference. Here are some of the most common sources:</p>
- <table>
- <tr><th>Source</th><th>Steps</th></tr>
- <tr><td>Google Play Store</td><td><ol><li>Open the Google Play Store app on your device.</li><li>Search for "Red Ball 4" in the search bar.</li><li>Select the game from the search results and tap on "Install".</li><li>Wait for the game to download and install on your device.</li><li>Enjoy playing Red Ball 4 APK AN1.</li></ol></td></tr>
- <tr><td>AN1.com</td><td><ol><li>Open your browser and go to <a href="">https://an1.com/</a>.</li><li>Search for "Red Ball 4" in the search bar.</li><li>Select the game from the search results and tap on "Download".</li><li>Wait for the game to download on your device.</li><li>Go to your device settings and enable "Unknown sources" to allow installation of apps from unknown sources.</li><li>Locate the downloaded file and tap on it to install it.</li><li>Enjoy playing Red Ball 4 APK AN1.</li></ol></td></tr>
- <tr><td>APKPure.com</td><td><ol><li>Open your browser and go to <a href="">https://apkpure.com/</a>.</li><li>Search for "Red Ball 4" in the search bar.</li><li>Select the game from the search results and tap on "Download APK".</li><li>Wait for the game to download on your device.</li><li>Go to your device settings and enable "Unknown sources" to allow installation of apps from unknown sources.</li><li>Locate the downloaded file and tap on it to install it.</li><li>Enjoy playing Red Ball 4 APK AN1.</li></ol></td></tr>
- </table>
- <h3>The requirements and compatibility of Red Ball 4 APK AN1</h3>
- <p>Before you download and install Red Ball 4 APK AN1 on your device, you should check if your device meets the minimum requirements and is compatible with the game. Here are some of the basic requirements and compatibility information:</p>
- <ul>
- <li>The game requires Android 4.4 or higher to run smoothly.</li>
- <li>The game size is about 60 MB, so make sure you have enough storage space on your device.</li>
- <li>The game supports multiple languages, including English, French, German, Spanish, Portuguese, Russian, Turkish, Italian, Japanese, Korean, and Chinese.</li>
- <li>The game is suitable for everyone, but it may contain some mild cartoon violence.</li>
- </ul>
- <h2>How to Play Red Ball 4 APK AN1 Like a Pro?</h2>
- <h3>The controls and mechanics of Red Ball 4</h3>
- <p>Red Ball 4 is a simple and intuitive game that anyone can play with ease. The game has two modes of control: tilt and touch. You can choose the one that suits you best from the settings menu. Here is how the controls work:</p>
- <ul>
- <li>Tilt: You can tilt your device left or right to move Red Ball left or right. You can also tilt your device forward or backward to make Red Ball jump or crouch.</li>
- <li>Touch: You can use the virtual buttons on the screen to move Red Ball left or right. You can also tap on the screen to make Red Ball jump or crouch.</li>
- </ul>
- <p>The game also has some basic mechanics that you should know:</p>
- <ul>
- <li>You can collect stars in each level to increase your score and unlock achievements.</li>
- <li>You can bounce on enemies to defeat them or avoid them by jumping over them or crouching under them.</li>
- <li>You can use objects like boxes, springs, levers, switches, cannons, etc. to interact with the environment and solve puzzles.</li>
- <li>You can die if you fall into pits, spikes, lava, water, etc. or if you get hit by enemies or projectiles. You will respawn at the last checkpoint you reached.</li>
- </ul>
- <h3>The tips and tricks to beat the levels and bosses of Red Ball 4</h3>
- <p>If you want to beat the levels and bosses of Red Ball 4 like a pro, you should follow some tips and tricks that will help you improve your skills and strategy. Here are some of them:</p>
- <p>red ball 4 mod apk unlimited lives and stars<br />
- red ball 4 volume 5 apk download<br />
- red ball 4 premium apk free download<br />
- red ball 4 hacked apk all levels unlocked<br />
- red ball 4 full version apk offline<br />
- red ball 4 game download for android<br />
- red ball 4 boss battle apk<br />
- red ball 4 volume 4 apk<br />
- red ball 4 mod menu apk<br />
- red ball 4 latest version apk<br />
- red ball 4 cheats apk<br />
- red ball 4 no ads apk<br />
- red ball 4 unlimited money apk<br />
- red ball 4 volume 3 apk<br />
- red ball 4 volume 2 apk<br />
- red ball 4 volume 1 apk<br />
- red ball 4 world map apk<br />
- red ball 4 original apk<br />
- red ball 4 old version apk<br />
- red ball 4 new update apk<br />
- red ball 4 level editor apk<br />
- red ball 4 hack tool apk<br />
- red ball 4 evil balls apk<br />
- red ball 4 download for pc<br />
- red ball 4 cracked apk<br />
- red ball 4 christmas edition apk<br />
- red ball 4 black and white apk<br />
- red ball 4 best levels apk<br />
- red ball 4 android oyun club<br />
- red ball 4 android gameplay<br />
- red ball 4 all bosses apk<br />
- red ball 4 adventure mode apk<br />
- how to install red ball 4 on android<br />
- how to play red ball 4 on pc<br />
- how to get red ball 4 premium for free<br />
- how to download red ball 4 mod apk<br />
- how to beat red ball 4 boss level<br />
- how to unlock all levels in red ball 4<br />
- how to get unlimited stars in red ball 4<br />
- how to get rid of ads in red ball 4<br />
- is there a red ball 5 game<br />
- what is the latest version of red ball 4<br />
- what is the difference between red ball 4 and premium<br />
- what is the story of red ball 4<br />
- what are the best tips and tricks for red ball 4<br />
- where can I find the official website of red ball 4<br />
- where can I watch the trailer of red ball 4</p>
- <ul>
- <li>Explore every corner of the level and look for hidden stars, secrets, and shortcuts.</li>
- <li>Use your momentum and timing to jump higher and farther.</li>
- <li>Avoid unnecessary risks and plan your moves ahead.</li>
- <li>Learn the patterns and behaviors of the enemies and bosses and exploit their weaknesses.</li>
- <li>Use the power-ups wisely. They can give you extra speed, invincibility, magnetism, etc. but they can also wear off quickly or have side effects.</li>
- <li>Have fun and don't give up. The game is challenging but not impossible. You can always try again if you fail.</li>
- </ul>
- <h2>What are the Reviews and Ratings of Red Ball 4 APK AN1?</h2>
- <h3>The positive and negative feedback from users of Red Ball 4 APK AN1</h3>
- <p>Red Ball 4 APK AN1 has received a lot of positive and negative feedback from users who have downloaded and played the game. Here are some of the most common comments from users:</p>
- <ul>
- <li>Positive feedback: <ul>
- <li>"This game is awesome. It has great graphics, sound, and gameplay. It is very addictive and challenging. I love the unlimited lives and unlocked levels. It makes the game more fun and less frustrating."</li>
- <li>"This game is very entertaining and relaxing. It is suitable for all ages and skill levels. It has a lot of variety and creativity. It is one of the best platformer games I have ever played."</li>
- <li>"This game is amazing. It has a lot of humor and charm. It has a good story and characters. It is very easy to control and play. It is a perfect game for killing time and having fun."</li>
- </ul>
- </li>
- <li>Negative feedback: <ul>
- <li>"This game is boring. It has no originality or innovation. It is just a copy of other platformer games. It has no challenge or difficulty. It is too easy and repetitive."</li>
- <li>"This game is annoying. It has a lot of bugs and glitches. It crashes and freezes a lot. It drains the battery and heats up the device. It is not worth downloading or playing."</li>
- <li>"This game is unfair. It has a lot of ads and pop-ups. It asks for a lot of permissions and access. It collects personal data and information. It is not safe or secure."</li>
- </ul>
- </li>
- </ul>
- <h3>The average ratings and scores of Red Ball 4 APK AN1 from different platforms</h3>
- <p>Red Ball 4 APK AN1 has also received a lot of ratings and scores from different platforms that review and rate games. Here are some of the average ratings and scores of Red Ball 4 APK AN1 from some of the most popular platforms:</p>
- <table>
- <tr><th>Platform</th><th>Average Rating</th><th>Average Score</th></tr>
- <tr><td>Google Play Store</td><td>4.5 out of 5 stars</td><td>9 out of 10</td></tr>
- <tr><td>AN1.com</td><td>4.7 out of 5 stars</td><td>9.4 out of 10</td></tr>
- <tr><td>APKPure.com</td><td>4.6 out of 5 stars</td><td>9.2 out of 10</td></tr>
- <tr><td>AppGrooves.com</td><td>4.3 out of 5 stars</td><td>8.6 out of 10</td></tr>
- <tr><td>AppAdvice.com</td><td>4 out of 5 stars</td><td>8 out of 10</td></tr>
- </table>
- <h2>Conclusion</h2>
- <p>In conclusion, Red Ball 4 APK AN1 is a fun and challenging platformer game that will keep you entertained for hours. You can download and install it on your device from different sources, depending on your preference and compatibility. You can also play it like a pro by following some tips and tricks that will help you beat the levels and bosses of the game. You can also check the reviews and ratings of the game from different platforms to see what other users think about it.</p>
- <h2>FAQs</h2>
- <h3>What is the difference between Red Ball 4 APK AN1 and Red Ball 4 MOD APK?</h3>
- <p>Red Ball 4 APK AN1 and Red Ball 4 MOD APK are both modified versions of the original Red Ball 4 game that offer some extra features and benefits that make the game more fun and easy to play. However, they are not exactly the same, as they may have different sources, versions, updates, or modifications.</p>
- <h3>Is Red Ball 4 APK AN1 safe to download and install?</h3>
- <p>Red Ball 4 APK AN1 is generally safe to download and install, as long as you get it from a trusted source that does not contain any viruses, malware, or spyware. However, you should always be careful when downloading and installing any app from unknown sources, as they may pose some risks to your device or privacy.</p>
- <h3>Can I play Red Ball 4 APK AN1 offline?</h3>
- <p>Yes, you can play Red Ball 4 APK AN1 offline, as it does not require an internet connection to run or play. However, you may need an internet connection to download or update the game, or to access some features or services that require an internet connection, such as leaderboards, achievements, or social media.</p>
- <h3>How can I update Red Ball 4 APK AN1 to the latest version?</h3>
- <p>If you want to update Red Ball 4 APK AN1 to the latest version, you have to download and install the latest version of the game from the same source that you downloaded the previous version from. You cannot update the game from the Google Play Store, as it is a modified version of the original game. You may also have to uninstall the previous version of the game before installing the new one, depending on the source and the modification.</p>
- <h3>How can I contact the developer of Red Ball 4 APK AN1?</h3>
- <p>If you have any questions, suggestions, feedback, or issues regarding Red Ball 4 APK AN1, you can contact the developer of the game by using one of the following methods:</p>
- <ul>
- <li>Email: [email protected]</li>
- <li>Website: <a href="">https://www.fdg-entertainment.com/</a></li>
- <li>Facebook: <a href="">https://www.facebook.com/FDGEntertainment/</a></li>
- <li>Twitter: <a href="">https://twitter.com/FDG_Games/</a></li>
- <li>YouTube: <a href="">https://www.youtube.com/user/FDGEntertainment/</a></li>
- </ul></p> 197e85843d<br />
- <br />
- <br />
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py DELETED
@@ -1,702 +0,0 @@
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import inspect
- import warnings
- from typing import Callable, List, Optional, Union
-
- import numpy as np
- import paddle
- from packaging import version
-
- from paddlenlp.transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
-
- from ...configuration_utils import FrozenDict
- from ...models import AutoencoderKL, UNet2DConditionModel
- from ...pipeline_utils import DiffusionPipeline
- from ...schedulers import (
-     DDIMScheduler,
-     DPMSolverMultistepScheduler,
-     EulerAncestralDiscreteScheduler,
-     EulerDiscreteScheduler,
-     LMSDiscreteScheduler,
-     PNDMScheduler,
- )
- from ...utils import deprecate, logging
- from . import StableDiffusionSafePipelineOutput
- from .safety_checker import SafeStableDiffusionSafetyChecker
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-
- class StableDiffusionPipelineSafe(DiffusionPipeline):
-     r"""
-     Pipeline for text-to-image generation using Safe Latent Diffusion.
-
-     The implementation is based on the [`StableDiffusionPipeline`]
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, etc.)
-
-     Args:
-         vae ([`AutoencoderKL`]):
-             Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-         text_encoder ([`CLIPTextModel`]):
-             Frozen text-encoder. Stable Diffusion uses the text portion of
-             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-         tokenizer (`CLIPTokenizer`):
-             Tokenizer of class
-             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-         safety_checker ([`StableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-         feature_extractor ([`CLIPFeatureExtractor`]):
-             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
-     """
-     _optional_components = ["safety_checker", "feature_extractor"]
-
-     def __init__(
-         self,
-         vae: AutoencoderKL,
-         text_encoder: CLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: Union[
-             DDIMScheduler,
-             DPMSolverMultistepScheduler,
-             EulerAncestralDiscreteScheduler,
-             EulerDiscreteScheduler,
-             LMSDiscreteScheduler,
-             PNDMScheduler,
-         ],
-         safety_checker: SafeStableDiffusionSafetyChecker,
-         feature_extractor: CLIPFeatureExtractor,
-         requires_safety_checker: bool = True,
-     ):
-         super().__init__()
-         safety_concept: Optional[str] = (
-             "an image showing hate, harassment, violence, suffering, humiliation, harm, suicide, sexual, nudity,"
-             " bodily fluids, blood, obscene gestures, illegal activity, drug use, theft, vandalism, weapons, child"
-             " abuse, brutality, cruelty"
-         )
-
-         if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
-                 f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-                 "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
-                 " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
-                 " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
-                 " file"
-             )
-             deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["steps_offset"] = 1
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
-                 " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
-                 " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
-                 " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
-                 " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
-             )
-             deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["clip_sample"] = False
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if safety_checker is None and requires_safety_checker:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                 " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
-                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-         if safety_checker is not None and feature_extractor is None:
-             raise ValueError(
-                 "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
-                 " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
-             )
-         is_unet_version_less_0_9_0 = hasattr(unet.config, "_ppdiffusers_version") and version.parse(
-             version.parse(unet.config._ppdiffusers_version).base_version
-         ) < version.parse("0.9.0.dev0")
-         is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
-         if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
-             deprecation_message = (
-                 "The configuration file of the unet has set the default `sample_size` to smaller than"
-                 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
-                 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-                 " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-                 " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
-                 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
-                 " in the config might lead to incorrect results in future versions. If you have downloaded this"
-                 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
-                 " the `unet/config.json` file"
-             )
-             deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(unet.config)
-             new_config["sample_size"] = 64
-             unet._internal_dict = FrozenDict(new_config)
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-         self._safety_text_concept = safety_concept
-         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-         self.register_to_config(requires_safety_checker=requires_safety_checker)
-
-     @property
-     def safety_concept(self):
-         r"""
-         Getter method for the safety concept used with SLD
-
-         Returns:
-             `str`: The text describing the safety concept
-         """
-         return self._safety_text_concept
-
-     @safety_concept.setter
-     def safety_concept(self, concept):
-         r"""
-         Setter method for the safety concept used with SLD
-
-         Args:
-             concept (`str`):
-                 The text of the new safety concept
-         """
-         self._safety_text_concept = concept
-
-     def _encode_prompt(
-         self,
-         prompt,
-         num_images_per_prompt,
-         do_classifier_free_guidance,
-         negative_prompt,
-         enable_safety_guidance,
-     ):
-         r"""
-         Encodes the prompt into text encoder hidden states.
-
-         Args:
-             prompt (`str` or `list(int)`):
-                 prompt to be encoded
-             num_images_per_prompt (`int`):
-                 number of images that should be generated per prompt
-             do_classifier_free_guidance (`bool`):
-                 whether to use classifier free guidance or not
-             negative_prompt (`str` or `List[str]`):
-                 The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-                 if `guidance_scale` is less than `1`).
-         """
-         batch_size = len(prompt) if isinstance(prompt, list) else 1
-
-         text_inputs = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=self.tokenizer.model_max_length,
-             truncation=True,
-             return_tensors="pd",
-         )
-         text_input_ids = text_inputs.input_ids
-         untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pd").input_ids
-
-         if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not paddle.equal_all(
-             text_input_ids, untruncated_ids
-         ):
-             removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
-             logger.warning(
-                 "The following part of your input was truncated because CLIP can only handle sequences up to"
-                 f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-             )
-
-         if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-             attention_mask = text_inputs.attention_mask
-         else:
-             attention_mask = None
-
-         text_embeddings = self.text_encoder(
-             text_input_ids,
-             attention_mask=attention_mask,
-         )
-         text_embeddings = text_embeddings[0]
-
-         # duplicate text embeddings for each generation per prompt, using mps friendly method
-         bs_embed, seq_len, _ = text_embeddings.shape
-         text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
-         text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
-
-         # get unconditional embeddings for classifier free guidance
-         if do_classifier_free_guidance:
-             uncond_tokens: List[str]
-             if negative_prompt is None:
-                 uncond_tokens = [""] * batch_size
-             elif type(prompt) is not type(negative_prompt):
-                 raise TypeError(
-                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
-                     f" {type(prompt)}."
-                 )
-             elif isinstance(negative_prompt, str):
-                 uncond_tokens = [negative_prompt]
-             elif batch_size != len(negative_prompt):
-                 raise ValueError(
-                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                     " the batch size of `prompt`."
-                 )
-             else:
-                 uncond_tokens = negative_prompt
-
-             max_length = text_input_ids.shape[-1]
-             uncond_input = self.tokenizer(
-                 uncond_tokens,
-                 padding="max_length",
-                 max_length=max_length,
-                 truncation=True,
-                 return_tensors="pd",
-             )
-
-             if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                 attention_mask = uncond_input.attention_mask
-             else:
-                 attention_mask = None
-
-             uncond_embeddings = self.text_encoder(
-                 uncond_input.input_ids,
-                 attention_mask=attention_mask,
-             )
-             uncond_embeddings = uncond_embeddings[0]
-
-             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-             seq_len = uncond_embeddings.shape[1]
-             uncond_embeddings = uncond_embeddings.tile([1, num_images_per_prompt, 1])
-             uncond_embeddings = uncond_embeddings.reshape([batch_size * num_images_per_prompt, seq_len, -1])
-
-             # Encode the safety concept text
-             if enable_safety_guidance:
-                 safety_concept_input = self.tokenizer(
-                     [self._safety_text_concept],
-                     padding="max_length",
-                     max_length=max_length,
-                     truncation=True,
-                     return_tensors="pd",
-                 )
-                 safety_embeddings = self.text_encoder(safety_concept_input.input_ids)[0]
-
-                 # duplicate safety embeddings for each generation per prompt, using mps friendly method
-                 seq_len = safety_embeddings.shape[1]
-                 safety_embeddings = safety_embeddings.tile([batch_size, num_images_per_prompt, 1])
-                 safety_embeddings = safety_embeddings.reshape([batch_size * num_images_per_prompt, seq_len, -1])
-
-                 # For classifier free guidance + sld, we need to do three forward passes.
-                 # Here we concatenate the unconditional and text embeddings into a single batch
-                 # to avoid doing three forward passes
-                 text_embeddings = paddle.concat([uncond_embeddings, text_embeddings, safety_embeddings])
-
-             else:
-                 # For classifier free guidance, we need to do two forward passes.
-                 # Here we concatenate the unconditional and text embeddings into a single batch
-                 # to avoid doing two forward passes
-                 text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])
-
-         return text_embeddings
-
-     def run_safety_checker(self, image, dtype, enable_safety_guidance):
-         if self.safety_checker is not None:
-             safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pd")
-             image, has_nsfw_concept = self.safety_checker(
-                 images=image, clip_input=safety_checker_input.pixel_values.cast(dtype)
-             )
-             flagged_images = None
-             if any(has_nsfw_concept):
-                 logger.warning(
-                     "Potential NSFW content was detected in one or more images. A black image will be returned"
-                     " instead."
-                     f" {'You may look at this images in the `unsafe_images` variable of the output at your own discretion.' if enable_safety_guidance else 'Try again with a different prompt and/or seed.'} "
-                 )
-                 flagged_images = np.zeros(image.shape)
-                 for idx, has_nsfw_concept in enumerate(has_nsfw_concept):
-                     if has_nsfw_concept:
-                         flagged_images[idx] = image[idx]
-                         image[idx] = np.zeros(image[idx].shape)  # black image
-         else:
-             has_nsfw_concept = None
-             flagged_images = None
-         return image, has_nsfw_concept, flagged_images
-
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
352
- def decode_latents(self, latents):
353
- latents = 1 / 0.18215 * latents
354
- image = self.vae.decode(latents).sample
355
- image = (image / 2 + 0.5).clip(0, 1)
356
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
357
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
358
- return image
359
-
360
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
361
- def prepare_extra_step_kwargs(self, generator, eta):
362
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
363
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
364
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
365
- # and should be between [0, 1]
366
-
367
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
368
- extra_step_kwargs = {}
369
- if accepts_eta:
370
- extra_step_kwargs["eta"] = eta
371
-
372
- # check if the scheduler accepts generator
373
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
374
- if accepts_generator:
375
- extra_step_kwargs["generator"] = generator
376
- return extra_step_kwargs
377
-
378
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
379
- def check_inputs(self, prompt, height, width, callback_steps):
380
- if not isinstance(prompt, str) and not isinstance(prompt, list):
381
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
382
-
383
- if height % 8 != 0 or width % 8 != 0:
384
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
385
-
386
- if (callback_steps is None) or (
387
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
388
- ):
389
- raise ValueError(
390
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
391
- f" {type(callback_steps)}."
392
- )
393
-
394
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
395
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None):
396
- shape = [batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor]
397
- if isinstance(generator, list) and len(generator) != batch_size:
398
- raise ValueError(
399
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
400
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
401
- )
402
-
403
- if latents is None:
404
- if isinstance(generator, list):
405
- shape = [
406
- 1,
407
- ] + shape[1:]
408
- latents = [paddle.randn(shape, generator=generator[i], dtype=dtype) for i in range(batch_size)]
409
- latents = paddle.concat(latents, axis=0)
410
- else:
411
- latents = paddle.randn(shape, generator=generator, dtype=dtype)
412
- else:
413
- if latents.shape != shape:
414
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
415
-
416
- # scale the initial noise by the standard deviation required by the scheduler
417
- latents = latents * self.scheduler.init_noise_sigma
418
- return latents
419
-
420
- def perform_safety_guidance(
421
- self,
422
- enable_safety_guidance,
423
- safety_momentum,
424
- noise_guidance,
425
- noise_pred_out,
426
- i,
427
- sld_guidance_scale,
428
- sld_warmup_steps,
429
- sld_threshold,
430
- sld_momentum_scale,
431
- sld_mom_beta,
432
- ):
433
- # Perform SLD guidance
434
- if enable_safety_guidance:
435
- if safety_momentum is None:
436
- safety_momentum = paddle.zeros_like(noise_guidance)
437
- noise_pred_text, noise_pred_uncond = noise_pred_out[0], noise_pred_out[1]
438
- noise_pred_safety_concept = noise_pred_out[2]
439
-
440
- # Equation 6
441
- scale = paddle.clip(
442
- paddle.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.0
443
- )
444
-
445
- # Equation 6
446
- safety_concept_scale = paddle.where(
447
- (noise_pred_text - noise_pred_safety_concept) >= sld_threshold, paddle.zeros_like(scale), scale
448
- )
449
-
450
- # Equation 4
451
- noise_guidance_safety = paddle.multiply(
452
- (noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale
453
- )
454
-
455
- # Equation 7
456
- noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum
457
-
458
- # Equation 8
459
- safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety
460
-
461
- if i >= sld_warmup_steps: # Warmup
462
- # Equation 3
463
- noise_guidance = noise_guidance - noise_guidance_safety
464
- return noise_guidance, safety_momentum
465
-
466
- @paddle.no_grad()
467
- def __call__(
468
- self,
469
- prompt: Union[str, List[str]],
470
- height: Optional[int] = None,
471
- width: Optional[int] = None,
472
- num_inference_steps: int = 50,
473
- guidance_scale: float = 7.5,
474
- negative_prompt: Optional[Union[str, List[str]]] = None,
475
- num_images_per_prompt: Optional[int] = 1,
476
- eta: float = 0.0,
477
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
478
- latents: Optional[paddle.Tensor] = None,
479
- output_type: Optional[str] = "pil",
480
- return_dict: bool = True,
481
- callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
482
- callback_steps: Optional[int] = 1,
483
- sld_guidance_scale: Optional[float] = 1000,
484
- sld_warmup_steps: Optional[int] = 10,
485
- sld_threshold: Optional[float] = 0.01,
486
- sld_momentum_scale: Optional[float] = 0.3,
487
- sld_mom_beta: Optional[float] = 0.4,
488
- **kwargs,
489
- ):
490
- r"""
491
- Function invoked when calling the pipeline for generation.
492
-
493
- Args:
494
- prompt (`str` or `List[str]`):
495
- The prompt or prompts to guide the image generation.
496
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
497
- The height in pixels of the generated image.
498
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
499
- The width in pixels of the generated image.
500
- num_inference_steps (`int`, *optional*, defaults to 50):
501
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
502
- expense of slower inference.
503
- guidance_scale (`float`, *optional*, defaults to 7.5):
504
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
505
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
506
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
507
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
508
- usually at the expense of lower image quality.
509
- negative_prompt (`str` or `List[str]`, *optional*):
510
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
511
- if `guidance_scale` is less than `1`).
512
- num_images_per_prompt (`int`, *optional*, defaults to 1):
513
- The number of images to generate per prompt.
514
- eta (`float`, *optional*, defaults to 0.0):
515
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
516
- [`schedulers.DDIMScheduler`], will be ignored for others.
517
- generator (`paddle.Generator`, *optional*):
518
- A [paddle generator] to make generation
519
- deterministic.
520
- latents (`paddle.Tensor`, *optional*):
521
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
522
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
523
- tensor will ge generated by sampling using the supplied random `generator`.
524
- output_type (`str`, *optional*, defaults to `"pil"`):
525
- The output format of the generate image. Choose between
526
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
527
- return_dict (`bool`, *optional*, defaults to `True`):
528
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
529
- plain tuple.
530
- callback (`Callable`, *optional*):
531
- A function that will be called every `callback_steps` steps during inference. The function will be
532
- called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
533
- callback_steps (`int`, *optional*, defaults to 1):
534
- The frequency at which the `callback` function will be called. If not specified, the callback will be
535
- called at every step.
536
- sld_guidance_scale (`float`, *optional*, defaults to 1000):
537
- Safe latent guidance as defined in [Safe Latent Diffusion](https://arxiv.org/abs/2211.05105).
538
- `sld_guidance_scale` is defined as sS of Eq. 6. If set to be less than 1, safety guidance will be
539
- disabled.
540
- sld_warmup_steps (`int`, *optional*, defaults to 10):
541
- Number of warmup steps for safety guidance. SLD will only be applied for diffusion steps greater than
542
- `sld_warmup_steps`. `sld_warmup_steps` is defined as `delta` of [Safe Latent
543
- Diffusion](https://arxiv.org/abs/2211.05105).
544
- sld_threshold (`float`, *optional*, defaults to 0.01):
545
- Threshold that separates the hyperplane between appropriate and inappropriate images. `sld_threshold`
546
- is defined as `lamda` of Eq. 5 in [Safe Latent Diffusion](https://arxiv.org/abs/2211.05105).
547
- sld_momentum_scale (`float`, *optional*, defaults to 0.3):
548
- Scale of the SLD momentum to be added to the safety guidance at each diffusion step. If set to 0.0
549
- momentum will be disabled. Momentum is already built up during warmup, i.e. for diffusion steps smaller
550
- than `sld_warmup_steps`. `sld_momentum_scale` is defined as `sm` of Eq. 7 in [Safe Latent
551
- Diffusion](https://arxiv.org/abs/2211.05105).
552
- sld_mom_beta (`float`, *optional*, defaults to 0.4):
553
- Defines how safety guidance momentum builds up. `sld_mom_beta` indicates how much of the previous
554
- momentum will be kept. Momentum is already built up during warmup, i.e. for diffusion steps smaller
555
- than `sld_warmup_steps`. `sld_mom_beta` is defined as `beta m` of Eq. 8 in [Safe Latent
556
- Diffusion](https://arxiv.org/abs/2211.05105).
557
- Returns:
558
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
559
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
560
- When returning a tuple, the first element is a list with the generated images, and the second element is a
561
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
562
- (nsfw) content, according to the `safety_checker`.
563
- """
564
- # 0. Default height and width to unet
565
- height = height or self.unet.config.sample_size * self.vae_scale_factor
566
- width = width or self.unet.config.sample_size * self.vae_scale_factor
567
-
568
- # 1. Check inputs. Raise error if not correct
569
- self.check_inputs(prompt, height, width, callback_steps)
570
-
571
- # 2. Define call parameters
572
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
573
-
574
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
575
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
576
- # corresponds to doing no classifier free guidance.
577
- do_classifier_free_guidance = guidance_scale > 1.0
578
-
579
- enable_safety_guidance = sld_guidance_scale > 1.0 and do_classifier_free_guidance
580
- if not enable_safety_guidance:
581
- warnings.warn("Safety checker disabled!")
582
-
583
- # 3. Encode input prompt
584
- text_embeddings = self._encode_prompt(
585
- prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, enable_safety_guidance
586
- )
587
-
588
- # 4. Prepare timesteps
589
- self.scheduler.set_timesteps(num_inference_steps)
590
- timesteps = self.scheduler.timesteps
591
-
592
- # 5. Prepare latent variables
593
- num_channels_latents = self.unet.in_channels
594
- latents = self.prepare_latents(
595
- batch_size * num_images_per_prompt,
596
- num_channels_latents,
597
- height,
598
- width,
599
- text_embeddings.dtype,
600
- generator,
601
- latents,
602
- )
603
-
604
- # 6. Prepare extra step kwargs.
605
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
606
-
607
- safety_momentum = None
608
-
609
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
610
- with self.progress_bar(total=num_inference_steps) as progress_bar:
611
- for i, t in enumerate(timesteps):
612
- # expand the latents if we are doing classifier free guidance
613
- latent_model_input = (
614
- paddle.concat([latents] * (3 if enable_safety_guidance else 2))
615
- if do_classifier_free_guidance
616
- else latents
617
- )
618
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
619
-
620
- # predict the noise residual
621
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
622
-
623
- # perform guidance
624
- if do_classifier_free_guidance:
625
- noise_pred_out = noise_pred.chunk((3 if enable_safety_guidance else 2))
626
- noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]
627
-
628
- # default classifier free guidance
629
- noise_guidance = noise_pred_text - noise_pred_uncond
630
-
631
- # Perform SLD guidance
632
- if enable_safety_guidance:
633
- if safety_momentum is None:
634
- safety_momentum = paddle.zeros_like(noise_guidance)
635
- noise_pred_safety_concept = noise_pred_out[2]
636
-
637
- # Equation 6
638
- scale = paddle.clip(
639
- paddle.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.0
640
- )
641
-
642
- # Equation 6
643
- safety_concept_scale = paddle.where(
644
- (noise_pred_text - noise_pred_safety_concept) >= sld_threshold,
645
- paddle.zeros_like(scale),
646
- scale,
647
- )
648
-
649
- # Equation 4
650
- noise_guidance_safety = paddle.multiply(
651
- (noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale
652
- )
653
-
654
- # Equation 7
655
- noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum
656
-
657
- # Equation 8
658
- safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety
659
-
660
- if i >= sld_warmup_steps: # Warmup
661
- # Equation 3
662
- noise_guidance = noise_guidance - noise_guidance_safety
663
-
664
- noise_pred = noise_pred_uncond + guidance_scale * noise_guidance
665
-
666
- # compute the previous noisy sample x_t -> x_t-1
667
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
668
-
669
- # call the callback, if provided
670
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
671
- progress_bar.update()
672
- if callback is not None and i % callback_steps == 0:
673
- callback(i, t, latents)
674
-
675
- # 8. Post-processing
676
- image = self.decode_latents(latents)
677
-
678
- # 9. Run safety checker
679
- image, has_nsfw_concept, flagged_images = self.run_safety_checker(
680
- image, text_embeddings.dtype, enable_safety_guidance
681
- )
682
-
683
- # 10. Convert to PIL
684
- if output_type == "pil":
685
- image = self.numpy_to_pil(image)
686
- if flagged_images is not None:
687
- flagged_images = self.numpy_to_pil(flagged_images)
688
-
689
- if not return_dict:
690
- return (
691
- image,
692
- has_nsfw_concept,
693
- self._safety_text_concept if enable_safety_guidance else None,
694
- flagged_images,
695
- )
696
-
697
- return StableDiffusionSafePipelineOutput(
698
- images=image,
699
- nsfw_content_detected=has_nsfw_concept,
700
- applied_safety_concept=self._safety_text_concept if enable_safety_guidance else None,
701
- unsafe_images=flagged_images,
702
- )
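
A minimal usage sketch of the pipeline deleted above, for reference. The import path and the checkpoint name are assumptions, not taken from this diff:

```python
# Sketch only: assumes ppdiffusers exposes StableDiffusionPipelineSafe and that
# a compatible Stable Diffusion checkpoint name resolves (both are assumptions).
from ppdiffusers import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
out = pipe(
    prompt="a portrait photo of an astronaut",
    sld_guidance_scale=1000,  # values <= 1 disable safe latent diffusion
    sld_warmup_steps=10,      # SLD is applied only after this many steps
)
out.images[0].save("output.png")
if out.applied_safety_concept is not None:
    print("SLD was active with concept:", out.applied_safety_concept)
```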
spaces/1toTree/lora_test/ppdiffusers/pipelines/unclip/pipeline_unclip.py DELETED
@@ -1,476 +0,0 @@
- # Copyright 2022 Kakao Brain and The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import inspect
- from typing import List, Optional, Union
-
- import paddle
- import paddle.nn.functional as F
-
- from paddlenlp.transformers import CLIPTextModelWithProjection, CLIPTokenizer
-
- from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel
- from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
- from ...schedulers import UnCLIPScheduler
- from ...utils import logging
- from .text_proj import UnCLIPTextProjModel
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-
- class UnCLIPPipeline(DiffusionPipeline):
-     """
-     Pipeline for text-to-image generation using unCLIP
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Args:
-         text_encoder ([`CLIPTextModelWithProjection`]):
-             Frozen text-encoder.
-         tokenizer (`CLIPTokenizer`):
-             Tokenizer of class
-             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-         prior ([`PriorTransformer`]):
-             The canonical unCLIP prior to approximate the image embedding from the text embedding.
-         decoder ([`UNet2DConditionModel`]):
-             The decoder to invert the image embedding into an image.
-         super_res_first ([`UNet2DModel`]):
-             Super resolution unet. Used in all but the last step of the super resolution diffusion process.
-         super_res_last ([`UNet2DModel`]):
-             Super resolution unet. Used in the last step of the super resolution diffusion process.
-         prior_scheduler ([`UnCLIPScheduler`]):
-             Scheduler used in the prior denoising process. Just a modified DDPMScheduler.
-         decoder_scheduler ([`UnCLIPScheduler`]):
-             Scheduler used in the decoder denoising process. Just a modified DDPMScheduler.
-         super_res_scheduler ([`UnCLIPScheduler`]):
-             Scheduler used in the super resolution denoising process. Just a modified DDPMScheduler.
-
-     """
-
-     prior: PriorTransformer
-     decoder: UNet2DConditionModel
-     text_proj: UnCLIPTextProjModel
-     text_encoder: CLIPTextModelWithProjection
-     tokenizer: CLIPTokenizer
-     super_res_first: UNet2DModel
-     super_res_last: UNet2DModel
-
-     prior_scheduler: UnCLIPScheduler
-     decoder_scheduler: UnCLIPScheduler
-     super_res_scheduler: UnCLIPScheduler
-
-     def __init__(
-         self,
-         prior: PriorTransformer,
-         decoder: UNet2DConditionModel,
-         text_encoder: CLIPTextModelWithProjection,
-         tokenizer: CLIPTokenizer,
-         text_proj: UnCLIPTextProjModel,
-         super_res_first: UNet2DModel,
-         super_res_last: UNet2DModel,
-         prior_scheduler: UnCLIPScheduler,
-         decoder_scheduler: UnCLIPScheduler,
-         super_res_scheduler: UnCLIPScheduler,
-     ):
-         super().__init__()
-
-         self.register_modules(
-             prior=prior,
-             decoder=decoder,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             text_proj=text_proj,
-             super_res_first=super_res_first,
-             super_res_last=super_res_last,
-             prior_scheduler=prior_scheduler,
-             decoder_scheduler=decoder_scheduler,
-             super_res_scheduler=super_res_scheduler,
-         )
-
-     def prepare_latents(self, shape, dtype, generator, latents, scheduler):
-         batch_size = shape[0]
-         if isinstance(generator, list) and len(generator) != batch_size:
-             raise ValueError(
-                 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                 f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-             )
-
-         if latents is None:
-             if isinstance(generator, list):
-                 # callers pass `shape` as a tuple, so convert before concatenating
-                 shape = [1] + list(shape[1:])
-                 latents = [paddle.randn(shape, generator=generator[i], dtype=dtype) for i in range(batch_size)]
-                 latents = paddle.concat(latents, axis=0)
-             else:
-                 latents = paddle.randn(shape, generator=generator, dtype=dtype)
-         else:
-             if latents.shape != shape:
-                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
-
-         # scale the initial noise by the standard deviation required by the scheduler
-         latents = latents * scheduler.init_noise_sigma
-         return latents
-
-     def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
-         batch_size = len(prompt) if isinstance(prompt, list) else 1
-
-         # get prompt text embeddings
-         text_inputs = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=self.tokenizer.model_max_length,
-             return_tensors="pd",
-             return_attention_mask=True,
-         )
-         text_input_ids = text_inputs.input_ids
-         text_mask = text_inputs.attention_mask
-
-         if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
-             removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
-             logger.warning(
-                 "The following part of your input was truncated because CLIP can only handle sequences up to"
-                 f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-             )
-             text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
-
-         text_encoder_output = self.text_encoder(text_input_ids)
-
-         text_embeddings = text_encoder_output.text_embeds
-         text_encoder_hidden_states = text_encoder_output.last_hidden_state
-
-         # duplicate text embeddings for each generation per prompt
-         seq_len = text_embeddings.shape[1]
-         text_embeddings = text_embeddings.tile([1, num_images_per_prompt])
-         text_embeddings = text_embeddings.reshape([batch_size * num_images_per_prompt, seq_len])
-
-         # duplicate text_encoder_hidden_states for each generation per prompt
-         seq_len = text_encoder_hidden_states.shape[1]
-         text_encoder_hidden_states = text_encoder_hidden_states.tile([1, num_images_per_prompt, 1])
-         text_encoder_hidden_states = text_encoder_hidden_states.reshape(
-             [batch_size * num_images_per_prompt, seq_len, -1]
-         )
-
-         # duplicate text_mask for each generation per prompt
-         seq_len = text_mask.shape[1]
-         text_mask = text_mask.tile([1, num_images_per_prompt])
-         text_mask = text_mask.reshape([batch_size * num_images_per_prompt, seq_len])
-
-         if do_classifier_free_guidance:
-             uncond_tokens = [""] * batch_size
-
-             max_length = text_input_ids.shape[-1]
-             uncond_input = self.tokenizer(
-                 uncond_tokens,
-                 padding="max_length",
-                 max_length=max_length,
-                 truncation=True,
-                 return_tensors="pd",
-                 return_attention_mask=True,
-             )
-             uncond_text_mask = uncond_input.attention_mask
-             uncond_embeddings_text_encoder_output = self.text_encoder(uncond_input.input_ids)
-
-             uncond_embeddings = uncond_embeddings_text_encoder_output.text_embeds
-             uncond_text_encoder_hidden_states = uncond_embeddings_text_encoder_output.last_hidden_state
-
-             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-
-             seq_len = uncond_embeddings.shape[1]
-             uncond_embeddings = uncond_embeddings.tile([1, num_images_per_prompt])
-             uncond_embeddings = uncond_embeddings.reshape([batch_size * num_images_per_prompt, seq_len])
-
-             seq_len = uncond_text_encoder_hidden_states.shape[1]
-             uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.tile([1, num_images_per_prompt, 1])
-             uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.reshape(
-                 [batch_size * num_images_per_prompt, seq_len, -1]
-             )
-
-             # duplicate uncond_text_mask for each generation per prompt
-             seq_len = uncond_text_mask.shape[1]
-             uncond_text_mask = uncond_text_mask.tile([1, num_images_per_prompt])
-             uncond_text_mask = uncond_text_mask.reshape([batch_size * num_images_per_prompt, seq_len])
-
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])
-             text_encoder_hidden_states = paddle.concat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
-
-             text_mask = paddle.concat([uncond_text_mask, text_mask])
-
-         return text_embeddings, text_encoder_hidden_states, text_mask
-
-     @paddle.no_grad()
-     def __call__(
-         self,
-         prompt: Union[str, List[str]],
-         num_images_per_prompt: int = 1,
-         prior_num_inference_steps: int = 25,
-         decoder_num_inference_steps: int = 25,
-         super_res_num_inference_steps: int = 7,
-         generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
-         prior_latents: Optional[paddle.Tensor] = None,
-         decoder_latents: Optional[paddle.Tensor] = None,
-         super_res_latents: Optional[paddle.Tensor] = None,
-         prior_guidance_scale: float = 4.0,
-         decoder_guidance_scale: float = 8.0,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-     ):
-         """
-         Function invoked when calling the pipeline for generation.
-
-         Args:
-             prompt (`str` or `List[str]`):
-                 The prompt or prompts to guide the image generation.
-             num_images_per_prompt (`int`, *optional*, defaults to 1):
-                 The number of images to generate per prompt.
-             prior_num_inference_steps (`int`, *optional*, defaults to 25):
-                 The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
-                 image at the expense of slower inference.
-             decoder_num_inference_steps (`int`, *optional*, defaults to 25):
-                 The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality
-                 image at the expense of slower inference.
-             super_res_num_inference_steps (`int`, *optional*, defaults to 7):
-                 The number of denoising steps for super resolution. More denoising steps usually lead to a higher
-                 quality image at the expense of slower inference.
-             generator (`paddle.Generator`, *optional*):
-                 One or a list of paddle generator(s) to make generation deterministic.
-             prior_latents (`paddle.Tensor` of shape (batch size, embeddings dimension), *optional*):
-                 Pre-generated noisy latents to be used as inputs for the prior.
-             decoder_latents (`paddle.Tensor` of shape (batch size, channels, height, width), *optional*):
-                 Pre-generated noisy latents to be used as inputs for the decoder.
-             super_res_latents (`paddle.Tensor` of shape (batch size, channels, super res height, super res width), *optional*):
-                 Pre-generated noisy latents to be used as inputs for the super resolution unet.
-             prior_guidance_scale (`float`, *optional*, defaults to 4.0):
-                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
-                 usually at the expense of lower image quality.
-             decoder_guidance_scale (`float`, *optional*, defaults to 8.0):
-                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
-                 usually at the expense of lower image quality.
-             output_type (`str`, *optional*, defaults to `"pil"`):
-                 The output format of the generated image. Choose between
-                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
-         """
-         if isinstance(prompt, str):
-             batch_size = 1
-         elif isinstance(prompt, list):
-             batch_size = len(prompt)
-         else:
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         batch_size = batch_size * num_images_per_prompt
-
-         do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0
-
-         text_embeddings, text_encoder_hidden_states, text_mask = self._encode_prompt(
-             prompt, num_images_per_prompt, do_classifier_free_guidance
-         )
-
-         # prior
-
-         self.prior_scheduler.set_timesteps(prior_num_inference_steps)
-         prior_timesteps_tensor = self.prior_scheduler.timesteps
-
-         embedding_dim = self.prior.config.embedding_dim
-         prior_latents = self.prepare_latents(
-             (batch_size, embedding_dim),
-             text_embeddings.dtype,
-             generator,
-             prior_latents,
-             self.prior_scheduler,
-         )
-
-         for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)):
-             # expand the latents if we are doing classifier free guidance
-             latent_model_input = paddle.concat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents
-
-             predicted_image_embedding = self.prior(
-                 latent_model_input,
-                 timestep=t,
-                 proj_embedding=text_embeddings,
-                 encoder_hidden_states=text_encoder_hidden_states,
-                 attention_mask=text_mask,
-             ).predicted_image_embedding
-
-             if do_classifier_free_guidance:
-                 predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
-                 predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (
-                     predicted_image_embedding_text - predicted_image_embedding_uncond
-                 )
-
-             if i + 1 == prior_timesteps_tensor.shape[0]:
-                 prev_timestep = None
-             else:
-                 prev_timestep = prior_timesteps_tensor[i + 1]
-
-             prior_latents = self.prior_scheduler.step(
-                 predicted_image_embedding,
-                 timestep=t,
-                 sample=prior_latents,
-                 generator=generator,
-                 prev_timestep=prev_timestep,
-             ).prev_sample
-
-         prior_latents = self.prior.post_process_latents(prior_latents)
-
-         image_embeddings = prior_latents
-
-         # done prior
-
-         # decoder
-
-         text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj(
-             image_embeddings=image_embeddings,
-             text_embeddings=text_embeddings,
-             text_encoder_hidden_states=text_encoder_hidden_states,
-             do_classifier_free_guidance=do_classifier_free_guidance,
-         )
-
-         decoder_text_mask = F.pad(
-             text_mask.unsqueeze(0), (self.text_proj.clip_extra_context_tokens, 0), value=1, data_format="NCL"
-         ).squeeze(0)
-
-         self.decoder_scheduler.set_timesteps(decoder_num_inference_steps)
-         decoder_timesteps_tensor = self.decoder_scheduler.timesteps
-
-         num_channels_latents = self.decoder.in_channels
-         height = self.decoder.sample_size
-         width = self.decoder.sample_size
-         decoder_latents = self.prepare_latents(
-             (batch_size, num_channels_latents, height, width),
-             text_encoder_hidden_states.dtype,
-             generator,
-             decoder_latents,
-             self.decoder_scheduler,
-         )
-
-         for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)):
-             # expand the latents if we are doing classifier free guidance
-             latent_model_input = (
-                 paddle.concat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents
-             )
-
-             noise_pred = self.decoder(
-                 sample=latent_model_input,
-                 timestep=t,
-                 encoder_hidden_states=text_encoder_hidden_states,
-                 class_labels=additive_clip_time_embeddings,
-                 attention_mask=decoder_text_mask,
-             ).sample
-
-             if do_classifier_free_guidance:
-                 noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                 # paddle.split is not equivalent to torch.split
-                 noise_pred_uncond, _ = noise_pred_uncond.split(
-                     [latent_model_input.shape[1], noise_pred_uncond.shape[1] - latent_model_input.shape[1]], axis=1
-                 )
-                 noise_pred_text, predicted_variance = noise_pred_text.split(
-                     [latent_model_input.shape[1], noise_pred_text.shape[1] - latent_model_input.shape[1]], axis=1
-                 )
-                 noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond)
-                 noise_pred = paddle.concat([noise_pred, predicted_variance], axis=1)
-
-             if i + 1 == decoder_timesteps_tensor.shape[0]:
-                 prev_timestep = None
-             else:
-                 prev_timestep = decoder_timesteps_tensor[i + 1]
-
-             # compute the previous noisy sample x_t -> x_t-1
-             decoder_latents = self.decoder_scheduler.step(
-                 noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator
-             ).prev_sample
-
-         decoder_latents = decoder_latents.clip(-1, 1)
-
-         image_small = decoder_latents
-
-         # done decoder
-
-         # super res
-
-         self.super_res_scheduler.set_timesteps(super_res_num_inference_steps)
-         super_res_timesteps_tensor = self.super_res_scheduler.timesteps
-
-         channels = self.super_res_first.in_channels // 2
-         height = self.super_res_first.sample_size
-         width = self.super_res_first.sample_size
-         super_res_latents = self.prepare_latents(
-             (batch_size, channels, height, width),
-             image_small.dtype,
-             generator,
-             super_res_latents,
-             self.super_res_scheduler,
-         )
-
-         interpolate_antialias = {}
-         if "antialias" in inspect.signature(F.interpolate).parameters:
-             interpolate_antialias["antialias"] = True
-
-         image_upscaled = F.interpolate(
-             image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias
-         )
-
-         for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)):
-             # no classifier free guidance
-
-             if i == super_res_timesteps_tensor.shape[0] - 1:
-                 unet = self.super_res_last
-             else:
-                 unet = self.super_res_first
-
-             latent_model_input = paddle.concat([super_res_latents, image_upscaled], axis=1)
-
-             noise_pred = unet(
-                 sample=latent_model_input,
-                 timestep=t,
-             ).sample
-
-             if i + 1 == super_res_timesteps_tensor.shape[0]:
-                 prev_timestep = None
-             else:
-                 prev_timestep = super_res_timesteps_tensor[i + 1]
-
-             # compute the previous noisy sample x_t -> x_t-1
-             super_res_latents = self.super_res_scheduler.step(
-                 noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator
-             ).prev_sample
-
-         image = super_res_latents
-
-         # done super res
-
-         # post processing
-
-         image = image * 0.5 + 0.5
-         image = image.clip(0, 1)
-         image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
-
-         if output_type == "pil":
-             image = self.numpy_to_pil(image)
-
-         if not return_dict:
-             return (image,)
-
-         return ImagePipelineOutput(images=image)
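
For reference, a minimal sketch of calling the unCLIP pipeline deleted above; the import path and the "kakaobrain/karlo-v1-alpha" checkpoint name are assumptions rather than part of this diff:

```python
# Sketch only: the three step counts map to the prior, decoder, and
# super resolution stages described in the docstring above.
from ppdiffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
image = pipe(
    prompt="a high-quality photo of a red panda",
    prior_num_inference_steps=25,
    decoder_num_inference_steps=25,
    super_res_num_inference_steps=7,
).images[0]
image.save("unclip.png")
```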
spaces/4Taps/SadTalker/app.py DELETED
@@ -1,112 +0,0 @@
- import os, sys
- import tempfile
- import gradio as gr
- from modules.text2speech import text2speech
- from modules.sadtalker_test import SadTalker
-
-
- def get_driven_audio(audio):
-     if os.path.isfile(audio):
-         return audio
-     else:
-         save_path = tempfile.NamedTemporaryFile(
-             delete=False,
-             suffix=".wav",
-         )
-         # synthesize speech from the text input; both branches return a single file path
-         gen_audio = text2speech(audio, save_path.name)
-         return gen_audio
-
-
- def get_source_image(image):
-     return image
-
-
- def sadtalker_demo(result_dir='./tmp/'):
-
-     sad_talker = SadTalker()
-     with gr.Blocks(analytics_enabled=False) as sadtalker_interface:
-         gr.Markdown("<div align='center'> <h3> 😭 SadTalker: Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation (CVPR 2023) </h3> \
-             <a style='font-size:18px;color: #efefef' href='https://arxiv.org/abs/2211.12194'>Arxiv</a> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp; \
-             <a style='font-size:18px;color: #efefef' href='https://sadtalker.github.io'>Homepage</a> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp; \
-             <a style='font-size:18px;color: #efefef' href='https://github.com/Winfredy/SadTalker'> Github </a> </div>")
-
-         with gr.Row():
-             with gr.Column(variant='panel'):
-                 with gr.Tabs(elem_id="sadtalker_source_image"):
-                     with gr.TabItem('Upload image'):
-                         with gr.Row():
-                             source_image = gr.Image(label="Source image", source="upload", type="filepath").style(height=256)
-
-                 with gr.Tabs(elem_id="sadtalker_driven_audio"):
-                     with gr.TabItem('Upload audio (wav/mp3 only currently)'):
-                         with gr.Column(variant='panel'):
-                             driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath")
-
-             with gr.Column(variant='panel'):
-                 with gr.Tabs(elem_id="sadtalker_checkbox"):
-                     with gr.TabItem('Settings'):
-                         with gr.Column(variant='panel'):
-                             is_still_mode = gr.Checkbox(label="Still Mode (fewer head motions)").style(container=True)
-                             is_resize_mode = gr.Checkbox(label="Resize Mode (⚠️ Resize mode needs the image to be cropped manually first; can handle larger crops)").style(container=True)
-                             is_enhance_mode = gr.Checkbox(label="Enhance Mode (better face quality)").style(container=True)
-                             submit = gr.Button('Generate', elem_id="sadtalker_generate", variant='primary')
-
-                 with gr.Tabs(elem_id="sadtalker_genearted"):
-                     gen_video = gr.Video(label="Generated video", format="mp4").style(width=256)
-                     gen_text = gr.Textbox(visible=False)
-
-         with gr.Row():
-             examples = [
-                 [
-                     'examples/source_image/art_10.png',
-                     'examples/driven_audio/deyu.wav',
-                     True,
-                     False,
-                     False
-                 ],
-                 [
-                     'examples/source_image/art_1.png',
-                     'examples/driven_audio/fayu.wav',
-                     True,
-                     True,
-                     False
-                 ],
-                 [
-                     'examples/source_image/art_9.png',
-                     'examples/driven_audio/itosinger1.wav',
-                     True,
-                     False,
-                     True
-                 ]
-             ]
-             gr.Examples(examples=examples,
-                         inputs=[
-                             source_image,
-                             driven_audio,
-                             is_still_mode,
-                             is_resize_mode,
-                             is_enhance_mode,
-                             gr.Textbox(value=result_dir, visible=False)],
-                         outputs=[gen_video, gen_text],
-                         fn=sad_talker.test,
-                         cache_examples=os.getenv('SYSTEM') == 'spaces')
-
-         submit.click(
-             fn=sad_talker.test,
-             inputs=[source_image,
-                     driven_audio,
-                     is_still_mode,
-                     is_resize_mode,
-                     is_enhance_mode,
-                     gr.Textbox(value=result_dir, visible=False)],
-             outputs=[gen_video, gen_text],
-             api_name="sadtalking"
-         )
-
-     return sadtalker_interface
-
-
- if __name__ == "__main__":
-
-     sadtalker_result_dir = os.path.join('./', 'results')
-     demo = sadtalker_demo(sadtalker_result_dir)
-     demo.launch(enable_queue=True)
spaces/52Hz/SRMNet_thesis/WT/transform.py DELETED
@@ -1,53 +0,0 @@
- import torch
- import torch.nn as nn
-
- def dwt_init(x):
-     x01 = x[:, :, 0::2, :] / 2
-     x02 = x[:, :, 1::2, :] / 2
-     x1 = x01[:, :, :, 0::2]
-     x2 = x02[:, :, :, 0::2]
-     x3 = x01[:, :, :, 1::2]
-     x4 = x02[:, :, :, 1::2]
-     x_LL = x1 + x2 + x3 + x4
-     x_HL = -x1 - x2 + x3 + x4
-     x_LH = -x1 + x2 - x3 + x4
-     x_HH = x1 - x2 - x3 + x4
-     # print(x_HH[:, 0, :, :])
-     return torch.cat((x_LL, x_HL, x_LH, x_HH), 1)
-
- def iwt_init(x):
-     r = 2
-     in_batch, in_channel, in_height, in_width = x.size()
-     out_batch, out_channel, out_height, out_width = in_batch, int(in_channel / (r ** 2)), r * in_height, r * in_width
-     x1 = x[:, 0:out_channel, :, :] / 2
-     x2 = x[:, out_channel:out_channel * 2, :, :] / 2
-     x3 = x[:, out_channel * 2:out_channel * 3, :, :] / 2
-     x4 = x[:, out_channel * 3:out_channel * 4, :, :] / 2
-     h = torch.zeros([out_batch, out_channel, out_height, out_width]).cuda()  # allocated on GPU; assumes CUDA is available
-
-     h[:, :, 0::2, 0::2] = x1 - x2 - x3 + x4
-     h[:, :, 1::2, 0::2] = x1 - x2 + x3 - x4
-     h[:, :, 0::2, 1::2] = x1 + x2 - x3 - x4
-     h[:, :, 1::2, 1::2] = x1 + x2 + x3 + x4
-
-     return h
-
-
- class DWT(nn.Module):
-     def __init__(self):
-         super(DWT, self).__init__()
-         self.requires_grad = True
-
-     def forward(self, x):
-         return dwt_init(x)
-
-
- class IWT(nn.Module):
-     def __init__(self):
-         super(IWT, self).__init__()
-         self.requires_grad = True
-
-     def forward(self, x):
-         return iwt_init(x)
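
The IWT above is the exact inverse of the DWT: each direction applies a factor of 1/2, so the Haar analysis and synthesis filters compose to the identity. A quick round-trip check, assuming the two modules above are in scope and a CUDA device is available (since `iwt_init` allocates on the GPU):

```python
# Round-trip sanity check for the DWT/IWT pair defined above.
import torch

x = torch.randn(1, 3, 8, 8).cuda()
dwt, iwt = DWT(), IWT()
y = dwt(x)                                   # channels x4, spatial dims halved
assert y.shape == (1, 12, 4, 4)
assert torch.allclose(iwt(y), x, atol=1e-6)  # IWT undoes DWT up to float error
```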
spaces/7thHeaven/ochyai_food/template.md DELETED
@@ -1,23 +0,0 @@
- ### Title of New Recipe
-
- Please write the title of your new recipe here.
-
- ### Your New Recipe Here
-
- Please write the new recipe and brainstorm every point of it to fill in the details.
-
- ### Your Instruction Here
-
- Please write your instructions for cooking the dish and brainstorm every point of the new recipe to fill in the details.
-
- ### Your Comment and Feelings, Taste of New Recipe
-
- Please write a review comment on the new recipe here and brainstorm every point of it to fill in the details.
-
- ### Your Explanation to Blind Person
-
- Please write a review comment that explains the new recipe to blind people concretely and in detail. Please brainstorm every point of the new recipe to fill in the details.
-
- ### Prompt for Visual Expression
-
- Please write a prompt for a generative AI image model to depict the new recipe visually, and brainstorm every point of it to fill in the details.
spaces/AI4PD/hexviz/tests/test_attention.py DELETED
@@ -1,86 +0,0 @@
- import torch
- from Bio.PDB.Structure import Structure
-
- from hexviz.attention import (
-     ModelType,
-     get_attention,
-     get_sequences,
-     get_structure,
-     unidirectional_avg_filtered,
- )
-
-
- def test_get_structure():
-     pdb_id = "2I62"
-     structure = get_structure(pdb_id)
-
-     assert structure is not None
-     assert isinstance(structure, Structure)
-
-
- def test_get_sequences():
-     pdb_id = "1AKE"
-     structure = get_structure(pdb_id)
-
-     sequences = get_sequences(structure)
-
-     assert sequences is not None
-     assert len(sequences) == 2
-
-     A, B = sequences
-     assert A[:3] == ["M", "R", "I"]
-
-
- def test_get_attention_zymctrl():
-
-     result = get_attention("GGG", model_type=ModelType.ZymCTRL)
-
-     assert result is not None
-     assert result.shape == torch.Size([36, 16, 3, 3])
-
-
- def test_get_attention_zymctrl_long_chain():
-     structure = get_structure(pdb_code="6A5J")  # 13 residues long
-
-     sequences = get_sequences(structure)
-
-     result = get_attention(sequences[0], model_type=ModelType.ZymCTRL)
-
-     assert result is not None
-     assert result.shape == torch.Size([36, 16, 13, 13])
-
-
- def test_get_attention_tape():
-     structure = get_structure(pdb_code="6A5J")  # 13 residues long
-     sequences = get_sequences(structure)
-
-     result = get_attention(sequences[0], model_type=ModelType.TAPE_BERT)
-
-     assert result is not None
-     assert result.shape == torch.Size([12, 12, 13, 13])
-
-
- def test_get_attention_prot_bert():
-
-     result = get_attention("GGG", model_type=ModelType.PROT_BERT)
-
-     assert result is not None
-     assert result.shape == torch.Size([30, 16, 3, 3])
-
-
- def test_get_unidirection_avg_filtered():
-     # 1 head, 1 layer, 4 residues long attention tensor
-     attention = torch.tensor(
-         [[[[1, 2, 3, 4], [2, 5, 6, 7], [3, 6, 8, 9], [4, 7, 9, 11]]]], dtype=torch.float32
-     )
-
-     result = unidirectional_avg_filtered(attention, 0, 0, 0)
-
-     assert result is not None
-     assert len(result) == 10
-
-     attention = torch.tensor([[[[1, 2, 3], [2, 5, 6], [4, 7, 91]]]], dtype=torch.float32)
-
-     result = unidirectional_avg_filtered(attention, 0, 0, 0)
-
-     assert len(result) == 6
spaces/AIConsultant/MusicGen/audiocraft/grids/audiogen/__init__.py DELETED
@@ -1,6 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
- """AudioGen grids."""
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/__init__.py DELETED
File without changes
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192/__init__.py DELETED
File without changes
spaces/AchyuthGamer/Free-Accounts-Generator/fortnite/index.html DELETED
@@ -1,39 +0,0 @@
- <!DOCTYPE HTML>
- <html>
- <title>Free Fortnite Account Generator</title>
- <link rel="icon" type="image/png" href="https://huggingface.co/spaces/AchyuthGamer/Free-Accounts-Generator/resolve/main/img/steam-chrome-logo.png">
-
- <!-- Mirrored from altsforyou.org/fortnite/ by HTTrack Website Copier/3.x [XR&CO'2014], Tue, 23 Jun 2020 17:59:11 GMT -->
- <meta name="description" content="fortnite, alt generator, fortnite free premium">
- <meta name="keywords" content="fortnite, alt generator, fortnite free premium">
- <meta http-equiv="cache-control" content="no-cache" />
- <meta http-equiv="Pragma" content="no-cache" />
- <meta http-equiv="Expires" content="-1" />
- <head>
-     <meta name="viewport" content="width=device-width, initial-scale=1.0">
-     <link rel="stylesheet" href="css/style.css" />
-     <link href="https://fonts.googleapis.com/css?family=Montserrat:400,700" rel='stylesheet' type='text/css'>
-     <script type='text/javascript' src='js/d140ouchebag.js'></script>
-     <script type="text/javascript">
-         document.oncontextmenu = new Function("return false;")
-         document.onselectstart = new Function("return false;")
-     </script>
-     <style>
-         header { margin-top: 40px; position: absolute; float: left; font-size: 24px; font-weight: bold; }
-         nav { margin-top: 40px; float: right; color: #FFF; font-size: 16px; letter-spacing: 1px; }
-         nav ul { list-style: none; margin: 0; padding: 0; }
-         nav li { display: inline; float: left; }
-         nav li a { text-decoration: none; margin: 0px 10px 0px 10px; color: #FFF; }
-         nav li a:hover { color: #191919; transition: 0.3s; }
-     </style>
- </head>
- <body>
-     <header>
-         <span style="cursor: pointer;">Free Accounts Paradise</span>
-     </header>
-     <nav>
-         <ul>
-             <li><a href="../index.html">Steam</a></li>
-             <li><a href="../minecraft/index.html">Minecraft</a></li>
-             <li><a href="https://discord.gg/gZwP9gRWZN">Discord</a></li>
-         </ul>
-     </nav>
-     <section>
-         <h1>Fortnite Account Generator</h1>
-         <FORM NAME="WordForm">
-             <INPUT TYPE=TEXT NAME="WordBox" id="wordbox"><BR>
-             <INPUT TYPE=BUTTON VALUE="Generate" onClick="PickRandomWord(document.WordForm);" id="button">
-         </FORM>
-     </section>
- </body>
- </html>
spaces/AchyuthGamer/OpenGPT/g4f/Provider/base_provider.py DELETED
@@ -1,138 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from asyncio import AbstractEventLoop
4
- from concurrent.futures import ThreadPoolExecutor
5
- from abc import ABC, abstractmethod
6
-
7
- from .helper import get_event_loop, get_cookies, format_prompt
8
- from ..typing import AsyncGenerator, CreateResult
9
-
10
-
11
- class BaseProvider(ABC):
12
- url: str
13
- working: bool = False
14
- needs_auth: bool = False
15
- supports_stream: bool = False
16
- supports_gpt_35_turbo: bool = False
17
- supports_gpt_4: bool = False
18
-
19
- @staticmethod
20
- @abstractmethod
21
- def create_completion(
22
- model: str,
23
- messages: list[dict[str, str]],
24
- stream: bool,
25
- **kwargs
26
- ) -> CreateResult:
27
- raise NotImplementedError()
28
-
29
- @classmethod
30
- async def create_async(
31
- cls,
32
- model: str,
33
- messages: list[dict[str, str]],
34
- *,
35
- loop: AbstractEventLoop = None,
36
- executor: ThreadPoolExecutor = None,
37
- **kwargs
38
- ) -> str:
39
- if not loop:
40
- loop = get_event_loop()
41
-
42
- def create_func() -> str:
43
- return "".join(cls.create_completion(
44
- model,
45
- messages,
46
- False,
47
- **kwargs
48
- ))
49
-
50
- return await loop.run_in_executor(
51
- executor,
52
- create_func
53
- )
54
-
55
- @classmethod
56
- @property
57
- def params(cls) -> str:
58
- params = [
59
- ("model", "str"),
60
- ("messages", "list[dict[str, str]]"),
61
- ("stream", "bool"),
62
- ]
63
- param = ", ".join([": ".join(p) for p in params])
64
- return f"g4f.provider.{cls.__name__} supports: ({param})"
65
-
66
-
67
- class AsyncProvider(BaseProvider):
68
- @classmethod
69
- def create_completion(
70
- cls,
71
- model: str,
72
- messages: list[dict[str, str]],
73
- stream: bool = False,
74
- **kwargs
75
- ) -> CreateResult:
76
- loop = get_event_loop()
77
- coro = cls.create_async(model, messages, **kwargs)
78
- yield loop.run_until_complete(coro)
79
-
80
- @staticmethod
81
- @abstractmethod
82
- async def create_async(
83
- model: str,
84
- messages: list[dict[str, str]],
85
- **kwargs
86
- ) -> str:
87
- raise NotImplementedError()
88
-
89
-
90
- class AsyncGeneratorProvider(AsyncProvider):
91
- supports_stream = True
92
-
93
- @classmethod
94
- def create_completion(
95
- cls,
96
- model: str,
97
- messages: list[dict[str, str]],
98
- stream: bool = True,
99
- **kwargs
100
- ) -> CreateResult:
101
- loop = get_event_loop()
102
- generator = cls.create_async_generator(
103
- model,
104
- messages,
105
- stream=stream,
106
- **kwargs
107
- )
108
- gen = generator.__aiter__()
109
- while True:
110
- try:
111
- yield loop.run_until_complete(gen.__anext__())
112
- except StopAsyncIteration:
113
- break
114
-
115
- @classmethod
116
- async def create_async(
117
- cls,
118
- model: str,
119
- messages: list[dict[str, str]],
120
- **kwargs
121
- ) -> str:
122
- return "".join([
123
- chunk async for chunk in cls.create_async_generator(
124
- model,
125
- messages,
126
- stream=False,
127
- **kwargs
128
- )
129
- ])
130
-
131
- @staticmethod
132
- @abstractmethod
133
- def create_async_generator(
134
- model: str,
135
- messages: list[dict[str, str]],
136
- **kwargs
137
- ) -> AsyncGenerator:
138
- raise NotImplementedError()
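
The AsyncGeneratorProvider deleted above bridges an async generator into a synchronous iterator by pumping __anext__ through the event loop. A self-contained sketch of that bridge pattern (the chunks generator and its values are illustrative stand-ins for create_async_generator):

import asyncio

async def chunks():
    # stand-in for create_async_generator: yields streamed text chunks
    for part in ("Hel", "lo, ", "world"):
        await asyncio.sleep(0)  # simulate awaiting a network read
        yield part

def create_completion():
    # the same bridge used above: drive the async generator one item
    # at a time from synchronous code via the event loop
    loop = asyncio.new_event_loop()
    gen = chunks().__aiter__()
    try:
        while True:
            try:
                yield loop.run_until_complete(gen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        loop.close()

print("".join(create_completion()))  # -> Hello, world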
spaces/AgentVerse/agentVerse/agentverse/tasks/__init__.py DELETED
@@ -1,4 +0,0 @@
1
- import os
2
- import yaml
3
-
4
- from agentverse.output_parser import *
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import Pages from './Pages.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('pages', function (config) {
6
- var gameObject = new Pages(this.scene, config);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.Pages', Pages);
12
-
13
- export default Pages;
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/commons.py DELETED
@@ -1,164 +0,0 @@
1
- import math
2
- import numpy as np
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
-
7
-
8
- def init_weights(m, mean=0.0, std=0.01):
9
- classname = m.__class__.__name__
10
- if classname.find("Conv") != -1:
11
- m.weight.data.normal_(mean, std)
12
-
13
-
14
- def get_padding(kernel_size, dilation=1):
15
- return int((kernel_size*dilation - dilation)/2)
16
-
17
-
18
- def convert_pad_shape(pad_shape):
19
- l = pad_shape[::-1]
20
- pad_shape = [item for sublist in l for item in sublist]
21
- return pad_shape
22
-
23
-
24
- def intersperse(lst, item):
25
- result = [item] * (len(lst) * 2 + 1)
26
- result[1::2] = lst
27
- return result
28
-
29
-
30
- def kl_divergence(m_p, logs_p, m_q, logs_q):
31
- """KL(P||Q)"""
32
- kl = (logs_q - logs_p) - 0.5
33
- kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
34
- return kl
35
-
36
-
37
- def rand_gumbel(shape):
38
- """Sample from the Gumbel distribution, protect from overflows."""
39
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
40
- return -torch.log(-torch.log(uniform_samples))
41
-
42
-
43
- def rand_gumbel_like(x):
44
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
45
- return g
46
-
47
-
48
- def slice_segments(x, ids_str, segment_size=4):
49
- ret = torch.zeros_like(x[:, :, :segment_size])
50
- for i in range(x.size(0)):
51
- idx_str = ids_str[i]
52
- idx_end = idx_str + segment_size
53
- try:
54
- ret[i] = x[i, :, idx_str:idx_end]
55
- except RuntimeError:
56
- print("?")
57
- return ret
58
-
59
-
60
- def rand_slice_segments(x, x_lengths=None, segment_size=4):
61
- b, d, t = x.size()
62
- if x_lengths is None:
63
- x_lengths = t
64
- ids_str_max = x_lengths - segment_size + 1
65
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
66
- ret = slice_segments(x, ids_str, segment_size)
67
- return ret, ids_str
68
-
69
-
70
- def get_timing_signal_1d(
71
- length, channels, min_timescale=1.0, max_timescale=1.0e4):
72
- position = torch.arange(length, dtype=torch.float)
73
- num_timescales = channels // 2
74
- log_timescale_increment = (
75
- math.log(float(max_timescale) / float(min_timescale)) /
76
- (num_timescales - 1))
77
- inv_timescales = min_timescale * torch.exp(
78
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
79
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
80
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
81
- signal = F.pad(signal, [0, 0, 0, channels % 2])
82
- signal = signal.view(1, channels, length)
83
- return signal
84
-
85
-
86
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
87
- b, channels, length = x.size()
88
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
89
- return x + signal.to(dtype=x.dtype, device=x.device)
90
-
91
-
92
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
93
- b, channels, length = x.size()
94
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
95
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
96
-
97
-
98
- def subsequent_mask(length):
99
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
100
- return mask
101
-
102
-
103
- @torch.jit.script
104
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
105
- n_channels_int = n_channels[0]
106
- in_act = input_a + input_b
107
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
108
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
109
- acts = t_act * s_act
110
- return acts
111
-
112
-
113
- def convert_pad_shape(pad_shape):
114
- l = pad_shape[::-1]
115
- pad_shape = [item for sublist in l for item in sublist]
116
- return pad_shape
117
-
118
-
119
- def shift_1d(x):
120
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
121
- return x
122
-
123
-
124
- def sequence_mask(length, max_length=None):
125
- if max_length is None:
126
- max_length = length.max()
127
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
128
- return x.unsqueeze(0) < length.unsqueeze(1)
129
-
130
-
131
- def generate_path(duration, mask):
132
- """
133
- duration: [b, 1, t_x]
134
- mask: [b, 1, t_y, t_x]
135
- """
136
- device = duration.device
137
-
138
- b, _, t_y, t_x = mask.shape
139
- cum_duration = torch.cumsum(duration, -1)
140
-
141
- cum_duration_flat = cum_duration.view(b * t_x)
142
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
143
- path = path.view(b, t_x, t_y)
144
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
145
- path = path.unsqueeze(1).transpose(2,3) * mask
146
- return path
147
-
148
-
149
- def clip_grad_value_(parameters, clip_value, norm_type=2):
150
- if isinstance(parameters, torch.Tensor):
151
- parameters = [parameters]
152
- parameters = list(filter(lambda p: p.grad is not None, parameters))
153
- norm_type = float(norm_type)
154
- if clip_value is not None:
155
- clip_value = float(clip_value)
156
-
157
- total_norm = 0
158
- for p in parameters:
159
- param_norm = p.grad.data.norm(norm_type)
160
- total_norm += param_norm.item() ** norm_type
161
- if clip_value is not None:
162
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
163
- total_norm = total_norm ** (1. / norm_type)
164
- return total_norm
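
Of the helpers deleted above, sequence_mask is the building block for both the padding masks and generate_path. Copied here unchanged, with a small usage example:

import torch

def sequence_mask(length, max_length=None):
    # True at positions inside each sequence, False at padding positions
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)

lengths = torch.tensor([2, 4])
print(sequence_mask(lengths))
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True]])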
spaces/AlanMars/QYL-AI-Space/modules/config.py DELETED
@@ -1,202 +0,0 @@
1
- from collections import defaultdict
2
- from contextlib import contextmanager
3
- import os
4
- import logging
5
- import sys
6
- import commentjson as json
7
-
8
- from . import shared
9
- from . import presets
10
-
11
- __all__ = [
12
- "my_api_key",
13
- "authflag",
14
- "auth_list",
15
- "user_key_pairs_list",
16
- "dockerflag",
17
- "retrieve_proxy",
18
- "log_level",
19
- "advance_docs",
20
- "update_doc_config",
21
- "render_latex",
22
- "usage_limit",
23
- "multi_api_key",
24
- "server_name",
25
- "server_port",
26
- "share",
27
- "hide_history_when_not_logged_in"
28
- ]
29
-
30
- # Provide a single, unified config file to avoid the confusion of having too many files (lowest priority)
31
- # It also gives later custom features a place to put their config
32
- if os.path.exists("config.json"):
33
- with open("config.json", "r", encoding='utf-8') as f:
34
- config = json.load(f)
35
- else:
36
- config = {}
37
-
38
- ## Logging setup
39
- log_level = config.get("log_level", "INFO")
40
- logging.basicConfig(
41
- level=log_level,
42
- format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
43
- )
44
-
45
- ## Language handling
46
- lang_config = config.get("language", "auto")
47
- language = os.environ.get("LANGUAGE", lang_config)
48
-
49
- hide_history_when_not_logged_in = config.get("hide_history_when_not_logged_in", False)
50
-
51
- if os.path.exists("api_key.txt"):
52
- logging.info("检测到api_key.txt文件,正在进行迁移...")
53
- with open("api_key.txt", "r") as f:
54
- config["openai_api_key"] = f.read().strip()
55
- os.rename("api_key.txt", "api_key(deprecated).txt")
56
- with open("config.json", "w", encoding='utf-8') as f:
57
- json.dump(config, f, indent=4)
58
-
59
- if os.path.exists("auth.json"):
60
- logging.info("检测到auth.json文件,正在进行迁移...")
61
- auth_list = []
62
- with open("auth.json", "r", encoding='utf-8') as f:
63
- auth = json.load(f)
64
- for _ in auth:
65
- if auth[_]["username"] and auth[_]["password"]:
66
- auth_list.append((auth[_]["username"], auth[_]["password"]))
67
- else:
68
- logging.error("请检查auth.json文件中的用户名和密码!")
69
- sys.exit(1)
70
- config["users"] = auth_list
71
- os.rename("auth.json", "auth(deprecated).json")
72
- with open("config.json", "w", encoding='utf-8') as f:
73
- json.dump(config, f, indent=4)
74
-
75
- ## Handle User authentication
76
- auth_list = config.get("users", []) # 实际上是使用者的列表 用户列表,[[用户名1, 密码1], [用户名2, 密码2], ...]
77
- auth_list = json.loads(os.environ.get("USERS", {"users": [["anonymous", ""]]}))["users"]
78
- authflag = len(auth_list) > 0 # 是否开启认证的状态值,改为判断auth_list长度
79
- logging.info(f"Auth_flag: {authflag}")
80
-
81
- ## Handle user-key pair allocation
82
- user_key_pairs_list = config.get("user_key_pairs", [])  # [[]]
83
- user_key_pairs_list = json.loads(os.environ.get("USER_KEY_PAIRS", '{"openai-keys": [["anonymous", ""]]}'))["openai-keys"]
84
- logging.debug(f"user_key_pairs_list: {user_key_pairs_list}")
85
-
86
-
87
- ## Docker handling: detect whether we are running in Docker
88
- dockerflag = config.get("dockerflag", False)
89
- if os.environ.get("dockerrun") == "yes":
90
- dockerflag = True
91
-
92
- ## Handle the api-key and the list of allowed users
93
- my_api_key = config.get("openai_api_key", "")
94
- my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key)
95
-
96
- xmchat_api_key = config.get("xmchat_api_key", "")
97
- os.environ["XMCHAT_API_KEY"] = xmchat_api_key
98
-
99
- render_latex = config.get("render_latex", True)
100
- if render_latex:
101
- os.environ["RENDER_LATEX"] = "yes"
102
- else:
103
- os.environ["RENDER_LATEX"] = "no"
104
-
105
- usage_limit = os.environ.get("USAGE_LIMIT", config.get("usage_limit", 120))
106
- exchange_rate = os.environ.get("EXCHANGE_RATE", config.get("exchange_rate", 7.0))
107
-
108
- ## Multi-account mechanism
109
- multi_api_key = config.get("multi_api_key", False)  # whether the multi-account mechanism is enabled
110
- if multi_api_key:
111
- api_key_list = config.get("api_key_list", [])
112
- if len(api_key_list) == 0:
113
- logging.error("多账号模式已开启,但api_key_list为空,请检查config.json")
114
- sys.exit(1)
115
- shared.state.set_api_key_queue(api_key_list)
116
-
117
- # Handle a custom api_host; the environment variable takes precedence and is applied automatically when present
118
- api_host = os.environ.get("api_host", config.get("api_host", ""))
119
- if api_host:
120
- shared.state.set_api_host(api_host)
121
-
122
-
123
- @contextmanager
124
- def retrieve_openai_api(api_key=None):
125
- old_api_key = os.environ.get("OPENAI_API_KEY", "")
126
- if api_key is None:
127
- os.environ["OPENAI_API_KEY"] = my_api_key
128
- yield my_api_key
129
- else:
130
- os.environ["OPENAI_API_KEY"] = api_key
131
- yield api_key
132
- os.environ["OPENAI_API_KEY"] = old_api_key
133
-
134
-
135
- ## Proxy handling:
136
- http_proxy = config.get("http_proxy", "")
137
- https_proxy = config.get("https_proxy", "")
138
- http_proxy = os.environ.get("HTTP_PROXY", http_proxy)
139
- https_proxy = os.environ.get("HTTPS_PROXY", https_proxy)
140
-
141
- # Reset the system variables; leave the proxy environment variables unset when not needed, to avoid global-proxy errors
142
- os.environ["HTTP_PROXY"] = ""
143
- os.environ["HTTPS_PROXY"] = ""
144
-
145
- local_embedding = config.get("local_embedding", False)  # whether to use local embeddings
146
-
147
-
148
- @contextmanager
149
- def retrieve_proxy(proxy=None):
150
- """
151
- 1. If proxy is None, set the environment variables and return the newly configured proxy.
152
- 2. If proxy is not None, update the current proxy configuration, but do not update the environment variables.
153
- """
154
- global http_proxy, https_proxy
155
- if proxy is not None:
156
- http_proxy = proxy
157
- https_proxy = proxy
158
- yield http_proxy, https_proxy
159
- else:
160
- old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"]
161
- os.environ["HTTP_PROXY"] = http_proxy
162
- os.environ["HTTPS_PROXY"] = https_proxy
163
- yield http_proxy, https_proxy # return new proxy
164
-
165
- # return old proxy
166
- os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var
167
-
168
-
169
- ## Handle advanced docs
170
- advance_docs = defaultdict(lambda: defaultdict(dict))
171
- advance_docs.update(config.get("advance_docs", {}))
172
-
173
-
174
- def update_doc_config(two_column_pdf):
175
- global advance_docs
176
- advance_docs["pdf"]["two_column"] = two_column_pdf
177
-
178
- logging.info(f"更新后的文件参数为:{advance_docs}")
179
-
180
-
181
- ## Handle gradio.launch parameters
182
- server_name = config.get("server_name", None)
183
- server_port = config.get("server_port", None)
184
- if server_name is None:
185
- if dockerflag:
186
- server_name = "0.0.0.0"
187
- else:
188
- server_name = "127.0.0.1"
189
- if server_port is None:
190
- if dockerflag:
191
- server_port = 7860
192
-
193
- assert server_port is None or isinstance(server_port, int), "server_port must be an int"
194
-
195
- # Set the default model
196
- default_model = config.get("default_model", "")
197
- try:
198
- presets.DEFAULT_MODEL = presets.MODELS.index(default_model)
199
- except ValueError:
200
- pass
201
-
202
- share = config.get("share", False)
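
The module above consistently layers environment variables over config.json values. A minimal sketch of that precedence using the same keys (stdlib json stands in for the commentjson dependency):

import json
import os

# file-level defaults (lowest priority), as in the module above
config = {}
if os.path.exists("config.json"):
    with open("config.json", "r", encoding="utf-8") as f:
        config = json.load(f)

# environment variables override the file
my_api_key = os.environ.get("OPENAI_API_KEY", config.get("openai_api_key", ""))
usage_limit = os.environ.get("USAGE_LIMIT", config.get("usage_limit", 120))
share = config.get("share", False)  # launch flags come from the file only

print(bool(my_api_key), usage_limit, share)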
spaces/AlexWang/lama/saicinpainting/evaluation/__init__.py DELETED
@@ -1,33 +0,0 @@
1
- import logging
2
-
3
- import torch
4
-
5
- from saicinpainting.evaluation.evaluator import InpaintingEvaluatorOnline, ssim_fid100_f1, lpips_fid100_f1
6
- from saicinpainting.evaluation.losses.base_loss import SSIMScore, LPIPSScore, FIDScore
7
-
8
-
9
- def make_evaluator(kind='default', ssim=True, lpips=True, fid=True, integral_kind=None, **kwargs):
10
- logging.info(f'Make evaluator {kind}')
11
- device = "cuda" if torch.cuda.is_available() else "cpu"
12
- metrics = {}
13
- if ssim:
14
- metrics['ssim'] = SSIMScore()
15
- if lpips:
16
- metrics['lpips'] = LPIPSScore()
17
- if fid:
18
- metrics['fid'] = FIDScore().to(device)
19
-
20
- if integral_kind is None:
21
- integral_func = None
22
- elif integral_kind == 'ssim_fid100_f1':
23
- integral_func = ssim_fid100_f1
24
- elif integral_kind == 'lpips_fid100_f1':
25
- integral_func = lpips_fid100_f1
26
- else:
27
- raise ValueError(f'Unexpected integral_kind={integral_kind}')
28
-
29
- if kind == 'default':
30
- return InpaintingEvaluatorOnline(scores=metrics,
31
- integral_func=integral_func,
32
- integral_title=integral_kind,
33
- **kwargs)
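
The factory above assembles a metrics dict from boolean flags and resolves the optional integral aggregate by name; note that it implicitly returns None for any kind other than 'default'. A dependency-free sketch of the same dispatch shape (the score callables are stand-ins for SSIMScore, LPIPSScore, and FIDScore):

def make_evaluator(kind="default", ssim=True, lpips=True, fid=True, integral_kind=None):
    # build the metric dict from flags; the real code instantiates score modules
    metrics = {}
    if ssim:
        metrics["ssim"] = lambda pred, target: 0.0
    if lpips:
        metrics["lpips"] = lambda pred, target: 0.0
    if fid:
        metrics["fid"] = lambda pred, target: 0.0

    # resolve the integral aggregate by name, rejecting unknown names
    integral_funcs = {
        "ssim_fid100_f1": lambda scores: 0.0,
        "lpips_fid100_f1": lambda scores: 0.0,
    }
    if integral_kind is None:
        integral_func = None
    elif integral_kind in integral_funcs:
        integral_func = integral_funcs[integral_kind]
    else:
        raise ValueError(f"Unexpected integral_kind={integral_kind}")

    if kind == "default":
        return metrics, integral_func
    raise ValueError(f"Unknown evaluator kind {kind}")  # the original falls through to None here

print(sorted(make_evaluator(fid=False)[0]))  # ['lpips', 'ssim']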
spaces/AlirezaSM/bear_classifier/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Bear Classifier
3
- emoji: 🔥
4
- colorFrom: indigo
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.1.4
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/text_to_image/train_text_to_image.py DELETED
@@ -1,1098 +0,0 @@
1
- #!/usr/bin/env python
2
- # coding=utf-8
3
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
- import argparse
17
- import logging
18
- import math
19
- import os
20
- import random
21
- import shutil
22
- from pathlib import Path
23
-
24
- import accelerate
25
- import datasets
26
- import numpy as np
27
- import torch
28
- import torch.nn.functional as F
29
- import torch.utils.checkpoint
30
- import transformers
31
- from accelerate import Accelerator
32
- from accelerate.logging import get_logger
33
- from accelerate.state import AcceleratorState
34
- from accelerate.utils import ProjectConfiguration, set_seed
35
- from datasets import load_dataset
36
- from huggingface_hub import create_repo, upload_folder
37
- from packaging import version
38
- from PIL import Image
39
- from torchvision import transforms
40
- from tqdm.auto import tqdm
41
- from transformers import CLIPTextModel, CLIPTokenizer
42
- from transformers.utils import ContextManagers
43
-
44
- import diffusers
45
- from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
46
- from diffusers.optimization import get_scheduler
47
- from diffusers.training_utils import EMAModel
48
- from diffusers.utils import check_min_version, deprecate, is_wandb_available
49
- from diffusers.utils.import_utils import is_xformers_available
50
-
51
-
52
- if is_wandb_available():
53
- import wandb
54
-
55
-
56
- # Will error if the minimal version of diffusers is not installed. Remove at your own risk.
57
- check_min_version("0.19.0")
58
-
59
- logger = get_logger(__name__, log_level="INFO")
60
-
61
- DATASET_NAME_MAPPING = {
62
- "lambdalabs/pokemon-blip-captions": ("image", "text"),
63
- }
64
-
65
-
66
- def make_image_grid(imgs, rows, cols):
67
- assert len(imgs) == rows * cols
68
-
69
- w, h = imgs[0].size
70
- grid = Image.new("RGB", size=(cols * w, rows * h))
71
-
72
- for i, img in enumerate(imgs):
73
- grid.paste(img, box=(i % cols * w, i // cols * h))
74
- return grid
75
-
76
-
77
- def save_model_card(
78
- args,
79
- repo_id: str,
80
- images=None,
81
- repo_folder=None,
82
- ):
83
- img_str = ""
84
- if len(images) > 0:
85
- image_grid = make_image_grid(images, 1, len(args.validation_prompts))
86
- image_grid.save(os.path.join(repo_folder, "val_imgs_grid.png"))
87
- img_str += "![val_imgs_grid](./val_imgs_grid.png)\n"
88
-
89
- yaml = f"""
90
- ---
91
- license: creativeml-openrail-m
92
- base_model: {args.pretrained_model_name_or_path}
93
- datasets:
94
- - {args.dataset_name}
95
- tags:
96
- - stable-diffusion
97
- - stable-diffusion-diffusers
98
- - text-to-image
99
- - diffusers
100
- inference: true
101
- ---
102
- """
103
- model_card = f"""
104
- # Text-to-image finetuning - {repo_id}
105
-
106
- This pipeline was finetuned from **{args.pretrained_model_name_or_path}** on the **{args.dataset_name}** dataset. Below are some example images generated with the finetuned pipeline using the following prompts: {args.validation_prompts}: \n
107
- {img_str}
108
-
109
- ## Pipeline usage
110
-
111
- You can use the pipeline like so:
112
-
113
- ```python
114
- from diffusers import DiffusionPipeline
115
- import torch
116
-
117
- pipeline = DiffusionPipeline.from_pretrained("{repo_id}", torch_dtype=torch.float16)
118
- prompt = "{args.validation_prompts[0]}"
119
- image = pipeline(prompt).images[0]
120
- image.save("my_image.png")
121
- ```
122
-
123
- ## Training info
124
-
125
- These are the key hyperparameters used during training:
126
-
127
- * Epochs: {args.num_train_epochs}
128
- * Learning rate: {args.learning_rate}
129
- * Batch size: {args.train_batch_size}
130
- * Gradient accumulation steps: {args.gradient_accumulation_steps}
131
- * Image resolution: {args.resolution}
132
- * Mixed-precision: {args.mixed_precision}
133
-
134
- """
135
- wandb_info = ""
136
- if is_wandb_available():
137
- wandb_run_url = None
138
- if wandb.run is not None:
139
- wandb_run_url = wandb.run.url
140
-
141
- if wandb_run_url is not None:
142
- wandb_info = f"""
143
- More information on all the CLI arguments and the environment are available on your [`wandb` run page]({wandb_run_url}).
144
- """
145
-
146
- model_card += wandb_info
147
-
148
- with open(os.path.join(repo_folder, "README.md"), "w") as f:
149
- f.write(yaml + model_card)
150
-
151
-
152
- def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, epoch):
153
- logger.info("Running validation... ")
154
-
155
- pipeline = StableDiffusionPipeline.from_pretrained(
156
- args.pretrained_model_name_or_path,
157
- vae=accelerator.unwrap_model(vae),
158
- text_encoder=accelerator.unwrap_model(text_encoder),
159
- tokenizer=tokenizer,
160
- unet=accelerator.unwrap_model(unet),
161
- safety_checker=None,
162
- revision=args.revision,
163
- torch_dtype=weight_dtype,
164
- )
165
- pipeline = pipeline.to(accelerator.device)
166
- pipeline.set_progress_bar_config(disable=True)
167
-
168
- if args.enable_xformers_memory_efficient_attention:
169
- pipeline.enable_xformers_memory_efficient_attention()
170
-
171
- if args.seed is None:
172
- generator = None
173
- else:
174
- generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
175
-
176
- images = []
177
- for i in range(len(args.validation_prompts)):
178
- with torch.autocast("cuda"):
179
- image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0]
180
-
181
- images.append(image)
182
-
183
- for tracker in accelerator.trackers:
184
- if tracker.name == "tensorboard":
185
- np_images = np.stack([np.asarray(img) for img in images])
186
- tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
187
- elif tracker.name == "wandb":
188
- tracker.log(
189
- {
190
- "validation": [
191
- wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}")
192
- for i, image in enumerate(images)
193
- ]
194
- }
195
- )
196
- else:
197
- logger.warn(f"image logging not implemented for {tracker.name}")
198
-
199
- del pipeline
200
- torch.cuda.empty_cache()
201
-
202
- return images
203
-
204
-
205
- def parse_args():
206
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
207
- parser.add_argument(
208
- "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1."
209
- )
210
- parser.add_argument(
211
- "--pretrained_model_name_or_path",
212
- type=str,
213
- default=None,
214
- required=True,
215
- help="Path to pretrained model or model identifier from huggingface.co/models.",
216
- )
217
- parser.add_argument(
218
- "--revision",
219
- type=str,
220
- default=None,
221
- required=False,
222
- help="Revision of pretrained model identifier from huggingface.co/models.",
223
- )
224
- parser.add_argument(
225
- "--dataset_name",
226
- type=str,
227
- default=None,
228
- help=(
229
- "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
230
- " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
231
- " or to a folder containing files that 🤗 Datasets can understand."
232
- ),
233
- )
234
- parser.add_argument(
235
- "--dataset_config_name",
236
- type=str,
237
- default=None,
238
- help="The config of the Dataset, leave as None if there's only one config.",
239
- )
240
- parser.add_argument(
241
- "--train_data_dir",
242
- type=str,
243
- default=None,
244
- help=(
245
- "A folder containing the training data. Folder contents must follow the structure described in"
246
- " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
247
- " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
248
- ),
249
- )
250
- parser.add_argument(
251
- "--image_column", type=str, default="image", help="The column of the dataset containing an image."
252
- )
253
- parser.add_argument(
254
- "--caption_column",
255
- type=str,
256
- default="text",
257
- help="The column of the dataset containing a caption or a list of captions.",
258
- )
259
- parser.add_argument(
260
- "--max_train_samples",
261
- type=int,
262
- default=None,
263
- help=(
264
- "For debugging purposes or quicker training, truncate the number of training examples to this "
265
- "value if set."
266
- ),
267
- )
268
- parser.add_argument(
269
- "--validation_prompts",
270
- type=str,
271
- default=None,
272
- nargs="+",
273
- help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."),
274
- )
275
- parser.add_argument(
276
- "--output_dir",
277
- type=str,
278
- default="sd-model-finetuned",
279
- help="The output directory where the model predictions and checkpoints will be written.",
280
- )
281
- parser.add_argument(
282
- "--cache_dir",
283
- type=str,
284
- default=None,
285
- help="The directory where the downloaded models and datasets will be stored.",
286
- )
287
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
288
- parser.add_argument(
289
- "--resolution",
290
- type=int,
291
- default=512,
292
- help=(
293
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
294
- " resolution"
295
- ),
296
- )
297
- parser.add_argument(
298
- "--center_crop",
299
- default=False,
300
- action="store_true",
301
- help=(
302
- "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
303
- " cropped. The images will be resized to the resolution first before cropping."
304
- ),
305
- )
306
- parser.add_argument(
307
- "--random_flip",
308
- action="store_true",
309
- help="whether to randomly flip images horizontally",
310
- )
311
- parser.add_argument(
312
- "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
313
- )
314
- parser.add_argument("--num_train_epochs", type=int, default=100)
315
- parser.add_argument(
316
- "--max_train_steps",
317
- type=int,
318
- default=None,
319
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
320
- )
321
- parser.add_argument(
322
- "--gradient_accumulation_steps",
323
- type=int,
324
- default=1,
325
- help="Number of updates steps to accumulate before performing a backward/update pass.",
326
- )
327
- parser.add_argument(
328
- "--gradient_checkpointing",
329
- action="store_true",
330
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
331
- )
332
- parser.add_argument(
333
- "--learning_rate",
334
- type=float,
335
- default=1e-4,
336
- help="Initial learning rate (after the potential warmup period) to use.",
337
- )
338
- parser.add_argument(
339
- "--scale_lr",
340
- action="store_true",
341
- default=False,
342
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
343
- )
344
- parser.add_argument(
345
- "--lr_scheduler",
346
- type=str,
347
- default="constant",
348
- help=(
349
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
350
- ' "constant", "constant_with_warmup"]'
351
- ),
352
- )
353
- parser.add_argument(
354
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
355
- )
356
- parser.add_argument(
357
- "--snr_gamma",
358
- type=float,
359
- default=None,
360
- help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
361
- "More details here: https://arxiv.org/abs/2303.09556.",
362
- )
363
- parser.add_argument(
364
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
365
- )
366
- parser.add_argument(
367
- "--allow_tf32",
368
- action="store_true",
369
- help=(
370
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
371
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
372
- ),
373
- )
374
- parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
375
- parser.add_argument(
376
- "--non_ema_revision",
377
- type=str,
378
- default=None,
379
- required=False,
380
- help=(
381
- "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or"
382
- " remote repository specified with --pretrained_model_name_or_path."
383
- ),
384
- )
385
- parser.add_argument(
386
- "--dataloader_num_workers",
387
- type=int,
388
- default=0,
389
- help=(
390
- "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
391
- ),
392
- )
393
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
394
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
395
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
396
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
397
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
398
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
399
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
400
- parser.add_argument(
401
- "--prediction_type",
402
- type=str,
403
- default=None,
404
- help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.",
405
- )
406
- parser.add_argument(
407
- "--hub_model_id",
408
- type=str,
409
- default=None,
410
- help="The name of the repository to keep in sync with the local `output_dir`.",
411
- )
412
- parser.add_argument(
413
- "--logging_dir",
414
- type=str,
415
- default="logs",
416
- help=(
417
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
418
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
419
- ),
420
- )
421
- parser.add_argument(
422
- "--mixed_precision",
423
- type=str,
424
- default=None,
425
- choices=["no", "fp16", "bf16"],
426
- help=(
427
- "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
428
- " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
429
- " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
430
- ),
431
- )
432
- parser.add_argument(
433
- "--report_to",
434
- type=str,
435
- default="tensorboard",
436
- help=(
437
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
438
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
439
- ),
440
- )
441
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
442
- parser.add_argument(
443
- "--checkpointing_steps",
444
- type=int,
445
- default=500,
446
- help=(
447
- "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
448
- " training using `--resume_from_checkpoint`."
449
- ),
450
- )
451
- parser.add_argument(
452
- "--checkpoints_total_limit",
453
- type=int,
454
- default=None,
455
- help=("Max number of checkpoints to store."),
456
- )
457
- parser.add_argument(
458
- "--resume_from_checkpoint",
459
- type=str,
460
- default=None,
461
- help=(
462
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
463
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
464
- ),
465
- )
466
- parser.add_argument(
467
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
468
- )
469
- parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
470
- parser.add_argument(
471
- "--validation_epochs",
472
- type=int,
473
- default=5,
474
- help="Run validation every X epochs.",
475
- )
476
- parser.add_argument(
477
- "--tracker_project_name",
478
- type=str,
479
- default="text2image-fine-tune",
480
- help=(
481
- "The `project_name` argument passed to Accelerator.init_trackers for"
482
- " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
483
- ),
484
- )
485
-
486
- args = parser.parse_args()
487
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
488
- if env_local_rank != -1 and env_local_rank != args.local_rank:
489
- args.local_rank = env_local_rank
490
-
491
- # Sanity checks
492
- if args.dataset_name is None and args.train_data_dir is None:
493
- raise ValueError("Need either a dataset name or a training folder.")
494
-
495
- # default to using the same revision for the non-ema model if not specified
496
- if args.non_ema_revision is None:
497
- args.non_ema_revision = args.revision
498
-
499
- return args
500
-
501
-
502
- def main():
503
- args = parse_args()
504
-
505
- if args.non_ema_revision is not None:
506
- deprecate(
507
- "non_ema_revision!=None",
508
- "0.15.0",
509
- message=(
510
- "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to"
511
- " use `--variant=non_ema` instead."
512
- ),
513
- )
514
- logging_dir = os.path.join(args.output_dir, args.logging_dir)
515
-
516
- accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
517
-
518
- accelerator = Accelerator(
519
- gradient_accumulation_steps=args.gradient_accumulation_steps,
520
- mixed_precision=args.mixed_precision,
521
- log_with=args.report_to,
522
- project_config=accelerator_project_config,
523
- )
524
-
525
- # Make one log on every process with the configuration for debugging.
526
- logging.basicConfig(
527
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
528
- datefmt="%m/%d/%Y %H:%M:%S",
529
- level=logging.INFO,
530
- )
531
- logger.info(accelerator.state, main_process_only=False)
532
- if accelerator.is_local_main_process:
533
- datasets.utils.logging.set_verbosity_warning()
534
- transformers.utils.logging.set_verbosity_warning()
535
- diffusers.utils.logging.set_verbosity_info()
536
- else:
537
- datasets.utils.logging.set_verbosity_error()
538
- transformers.utils.logging.set_verbosity_error()
539
- diffusers.utils.logging.set_verbosity_error()
540
-
541
- # If passed along, set the training seed now.
542
- if args.seed is not None:
543
- set_seed(args.seed)
544
-
545
- # Handle the repository creation
546
- if accelerator.is_main_process:
547
- if args.output_dir is not None:
548
- os.makedirs(args.output_dir, exist_ok=True)
549
-
550
- if args.push_to_hub:
551
- repo_id = create_repo(
552
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
553
- ).repo_id
554
-
555
- # Load scheduler, tokenizer and models.
556
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
557
- tokenizer = CLIPTokenizer.from_pretrained(
558
- args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
559
- )
560
-
561
- def deepspeed_zero_init_disabled_context_manager():
562
- """
563
- returns either a context list that includes one that will disable zero.Init or an empty context list
564
- """
565
- deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None
566
- if deepspeed_plugin is None:
567
- return []
568
-
569
- return [deepspeed_plugin.zero3_init_context_manager(enable=False)]
570
-
571
- # Currently Accelerate doesn't know how to handle multiple models under Deepspeed ZeRO stage 3.
572
- # For this to work properly all models must be run through `accelerate.prepare`. But accelerate
573
- # will try to assign the same optimizer with the same weights to all models during
574
- # `deepspeed.initialize`, which of course doesn't work.
575
- #
576
- # For now the following workaround will partially support Deepspeed ZeRO-3, by excluding the 2
577
- # frozen models from being partitioned during `zero.Init` which gets called during
578
- # `from_pretrained` So CLIPTextModel and AutoencoderKL will not enjoy the parameter sharding
579
- # across multiple gpus and only UNet2DConditionModel will get ZeRO sharded.
580
- with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
581
- text_encoder = CLIPTextModel.from_pretrained(
582
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
583
- )
584
- vae = AutoencoderKL.from_pretrained(
585
- args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
586
- )
587
-
588
- unet = UNet2DConditionModel.from_pretrained(
589
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision
590
- )
591
-
592
- # Freeze vae and text_encoder
593
- vae.requires_grad_(False)
594
- text_encoder.requires_grad_(False)
595
-
596
- # Create EMA for the unet.
597
- if args.use_ema:
598
- ema_unet = UNet2DConditionModel.from_pretrained(
599
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
600
- )
601
- ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config)
602
-
603
- if args.enable_xformers_memory_efficient_attention:
604
- if is_xformers_available():
605
- import xformers
606
-
607
- xformers_version = version.parse(xformers.__version__)
608
- if xformers_version == version.parse("0.0.16"):
609
- logger.warn(
610
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
611
- )
612
- unet.enable_xformers_memory_efficient_attention()
613
- else:
614
- raise ValueError("xformers is not available. Make sure it is installed correctly")
615
-
616
- def compute_snr(timesteps):
617
- """
618
- Computes SNR as per https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849
619
- """
620
- alphas_cumprod = noise_scheduler.alphas_cumprod
621
- sqrt_alphas_cumprod = alphas_cumprod**0.5
622
- sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5
623
-
624
- # Expand the tensors.
625
- # Adapted from https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L1026
626
- sqrt_alphas_cumprod = sqrt_alphas_cumprod.to(device=timesteps.device)[timesteps].float()
627
- while len(sqrt_alphas_cumprod.shape) < len(timesteps.shape):
628
- sqrt_alphas_cumprod = sqrt_alphas_cumprod[..., None]
629
- alpha = sqrt_alphas_cumprod.expand(timesteps.shape)
630
-
631
- sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod.to(device=timesteps.device)[timesteps].float()
632
- while len(sqrt_one_minus_alphas_cumprod.shape) < len(timesteps.shape):
633
- sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[..., None]
634
- sigma = sqrt_one_minus_alphas_cumprod.expand(timesteps.shape)
635
-
636
- # Compute SNR.
637
- snr = (alpha / sigma) ** 2
638
- return snr
639
-
640
- # `accelerate` 0.16.0 will have better support for customized saving
641
- if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
642
- # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
643
- def save_model_hook(models, weights, output_dir):
644
- if args.use_ema:
645
- ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema"))
646
-
647
- for i, model in enumerate(models):
648
- model.save_pretrained(os.path.join(output_dir, "unet"))
649
-
650
- # make sure to pop weight so that corresponding model is not saved again
651
- weights.pop()
652
-
653
- def load_model_hook(models, input_dir):
654
- if args.use_ema:
655
- load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel)
656
- ema_unet.load_state_dict(load_model.state_dict())
657
- ema_unet.to(accelerator.device)
658
- del load_model
659
-
660
- for i in range(len(models)):
661
- # pop models so that they are not loaded again
662
- model = models.pop()
663
-
664
- # load diffusers style into model
665
- load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
666
- model.register_to_config(**load_model.config)
667
-
668
- model.load_state_dict(load_model.state_dict())
669
- del load_model
670
-
671
- accelerator.register_save_state_pre_hook(save_model_hook)
672
- accelerator.register_load_state_pre_hook(load_model_hook)
673
-
674
- if args.gradient_checkpointing:
675
- unet.enable_gradient_checkpointing()
676
-
677
- # Enable TF32 for faster training on Ampere GPUs,
678
- # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
679
- if args.allow_tf32:
680
- torch.backends.cuda.matmul.allow_tf32 = True
681
-
682
- if args.scale_lr:
683
- args.learning_rate = (
684
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
685
- )
686
-
687
- # Initialize the optimizer
688
- if args.use_8bit_adam:
689
- try:
690
- import bitsandbytes as bnb
691
- except ImportError:
692
- raise ImportError(
693
- "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
694
- )
695
-
696
- optimizer_cls = bnb.optim.AdamW8bit
697
- else:
698
- optimizer_cls = torch.optim.AdamW
699
-
700
- optimizer = optimizer_cls(
701
- unet.parameters(),
702
- lr=args.learning_rate,
703
- betas=(args.adam_beta1, args.adam_beta2),
704
- weight_decay=args.adam_weight_decay,
705
- eps=args.adam_epsilon,
706
- )
707
-
708
- # Get the datasets: you can either provide your own training and evaluation files (see below)
709
- # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
710
-
711
- # In distributed training, the load_dataset function guarantees that only one local process can concurrently
712
- # download the dataset.
713
- if args.dataset_name is not None:
714
- # Downloading and loading a dataset from the hub.
715
- dataset = load_dataset(
716
- args.dataset_name,
717
- args.dataset_config_name,
718
- cache_dir=args.cache_dir,
719
- )
720
- else:
721
- data_files = {}
722
- if args.train_data_dir is not None:
723
- data_files["train"] = os.path.join(args.train_data_dir, "**")
724
- dataset = load_dataset(
725
- "imagefolder",
726
- data_files=data_files,
727
- cache_dir=args.cache_dir,
728
- )
729
- # See more about loading custom images at
730
- # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder
731
-
732
- # Preprocessing the datasets.
733
- # We need to tokenize inputs and targets.
734
- column_names = dataset["train"].column_names
735
-
736
- # 6. Get the column names for input/target.
737
- dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
738
- if args.image_column is None:
739
- image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
740
- else:
741
- image_column = args.image_column
742
- if image_column not in column_names:
743
- raise ValueError(
744
- f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}"
745
- )
746
- if args.caption_column is None:
747
- caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
748
- else:
749
- caption_column = args.caption_column
750
- if caption_column not in column_names:
751
- raise ValueError(
752
- f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}"
753
- )
754
-
755
- # Preprocessing the datasets.
756
- # We need to tokenize input captions and transform the images.
757
- def tokenize_captions(examples, is_train=True):
758
- captions = []
759
- for caption in examples[caption_column]:
760
- if isinstance(caption, str):
761
- captions.append(caption)
762
- elif isinstance(caption, (list, np.ndarray)):
763
- # take a random caption if there are multiple
764
- captions.append(random.choice(caption) if is_train else caption[0])
765
- else:
766
- raise ValueError(
767
- f"Caption column `{caption_column}` should contain either strings or lists of strings."
768
- )
769
- inputs = tokenizer(
770
- captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
771
- )
772
- return inputs.input_ids
773
-
774
- # Preprocessing the datasets.
775
- train_transforms = transforms.Compose(
776
- [
777
- transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
778
- transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
779
- transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
780
- transforms.ToTensor(),
781
- transforms.Normalize([0.5], [0.5]),
782
- ]
783
- )
784
-
785
- def preprocess_train(examples):
786
- images = [image.convert("RGB") for image in examples[image_column]]
787
- examples["pixel_values"] = [train_transforms(image) for image in images]
788
- examples["input_ids"] = tokenize_captions(examples)
789
- return examples
790
-
791
- with accelerator.main_process_first():
792
- if args.max_train_samples is not None:
793
- dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
794
- # Set the training transforms
795
- train_dataset = dataset["train"].with_transform(preprocess_train)
796
-
797
- def collate_fn(examples):
798
- pixel_values = torch.stack([example["pixel_values"] for example in examples])
799
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
800
- input_ids = torch.stack([example["input_ids"] for example in examples])
801
- return {"pixel_values": pixel_values, "input_ids": input_ids}
802
-
803
- # DataLoaders creation:
804
- train_dataloader = torch.utils.data.DataLoader(
805
- train_dataset,
806
- shuffle=True,
807
- collate_fn=collate_fn,
808
- batch_size=args.train_batch_size,
809
- num_workers=args.dataloader_num_workers,
810
- )
811
-
812
- # Scheduler and math around the number of training steps.
813
- overrode_max_train_steps = False
814
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
815
- if args.max_train_steps is None:
816
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
817
- overrode_max_train_steps = True
818
-
819
- lr_scheduler = get_scheduler(
820
- args.lr_scheduler,
821
- optimizer=optimizer,
822
- num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
823
- num_training_steps=args.max_train_steps * accelerator.num_processes,
824
- )
825
-
826
- # Prepare everything with our `accelerator`.
827
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
828
- unet, optimizer, train_dataloader, lr_scheduler
829
- )
830
-
831
- if args.use_ema:
832
- ema_unet.to(accelerator.device)
833
-
834
- # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
835
- # as these weights are only used for inference, keeping weights in full precision is not required.
836
- weight_dtype = torch.float32
837
- if accelerator.mixed_precision == "fp16":
838
- weight_dtype = torch.float16
839
- args.mixed_precision = accelerator.mixed_precision
840
- elif accelerator.mixed_precision == "bf16":
841
- weight_dtype = torch.bfloat16
842
- args.mixed_precision = accelerator.mixed_precision
843
-
844
- # Move text_encoder and vae to the GPU and cast to weight_dtype
845
- text_encoder.to(accelerator.device, dtype=weight_dtype)
846
- vae.to(accelerator.device, dtype=weight_dtype)
847
-
848
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
849
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
850
- if overrode_max_train_steps:
851
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
852
- # Afterwards we recalculate our number of training epochs
853
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
854
-
855
- # We need to initialize the trackers we use, and also store our configuration.
856
- # The trackers initializes automatically on the main process.
857
- if accelerator.is_main_process:
858
- tracker_config = dict(vars(args))
859
- tracker_config.pop("validation_prompts")
860
- accelerator.init_trackers(args.tracker_project_name, tracker_config)
861
-
862
- # Train!
863
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
864
-
865
- logger.info("***** Running training *****")
866
- logger.info(f" Num examples = {len(train_dataset)}")
867
- logger.info(f" Num Epochs = {args.num_train_epochs}")
868
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
869
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
870
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
871
- logger.info(f" Total optimization steps = {args.max_train_steps}")
872
- global_step = 0
873
- first_epoch = 0
874
-
875
- # Potentially load in the weights and states from a previous save
876
- if args.resume_from_checkpoint:
877
- if args.resume_from_checkpoint != "latest":
878
- path = os.path.basename(args.resume_from_checkpoint)
879
- else:
880
- # Get the most recent checkpoint
881
- dirs = os.listdir(args.output_dir)
882
- dirs = [d for d in dirs if d.startswith("checkpoint")]
883
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
884
- path = dirs[-1] if len(dirs) > 0 else None
885
-
886
- if path is None:
887
- accelerator.print(
888
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
889
- )
890
- args.resume_from_checkpoint = None
891
- else:
892
- accelerator.print(f"Resuming from checkpoint {path}")
893
- accelerator.load_state(os.path.join(args.output_dir, path))
894
- global_step = int(path.split("-")[1])
895
-
896
- resume_global_step = global_step * args.gradient_accumulation_steps
897
- first_epoch = global_step // num_update_steps_per_epoch
898
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
899
-
900
- # Only show the progress bar once on each machine.
901
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
902
- progress_bar.set_description("Steps")
903
-
904
- for epoch in range(first_epoch, args.num_train_epochs):
905
- unet.train()
906
- train_loss = 0.0
907
- for step, batch in enumerate(train_dataloader):
908
- # Skip steps until we reach the resumed step
909
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
910
- if step % args.gradient_accumulation_steps == 0:
911
- progress_bar.update(1)
912
- continue
913
-
914
- with accelerator.accumulate(unet):
915
- # Convert images to latent space
916
- latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample()
917
- latents = latents * vae.config.scaling_factor
918
-
919
-                 # Sample noise that we'll add to the latents
-                 noise = torch.randn_like(latents)
-                 if args.noise_offset:
-                     # https://www.crosslabs.org//blog/diffusion-with-offset-noise
-                     noise += args.noise_offset * torch.randn(
-                         (latents.shape[0], latents.shape[1], 1, 1), device=latents.device
-                     )
-                 if args.input_perturbation:
-                     new_noise = noise + args.input_perturbation * torch.randn_like(noise)
-                 bsz = latents.shape[0]
-                 # Sample a random timestep for each image
-                 timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
-                 timesteps = timesteps.long()
-
-                 # Add noise to the latents according to the noise magnitude at each timestep
-                 # (this is the forward diffusion process)
-                 if args.input_perturbation:
-                     noisy_latents = noise_scheduler.add_noise(latents, new_noise, timesteps)
-                 else:
-                     noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
-                 # Get the text embedding for conditioning
-                 encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
-                 # Get the target for loss depending on the prediction type
-                 if args.prediction_type is not None:
-                     # set prediction_type of scheduler if defined
-                     noise_scheduler.register_to_config(prediction_type=args.prediction_type)
-
-                 if noise_scheduler.config.prediction_type == "epsilon":
-                     target = noise
-                 elif noise_scheduler.config.prediction_type == "v_prediction":
-                     target = noise_scheduler.get_velocity(latents, noise, timesteps)
-                 else:
-                     raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
-                 # Predict the noise residual and compute loss
-                 model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
-
-                 if args.snr_gamma is None:
-                     loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-                 else:
-                     # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
-                     # Since we predict the noise instead of x_0, the original formulation is slightly changed.
-                     # This is discussed in Section 4.2 of the same paper.
-                     snr = compute_snr(timesteps)
-                     mse_loss_weights = (
-                         torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(dim=1)[0] / snr
-                     )
-                     # We first calculate the original loss. Then we mean over the non-batch dimensions and
-                     # rebalance the sample-wise losses with their respective loss weights.
-                     # Finally, we take the mean of the rebalanced loss.
-                     loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
-                     loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
-                     loss = loss.mean()
-
-                 # Gather the losses across all processes for logging (if we use distributed training).
-                 avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
-                 train_loss += avg_loss.item() / args.gradient_accumulation_steps
-
-                 # Backpropagate
-                 accelerator.backward(loss)
-                 if accelerator.sync_gradients:
-                     accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm)
-                 optimizer.step()
-                 lr_scheduler.step()
-                 optimizer.zero_grad()
-
-             # Checks if the accelerator has performed an optimization step behind the scenes
-             if accelerator.sync_gradients:
-                 if args.use_ema:
-                     ema_unet.step(unet.parameters())
-                 progress_bar.update(1)
-                 global_step += 1
-                 accelerator.log({"train_loss": train_loss}, step=global_step)
-                 train_loss = 0.0
-
-                 if global_step % args.checkpointing_steps == 0:
-                     if accelerator.is_main_process:
-                         # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
-                         if args.checkpoints_total_limit is not None:
-                             checkpoints = os.listdir(args.output_dir)
-                             checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
-                             checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
-
-                             # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
-                             if len(checkpoints) >= args.checkpoints_total_limit:
-                                 num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
-                                 removing_checkpoints = checkpoints[0:num_to_remove]
-
-                                 logger.info(
-                                     f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
-                                 )
-                                 logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
-
-                                 for removing_checkpoint in removing_checkpoints:
-                                     removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
-                                     shutil.rmtree(removing_checkpoint)
-
-                         save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
-                         accelerator.save_state(save_path)
-                         logger.info(f"Saved state to {save_path}")
-
-             logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
-             progress_bar.set_postfix(**logs)
-
-             if global_step >= args.max_train_steps:
-                 break
-
-         if accelerator.is_main_process:
-             if args.validation_prompts is not None and epoch % args.validation_epochs == 0:
-                 if args.use_ema:
-                     # Store the UNet parameters temporarily and load the EMA parameters to perform inference.
-                     ema_unet.store(unet.parameters())
-                     ema_unet.copy_to(unet.parameters())
-                 log_validation(
-                     vae,
-                     text_encoder,
-                     tokenizer,
-                     unet,
-                     args,
-                     accelerator,
-                     weight_dtype,
-                     global_step,
-                 )
-                 if args.use_ema:
-                     # Switch back to the original UNet parameters.
-                     ema_unet.restore(unet.parameters())
-
-     # Create the pipeline using the trained modules and save it.
-     accelerator.wait_for_everyone()
-     if accelerator.is_main_process:
-         unet = accelerator.unwrap_model(unet)
-         if args.use_ema:
-             ema_unet.copy_to(unet.parameters())
-
-         pipeline = StableDiffusionPipeline.from_pretrained(
-             args.pretrained_model_name_or_path,
-             text_encoder=text_encoder,
-             vae=vae,
-             unet=unet,
-             revision=args.revision,
-         )
-         pipeline.save_pretrained(args.output_dir)
-
-         # Run a final round of inference.
-         images = []
-         if args.validation_prompts is not None:
-             logger.info("Running inference for collecting generated images...")
-             pipeline = pipeline.to(accelerator.device)
-             pipeline.torch_dtype = weight_dtype
-             pipeline.set_progress_bar_config(disable=True)
-
-             if args.enable_xformers_memory_efficient_attention:
-                 pipeline.enable_xformers_memory_efficient_attention()
-
-             if args.seed is None:
-                 generator = None
-             else:
-                 generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
-
-             for i in range(len(args.validation_prompts)):
-                 with torch.autocast("cuda"):
-                     image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0]
-                 images.append(image)
-
-         if args.push_to_hub:
-             save_model_card(args, repo_id, images, repo_folder=args.output_dir)
-             upload_folder(
-                 repo_id=repo_id,
-                 folder_path=args.output_dir,
-                 commit_message="End of training",
-                 ignore_patterns=["step_*", "epoch_*"],
-             )
-
-     accelerator.end_training()
-
-
- if __name__ == "__main__":
-     main()
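
A note on the `snr_gamma` branch above: it implements Min-SNR loss weighting from https://arxiv.org/abs/2303.09556, clamping each sample's MSE weight at `gamma / SNR(t)`. A minimal self-contained sketch of the idea, assuming a DDPM-style `alphas_cumprod` schedule (the helper below is a hypothetical stand-in for the script's `compute_snr`):

```python
import torch

def compute_snr_sketch(alphas_cumprod: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor:
    # SNR(t) = alpha_bar_t / (1 - alpha_bar_t)
    abar = alphas_cumprod[timesteps]
    return abar / (1.0 - abar)

alphas_cumprod = torch.linspace(0.9999, 0.01, 1000)  # toy schedule, for illustration only
timesteps = torch.randint(0, 1000, (4,))
snr = compute_snr_sketch(alphas_cumprod, timesteps)
gamma = 5.0
# min(SNR, gamma) / SNR, matching the stack/min construction in the training loop
mse_loss_weights = torch.stack([snr, gamma * torch.ones_like(snr)], dim=1).min(dim=1)[0] / snr
per_sample_mse = torch.rand(4)  # stand-in for loss.mean(dim=[1, 2, 3])
loss = (per_sample_mse * mse_loss_weights).mean()
```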
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_ddim_flax.py DELETED
@@ -1,305 +0,0 @@
- # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
- # and https://github.com/hojonathanho/diffusion
-
- from dataclasses import dataclass
- from typing import Optional, Tuple, Union
-
- import flax
- import jax.numpy as jnp
-
- from ..configuration_utils import ConfigMixin, register_to_config
- from .scheduling_utils_flax import (
-     CommonSchedulerState,
-     FlaxKarrasDiffusionSchedulers,
-     FlaxSchedulerMixin,
-     FlaxSchedulerOutput,
-     add_noise_common,
-     get_velocity_common,
- )
-
-
- @flax.struct.dataclass
- class DDIMSchedulerState:
-     common: CommonSchedulerState
-     final_alpha_cumprod: jnp.ndarray
-
-     # setable values
-     init_noise_sigma: jnp.ndarray
-     timesteps: jnp.ndarray
-     num_inference_steps: Optional[int] = None
-
-     @classmethod
-     def create(
-         cls,
-         common: CommonSchedulerState,
-         final_alpha_cumprod: jnp.ndarray,
-         init_noise_sigma: jnp.ndarray,
-         timesteps: jnp.ndarray,
-     ):
-         return cls(
-             common=common,
-             final_alpha_cumprod=final_alpha_cumprod,
-             init_noise_sigma=init_noise_sigma,
-             timesteps=timesteps,
-         )
-
-
- @dataclass
- class FlaxDDIMSchedulerOutput(FlaxSchedulerOutput):
-     state: DDIMSchedulerState
-
-
- class FlaxDDIMScheduler(FlaxSchedulerMixin, ConfigMixin):
-     """
-     Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising
-     diffusion probabilistic models (DDPMs) with non-Markovian guidance.
-
-     [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
-     function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
-     [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
-     [`~SchedulerMixin.from_pretrained`] functions.
-
-     For more details, see the original paper: https://arxiv.org/abs/2010.02502
-
-     Args:
-         num_train_timesteps (`int`): number of diffusion steps used to train the model.
-         beta_start (`float`): the starting `beta` value of inference.
-         beta_end (`float`): the final `beta` value.
-         beta_schedule (`str`):
-             the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
-             `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
-         trained_betas (`jnp.ndarray`, optional):
-             option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
-         set_alpha_to_one (`bool`, default `True`):
-             each diffusion step uses the value of alphas product at that step and at the previous one. For the final
-             step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
-             otherwise it uses the value of alpha at step 0.
-         steps_offset (`int`, default `0`):
-             an offset added to the inference steps. You can use a combination of `offset=1` and
-             `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
-             stable diffusion.
-         prediction_type (`str`, default `epsilon`):
-             indicates how the model output is interpreted. One of `epsilon`, `sample`, or `v_prediction`.
-         dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
-             the `dtype` used for params and computation.
-     """
-
-     _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
-
-     dtype: jnp.dtype
-
-     @property
-     def has_state(self):
-         return True
-
-     @register_to_config
-     def __init__(
-         self,
-         num_train_timesteps: int = 1000,
-         beta_start: float = 0.0001,
-         beta_end: float = 0.02,
-         beta_schedule: str = "linear",
-         trained_betas: Optional[jnp.ndarray] = None,
-         set_alpha_to_one: bool = True,
-         steps_offset: int = 0,
-         prediction_type: str = "epsilon",
-         dtype: jnp.dtype = jnp.float32,
-     ):
-         self.dtype = dtype
-
-     def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDIMSchedulerState:
-         if common is None:
-             common = CommonSchedulerState.create(self)
-
-         # At every step in ddim, we are looking into the previous alphas_cumprod
-         # For the final step, there is no previous alphas_cumprod because we are already at 0
-         # `set_alpha_to_one` decides whether we set this parameter simply to one or
-         # whether we use the final alpha of the "non-previous" one.
-         final_alpha_cumprod = (
-             jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0]
-         )
-
-         # standard deviation of the initial noise distribution
-         init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
-
-         timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
-
-         return DDIMSchedulerState.create(
-             common=common,
-             final_alpha_cumprod=final_alpha_cumprod,
-             init_noise_sigma=init_noise_sigma,
-             timesteps=timesteps,
-         )
-
-     def scale_model_input(
-         self, state: DDIMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
-     ) -> jnp.ndarray:
-         """
-         Args:
-             state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance.
-             sample (`jnp.ndarray`): input sample
-             timestep (`int`, optional): current timestep
-
-         Returns:
-             `jnp.ndarray`: scaled input sample
-         """
-         return sample
-
-     def set_timesteps(
-         self, state: DDIMSchedulerState, num_inference_steps: int, shape: Tuple = ()
-     ) -> DDIMSchedulerState:
-         """
-         Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
-
-         Args:
-             state (`DDIMSchedulerState`):
-                 the `FlaxDDIMScheduler` state data class instance.
-             num_inference_steps (`int`):
-                 the number of diffusion steps used when generating samples with a pre-trained model.
-         """
-         step_ratio = self.config.num_train_timesteps // num_inference_steps
-         # creates integer timesteps by multiplying by ratio
-         # rounding to avoid issues when num_inference_steps is a power of 3
-         timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1] + self.config.steps_offset
-
-         return state.replace(
-             num_inference_steps=num_inference_steps,
-             timesteps=timesteps,
-         )
-
-     def _get_variance(self, state: DDIMSchedulerState, timestep, prev_timestep):
-         alpha_prod_t = state.common.alphas_cumprod[timestep]
-         alpha_prod_t_prev = jnp.where(
-             prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod
-         )
-         beta_prod_t = 1 - alpha_prod_t
-         beta_prod_t_prev = 1 - alpha_prod_t_prev
-
-         variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
-
-         return variance
-
-     def step(
-         self,
-         state: DDIMSchedulerState,
-         model_output: jnp.ndarray,
-         timestep: int,
-         sample: jnp.ndarray,
-         eta: float = 0.0,
-         return_dict: bool = True,
-     ) -> Union[FlaxDDIMSchedulerOutput, Tuple]:
-         """
-         Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
-         process from the learned model outputs (most often the predicted noise).
-
-         Args:
-             state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance.
-             model_output (`jnp.ndarray`): direct output from learned diffusion model.
-             timestep (`int`): current discrete timestep in the diffusion chain.
-             sample (`jnp.ndarray`):
-                 current instance of sample being created by diffusion process.
-             return_dict (`bool`): option for returning tuple rather than FlaxDDIMSchedulerOutput class
-
-         Returns:
-             [`FlaxDDIMSchedulerOutput`] or `tuple`: [`FlaxDDIMSchedulerOutput`] if `return_dict` is True, otherwise a
-             `tuple`. When returning a tuple, the first element is the sample tensor.
-
-         """
-         if state.num_inference_steps is None:
-             raise ValueError(
-                 "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
-             )
-
-         # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
-         # Ideally, read the DDIM paper in detail for a full understanding.
-
-         # Notation (<variable name> -> <name in paper>)
-         # - pred_noise_t -> e_theta(x_t, t)
-         # - pred_original_sample -> f_theta(x_t, t) or x_0
-         # - std_dev_t -> sigma_t
-         # - eta -> η
-         # - pred_sample_direction -> "direction pointing to x_t"
-         # - pred_prev_sample -> "x_t-1"
-
-         # 1. get previous step value (=t-1)
-         prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps
-
-         alphas_cumprod = state.common.alphas_cumprod
-         final_alpha_cumprod = state.final_alpha_cumprod
-
-         # 2. compute alphas, betas
-         alpha_prod_t = alphas_cumprod[timestep]
-         alpha_prod_t_prev = jnp.where(prev_timestep >= 0, alphas_cumprod[prev_timestep], final_alpha_cumprod)
-
-         beta_prod_t = 1 - alpha_prod_t
-
-         # 3. compute predicted original sample from predicted noise also called
-         # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
-         if self.config.prediction_type == "epsilon":
-             pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
-             pred_epsilon = model_output
-         elif self.config.prediction_type == "sample":
-             pred_original_sample = model_output
-             pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
-         elif self.config.prediction_type == "v_prediction":
-             pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
-             pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
-         else:
-             raise ValueError(
-                 f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
-                 " `v_prediction`"
-             )
-
-         # 4. compute variance: "sigma_t(η)" -> see formula (16)
-         # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
-         variance = self._get_variance(state, timestep, prev_timestep)
-         std_dev_t = eta * variance ** (0.5)
-
-         # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
-         pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon
-
-         # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
-         prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
-
-         if not return_dict:
-             return (prev_sample, state)
-
-         return FlaxDDIMSchedulerOutput(prev_sample=prev_sample, state=state)
-
-     def add_noise(
-         self,
-         state: DDIMSchedulerState,
-         original_samples: jnp.ndarray,
-         noise: jnp.ndarray,
-         timesteps: jnp.ndarray,
-     ) -> jnp.ndarray:
-         return add_noise_common(state.common, original_samples, noise, timesteps)
-
-     def get_velocity(
-         self,
-         state: DDIMSchedulerState,
-         sample: jnp.ndarray,
-         noise: jnp.ndarray,
-         timesteps: jnp.ndarray,
-     ) -> jnp.ndarray:
-         return get_velocity_common(state.common, sample, noise, timesteps)
-
-     def __len__(self):
-         return self.config.num_train_timesteps
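
For intuition, the deterministic (eta = 0) update computed by `step` is formula (12) of the DDIM paper: first recover the predicted x_0, then re-add the "direction pointing to x_t" at the previous noise level. A minimal sketch with scalar alpha products, assuming an epsilon-predicting model (illustrative only, not part of the scheduler API):

```python
import jax.numpy as jnp

def ddim_step_sketch(sample, model_output, alpha_prod_t, alpha_prod_t_prev):
    # "predicted x_0", formula (12) of https://arxiv.org/pdf/2010.02502.pdf
    beta_prod_t = 1.0 - alpha_prod_t
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    # deterministic "direction pointing to x_t" (eta = 0, so no sigma term)
    pred_sample_direction = (1.0 - alpha_prod_t_prev) ** 0.5 * model_output
    return alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

# Toy usage with scalar alpha products
x_prev = ddim_step_sketch(jnp.ones((1, 4, 8, 8)), jnp.zeros((1, 4, 8, 8)), 0.5, 0.7)
```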
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py DELETED
@@ -1,229 +0,0 @@
- # coding=utf-8
- # Copyright 2023 HuggingFace Inc.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import gc
- import unittest
-
- import numpy as np
- import torch
- from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
-
- from diffusers import (
-     AutoencoderKL,
-     DDIMScheduler,
-     StableDiffusionAttendAndExcitePipeline,
-     UNet2DConditionModel,
- )
- from diffusers.utils import load_numpy, skip_mps, slow
- from diffusers.utils.testing_utils import require_torch_gpu
-
- from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
- from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
-
-
- torch.backends.cuda.matmul.allow_tf32 = False
-
-
- @skip_mps
- class StableDiffusionAttendAndExcitePipelineFastTests(
-     PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
- ):
-     pipeline_class = StableDiffusionAttendAndExcitePipeline
-     test_attention_slicing = False
-     params = TEXT_TO_IMAGE_PARAMS
-     batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
-     image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
-     image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
-
-     # Attend and excite requires being able to run a backward pass at
-     # inference time. There's no deterministic backward operator for pad
-
-     @classmethod
-     def setUpClass(cls):
-         super().setUpClass()
-         torch.use_deterministic_algorithms(False)
-
-     @classmethod
-     def tearDownClass(cls):
-         super().tearDownClass()
-         torch.use_deterministic_algorithms(True)
-
-     def get_dummy_components(self):
-         torch.manual_seed(0)
-         unet = UNet2DConditionModel(
-             block_out_channels=(32, 64),
-             layers_per_block=1,
-             sample_size=32,
-             in_channels=4,
-             out_channels=4,
-             down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
-             up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
-             cross_attention_dim=32,
-             # SD2-specific config below
-             attention_head_dim=(2, 4),
-             use_linear_projection=True,
-         )
-         scheduler = DDIMScheduler(
-             beta_start=0.00085,
-             beta_end=0.012,
-             beta_schedule="scaled_linear",
-             clip_sample=False,
-             set_alpha_to_one=False,
-         )
-         torch.manual_seed(0)
-         vae = AutoencoderKL(
-             block_out_channels=[32, 64],
-             in_channels=3,
-             out_channels=3,
-             down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
-             up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
-             latent_channels=4,
-             sample_size=128,
-         )
-         torch.manual_seed(0)
-         text_encoder_config = CLIPTextConfig(
-             bos_token_id=0,
-             eos_token_id=2,
-             hidden_size=32,
-             intermediate_size=37,
-             layer_norm_eps=1e-05,
-             num_attention_heads=4,
-             num_hidden_layers=5,
-             pad_token_id=1,
-             vocab_size=1000,
-             # SD2-specific config below
-             hidden_act="gelu",
-             projection_dim=512,
-         )
-         text_encoder = CLIPTextModel(text_encoder_config)
-         tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-
-         components = {
-             "unet": unet,
-             "scheduler": scheduler,
-             "vae": vae,
-             "text_encoder": text_encoder,
-             "tokenizer": tokenizer,
-             "safety_checker": None,
-             "feature_extractor": None,
-         }
-
-         return components
-
-     def get_dummy_inputs(self, device, seed=0):
-         if str(device).startswith("mps"):
-             generator = torch.manual_seed(seed)
-         else:
-             generator = torch.Generator(device=device).manual_seed(seed)
-         inputs = {
-             "prompt": "a cat and a frog",
-             "token_indices": [2, 5],
-             "generator": generator,
-             "num_inference_steps": 1,
-             "guidance_scale": 6.0,
-             "output_type": "numpy",
-             "max_iter_to_alter": 2,
-             "thresholds": {0: 0.7},
-         }
-         return inputs
-
-     def test_inference(self):
-         device = "cpu"
-
-         components = self.get_dummy_components()
-         pipe = self.pipeline_class(**components)
-         pipe.to(device)
-         pipe.set_progress_bar_config(disable=None)
-
-         inputs = self.get_dummy_inputs(device)
-         image = pipe(**inputs).images
-         image_slice = image[0, -3:, -3:, -1]
-
-         self.assertEqual(image.shape, (1, 64, 64, 3))
-         expected_slice = np.array(
-             [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
-         )
-         max_diff = np.abs(image_slice.flatten() - expected_slice).max()
-         self.assertLessEqual(max_diff, 1e-3)
-
-     def test_cpu_offload_forward_pass(self):
-         super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)
-
-     def test_inference_batch_consistent(self):
-         # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
-         self._test_inference_batch_consistent(batch_sizes=[1, 2])
-
-     def test_inference_batch_single_identical(self):
-         self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)
-
-     def test_dict_tuple_outputs_equivalent(self):
-         super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
-
-     def test_pt_np_pil_outputs_equivalent(self):
-         super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)
-
-     def test_save_load_local(self):
-         super().test_save_load_local(expected_max_difference=5e-4)
-
-     def test_save_load_optional_components(self):
-         super().test_save_load_optional_components(expected_max_difference=4e-4)
-
-
- @require_torch_gpu
- @slow
- class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
-     # Attend and excite requires being able to run a backward pass at
-     # inference time. There's no deterministic backward operator for pad
-
-     @classmethod
-     def setUpClass(cls):
-         super().setUpClass()
-         torch.use_deterministic_algorithms(False)
-
-     @classmethod
-     def tearDownClass(cls):
-         super().tearDownClass()
-         torch.use_deterministic_algorithms(True)
-
-     def tearDown(self):
-         super().tearDown()
-         gc.collect()
-         torch.cuda.empty_cache()
-
-     def test_attend_and_excite_fp16(self):
-         generator = torch.manual_seed(51)
-
-         pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
-             "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
-         )
-         pipe.to("cuda")
-
-         prompt = "a painting of an elephant with glasses"
-         token_indices = [5, 7]
-
-         image = pipe(
-             prompt=prompt,
-             token_indices=token_indices,
-             guidance_scale=7.5,
-             generator=generator,
-             num_inference_steps=5,
-             max_iter_to_alter=5,
-             output_type="numpy",
-         ).images[0]
-
-         expected_image = load_numpy(
-             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
-         )
-         assert np.abs((expected_image - image).max()) < 5e-1
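
The `token_indices` values above are positions in the CLIP-tokenized prompt, where index 0 is the BOS token; that is why "cat" and "frog" in "a cat and a frog" sit at indices 2 and 5. A quick way to find the indices for a new prompt (a hypothetical helper snippet, not part of the test file):

```python
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
ids = tokenizer("a cat and a frog").input_ids
# Print each position with its token; pick the indices of the subjects to excite
for i, tok in enumerate(tokenizer.convert_ids_to_tokens(ids)):
    print(i, tok)
```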
spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/deepfashion.py DELETED
@@ -1,53 +0,0 @@
- # dataset settings
- dataset_type = 'DeepFashionDataset'
- data_root = 'data/DeepFashion/In-shop/'
- img_norm_cfg = dict(
-     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
-     dict(type='Resize', img_scale=(750, 1101), keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(750, 1101),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     imgs_per_gpu=2,
-     workers_per_gpu=1,
-     train=dict(
-         type=dataset_type,
-         ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
-         img_prefix=data_root + 'Img/',
-         pipeline=train_pipeline,
-         data_root=data_root),
-     val=dict(
-         type=dataset_type,
-         ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
-         img_prefix=data_root + 'Img/',
-         pipeline=test_pipeline,
-         data_root=data_root),
-     test=dict(
-         type=dataset_type,
-         ann_file=data_root +
-         'annotations/DeepFashion_segmentation_gallery.json',
-         img_prefix=data_root + 'Img/',
-         pipeline=test_pipeline,
-         data_root=data_root))
- evaluation = dict(interval=5, metric=['bbox', 'segm'])
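
Dataset bases like this one are pulled in through MMDetection's `_base_` inheritance and can be inspected with mmcv before training. A minimal sketch, assuming mmcv (pre-2.0) is installed and the file is loaded from the repo's config tree:

```python
from mmcv import Config

# Any model config that lists this file in its _base_ resolves the same fields
cfg = Config.fromfile('configs/_base_/datasets/deepfashion.py')
print(cfg.data.train.ann_file)  # data_root + 'annotations/DeepFashion_segmentation_query.json'
print(cfg.evaluation)           # {'interval': 5, 'metric': ['bbox', 'segm']}
```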
spaces/Andy1621/uniformer_image_detection/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py DELETED
@@ -1,12 +0,0 @@
- _base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py'
- model = dict(
-     pretrained='open-mmlab://resnext101_32x4d',
-     backbone=dict(
-         type='ResNeXt',
-         depth=101,
-         groups=32,
-         base_width=4,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         style='pytorch'))
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/optimization/augmentations.py DELETED
@@ -1,42 +0,0 @@
- import torch
- from torch import nn
- import kornia.augmentation as K
-
-
- class ImageAugmentations(nn.Module):
-     def __init__(self, output_size, augmentations_number, p=0.7):
-         super().__init__()
-         self.output_size = output_size
-         self.augmentations_number = augmentations_number
-
-         self.augmentations = nn.Sequential(
-             K.RandomAffine(degrees=15, translate=0.1, p=p, padding_mode="border"),  # type: ignore
-             K.RandomPerspective(0.7, p=p),
-         )
-
-         self.avg_pool = nn.AdaptiveAvgPool2d((self.output_size, self.output_size))
-
-     def forward(self, input):
-         """Extends the input batch with augmentations
-
-         If the input consists of images [I1, I2], the extended augmented output
-         will be [I1_resized, I2_resized, I1_aug1, I2_aug1, I1_aug2, I2_aug2, ...]
-
-         Args:
-             input ([type]): input batch of shape [batch, C, H, W]
-
-         Returns:
-             updated batch: of shape [batch * augmentations_number, C, H, W]
-         """
-         # We want to multiply the number of images in the batch, in contrast to regular augmentations
-         # that do not change the number of samples in the batch
-         resized_images = self.avg_pool(input)
-         resized_images = torch.tile(resized_images, dims=(self.augmentations_number, 1, 1, 1))
-
-         batch_size = input.shape[0]
-         # We want at least one non augmented image
-         non_augmented_batch = resized_images[:batch_size]
-         augmented_batch = self.augmentations(resized_images[batch_size:])
-         updated_batch = torch.cat([non_augmented_batch, augmented_batch], dim=0)
-
-         return updated_batch
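
Because the resized batch is tiled `augmentations_number` times before the augmentations run, the output batch grows by that factor, with the first `batch` entries left un-augmented. A small usage sketch, assuming the class above and kornia installed:

```python
import torch

aug = ImageAugmentations(output_size=224, augmentations_number=4)
batch = torch.rand(2, 3, 256, 256)
out = aug(batch)
print(out.shape)  # torch.Size([8, 3, 224, 224]): 2 resized originals + 6 augmented copies
```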
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/diffusionmodules/model.py DELETED
@@ -1,852 +0,0 @@
1
- # pytorch_diffusion + derived encoder decoder
2
- import math
3
- import torch
4
- import torch.nn as nn
5
- import numpy as np
6
- from einops import rearrange
7
- from typing import Optional, Any
8
-
9
- from ldm.modules.attention import MemoryEfficientCrossAttention
10
-
11
- try:
12
- import xformers
13
- import xformers.ops
14
- XFORMERS_IS_AVAILBLE = True
15
- except:
16
- XFORMERS_IS_AVAILBLE = False
17
- print("No module 'xformers'. Proceeding without it.")
18
-
19
-
20
- def get_timestep_embedding(timesteps, embedding_dim):
21
- """
22
- This matches the implementation in Denoising Diffusion Probabilistic Models:
23
- From Fairseq.
24
- Build sinusoidal embeddings.
25
- This matches the implementation in tensor2tensor, but differs slightly
26
- from the description in Section 3.5 of "Attention Is All You Need".
27
- """
28
- assert len(timesteps.shape) == 1
29
-
30
- half_dim = embedding_dim // 2
31
- emb = math.log(10000) / (half_dim - 1)
32
- emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
33
- emb = emb.to(device=timesteps.device)
34
- emb = timesteps.float()[:, None] * emb[None, :]
35
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
36
- if embedding_dim % 2 == 1: # zero pad
37
- emb = torch.nn.functional.pad(emb, (0,1,0,0))
38
- return emb
39
-
40
-
41
- def nonlinearity(x):
42
- # swish
43
- return x*torch.sigmoid(x)
44
-
45
-
46
- def Normalize(in_channels, num_groups=32):
47
- return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
48
-
49
-
50
- class Upsample(nn.Module):
51
- def __init__(self, in_channels, with_conv):
52
- super().__init__()
53
- self.with_conv = with_conv
54
- if self.with_conv:
55
- self.conv = torch.nn.Conv2d(in_channels,
56
- in_channels,
57
- kernel_size=3,
58
- stride=1,
59
- padding=1)
60
-
61
- def forward(self, x):
62
- x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
63
- if self.with_conv:
64
- x = self.conv(x)
65
- return x
66
-
67
-
68
- class Downsample(nn.Module):
69
- def __init__(self, in_channels, with_conv):
70
- super().__init__()
71
- self.with_conv = with_conv
72
- if self.with_conv:
73
- # no asymmetric padding in torch conv, must do it ourselves
74
- self.conv = torch.nn.Conv2d(in_channels,
75
- in_channels,
76
- kernel_size=3,
77
- stride=2,
78
- padding=0)
79
-
80
- def forward(self, x):
81
- if self.with_conv:
82
- pad = (0,1,0,1)
83
- x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
84
- x = self.conv(x)
85
- else:
86
- x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
87
- return x
88
-
89
-
90
- class ResnetBlock(nn.Module):
91
- def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
92
- dropout, temb_channels=512):
93
- super().__init__()
94
- self.in_channels = in_channels
95
- out_channels = in_channels if out_channels is None else out_channels
96
- self.out_channels = out_channels
97
- self.use_conv_shortcut = conv_shortcut
98
-
99
- self.norm1 = Normalize(in_channels)
100
- self.conv1 = torch.nn.Conv2d(in_channels,
101
- out_channels,
102
- kernel_size=3,
103
- stride=1,
104
- padding=1)
105
- if temb_channels > 0:
106
- self.temb_proj = torch.nn.Linear(temb_channels,
107
- out_channels)
108
- self.norm2 = Normalize(out_channels)
109
- self.dropout = torch.nn.Dropout(dropout)
110
- self.conv2 = torch.nn.Conv2d(out_channels,
111
- out_channels,
112
- kernel_size=3,
113
- stride=1,
114
- padding=1)
115
- if self.in_channels != self.out_channels:
116
- if self.use_conv_shortcut:
117
- self.conv_shortcut = torch.nn.Conv2d(in_channels,
118
- out_channels,
119
- kernel_size=3,
120
- stride=1,
121
- padding=1)
122
- else:
123
- self.nin_shortcut = torch.nn.Conv2d(in_channels,
124
- out_channels,
125
- kernel_size=1,
126
- stride=1,
127
- padding=0)
128
-
129
- def forward(self, x, temb):
130
- h = x
131
- h = self.norm1(h)
132
- h = nonlinearity(h)
133
- h = self.conv1(h)
134
-
135
- if temb is not None:
136
- h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
137
-
138
- h = self.norm2(h)
139
- h = nonlinearity(h)
140
- h = self.dropout(h)
141
- h = self.conv2(h)
142
-
143
- if self.in_channels != self.out_channels:
144
- if self.use_conv_shortcut:
145
- x = self.conv_shortcut(x)
146
- else:
147
- x = self.nin_shortcut(x)
148
-
149
- return x+h
150
-
151
-
152
- class AttnBlock(nn.Module):
153
- def __init__(self, in_channels):
154
- super().__init__()
155
- self.in_channels = in_channels
156
-
157
- self.norm = Normalize(in_channels)
158
- self.q = torch.nn.Conv2d(in_channels,
159
- in_channels,
160
- kernel_size=1,
161
- stride=1,
162
- padding=0)
163
- self.k = torch.nn.Conv2d(in_channels,
164
- in_channels,
165
- kernel_size=1,
166
- stride=1,
167
- padding=0)
168
- self.v = torch.nn.Conv2d(in_channels,
169
- in_channels,
170
- kernel_size=1,
171
- stride=1,
172
- padding=0)
173
- self.proj_out = torch.nn.Conv2d(in_channels,
174
- in_channels,
175
- kernel_size=1,
176
- stride=1,
177
- padding=0)
178
-
179
- def forward(self, x):
180
- h_ = x
181
- h_ = self.norm(h_)
182
- q = self.q(h_)
183
- k = self.k(h_)
184
- v = self.v(h_)
185
-
186
- # compute attention
187
- b,c,h,w = q.shape
188
- q = q.reshape(b,c,h*w)
189
- q = q.permute(0,2,1) # b,hw,c
190
- k = k.reshape(b,c,h*w) # b,c,hw
191
- w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
192
- w_ = w_ * (int(c)**(-0.5))
193
- w_ = torch.nn.functional.softmax(w_, dim=2)
194
-
195
- # attend to values
196
- v = v.reshape(b,c,h*w)
197
- w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
198
- h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
199
- h_ = h_.reshape(b,c,h,w)
200
-
201
- h_ = self.proj_out(h_)
202
-
203
- return x+h_
204
-
205
- class MemoryEfficientAttnBlock(nn.Module):
206
- """
207
- Uses xformers efficient implementation,
208
- see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
209
- Note: this is a single-head self-attention operation
210
- """
211
- #
212
- def __init__(self, in_channels):
213
- super().__init__()
214
- self.in_channels = in_channels
215
-
216
- self.norm = Normalize(in_channels)
217
- self.q = torch.nn.Conv2d(in_channels,
218
- in_channels,
219
- kernel_size=1,
220
- stride=1,
221
- padding=0)
222
- self.k = torch.nn.Conv2d(in_channels,
223
- in_channels,
224
- kernel_size=1,
225
- stride=1,
226
- padding=0)
227
- self.v = torch.nn.Conv2d(in_channels,
228
- in_channels,
229
- kernel_size=1,
230
- stride=1,
231
- padding=0)
232
- self.proj_out = torch.nn.Conv2d(in_channels,
233
- in_channels,
234
- kernel_size=1,
235
- stride=1,
236
- padding=0)
237
- self.attention_op: Optional[Any] = None
238
-
239
- def forward(self, x):
240
- h_ = x
241
- h_ = self.norm(h_)
242
- q = self.q(h_)
243
- k = self.k(h_)
244
- v = self.v(h_)
245
-
246
- # compute attention
247
- B, C, H, W = q.shape
248
- q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))
249
-
250
- q, k, v = map(
251
- lambda t: t.unsqueeze(3)
252
- .reshape(B, t.shape[1], 1, C)
253
- .permute(0, 2, 1, 3)
254
- .reshape(B * 1, t.shape[1], C)
255
- .contiguous(),
256
- (q, k, v),
257
- )
258
- out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
259
-
260
- out = (
261
- out.unsqueeze(0)
262
- .reshape(B, 1, out.shape[1], C)
263
- .permute(0, 2, 1, 3)
264
- .reshape(B, out.shape[1], C)
265
- )
266
- out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
267
- out = self.proj_out(out)
268
- return x+out
269
-
270
-
271
- class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):
272
- def forward(self, x, context=None, mask=None):
273
- b, c, h, w = x.shape
274
- x = rearrange(x, 'b c h w -> b (h w) c')
275
- out = super().forward(x, context=context, mask=mask)
276
- out = rearrange(out, 'b (h w) c -> b c h w', h=h, w=w, c=c)
277
- return x + out
278
-
279
-
280
- def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
281
- assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
282
- if XFORMERS_IS_AVAILBLE and attn_type == "vanilla":
283
- attn_type = "vanilla-xformers"
284
- print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
285
- if attn_type == "vanilla":
286
- assert attn_kwargs is None
287
- return AttnBlock(in_channels)
288
- elif attn_type == "vanilla-xformers":
289
- print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...")
290
- return MemoryEfficientAttnBlock(in_channels)
291
- elif type == "memory-efficient-cross-attn":
292
- attn_kwargs["query_dim"] = in_channels
293
- return MemoryEfficientCrossAttentionWrapper(**attn_kwargs)
294
- elif attn_type == "none":
295
- return nn.Identity(in_channels)
296
- else:
297
- raise NotImplementedError()
298
-
299
-
300
- class Model(nn.Module):
301
- def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
302
- attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
303
- resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
304
- super().__init__()
305
- if use_linear_attn: attn_type = "linear"
306
- self.ch = ch
307
- self.temb_ch = self.ch*4
308
- self.num_resolutions = len(ch_mult)
309
- self.num_res_blocks = num_res_blocks
310
- self.resolution = resolution
311
- self.in_channels = in_channels
312
-
313
- self.use_timestep = use_timestep
314
- if self.use_timestep:
315
- # timestep embedding
316
- self.temb = nn.Module()
317
- self.temb.dense = nn.ModuleList([
318
- torch.nn.Linear(self.ch,
319
- self.temb_ch),
320
- torch.nn.Linear(self.temb_ch,
321
- self.temb_ch),
322
- ])
323
-
324
- # downsampling
325
- self.conv_in = torch.nn.Conv2d(in_channels,
326
- self.ch,
327
- kernel_size=3,
328
- stride=1,
329
- padding=1)
330
-
331
- curr_res = resolution
332
- in_ch_mult = (1,)+tuple(ch_mult)
333
- self.down = nn.ModuleList()
334
- for i_level in range(self.num_resolutions):
335
- block = nn.ModuleList()
336
- attn = nn.ModuleList()
337
- block_in = ch*in_ch_mult[i_level]
338
- block_out = ch*ch_mult[i_level]
339
- for i_block in range(self.num_res_blocks):
340
- block.append(ResnetBlock(in_channels=block_in,
341
- out_channels=block_out,
342
- temb_channels=self.temb_ch,
343
- dropout=dropout))
344
- block_in = block_out
345
- if curr_res in attn_resolutions:
346
- attn.append(make_attn(block_in, attn_type=attn_type))
347
- down = nn.Module()
348
- down.block = block
349
- down.attn = attn
350
- if i_level != self.num_resolutions-1:
351
- down.downsample = Downsample(block_in, resamp_with_conv)
352
- curr_res = curr_res // 2
353
- self.down.append(down)
354
-
355
- # middle
356
- self.mid = nn.Module()
357
- self.mid.block_1 = ResnetBlock(in_channels=block_in,
358
- out_channels=block_in,
359
- temb_channels=self.temb_ch,
360
- dropout=dropout)
361
- self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
362
- self.mid.block_2 = ResnetBlock(in_channels=block_in,
363
- out_channels=block_in,
364
- temb_channels=self.temb_ch,
365
- dropout=dropout)
366
-
367
- # upsampling
368
- self.up = nn.ModuleList()
369
- for i_level in reversed(range(self.num_resolutions)):
370
- block = nn.ModuleList()
371
- attn = nn.ModuleList()
372
- block_out = ch*ch_mult[i_level]
373
- skip_in = ch*ch_mult[i_level]
374
- for i_block in range(self.num_res_blocks+1):
375
- if i_block == self.num_res_blocks:
376
- skip_in = ch*in_ch_mult[i_level]
377
- block.append(ResnetBlock(in_channels=block_in+skip_in,
378
- out_channels=block_out,
379
- temb_channels=self.temb_ch,
380
- dropout=dropout))
381
- block_in = block_out
382
- if curr_res in attn_resolutions:
383
- attn.append(make_attn(block_in, attn_type=attn_type))
384
- up = nn.Module()
385
- up.block = block
386
- up.attn = attn
387
- if i_level != 0:
388
- up.upsample = Upsample(block_in, resamp_with_conv)
389
- curr_res = curr_res * 2
390
- self.up.insert(0, up) # prepend to get consistent order
391
-
392
- # end
393
- self.norm_out = Normalize(block_in)
394
- self.conv_out = torch.nn.Conv2d(block_in,
395
- out_ch,
396
- kernel_size=3,
397
- stride=1,
398
- padding=1)
399
-
400
- def forward(self, x, t=None, context=None):
401
- #assert x.shape[2] == x.shape[3] == self.resolution
402
- if context is not None:
403
- # assume aligned context, cat along channel axis
404
- x = torch.cat((x, context), dim=1)
405
- if self.use_timestep:
406
- # timestep embedding
407
- assert t is not None
408
- temb = get_timestep_embedding(t, self.ch)
409
- temb = self.temb.dense[0](temb)
410
- temb = nonlinearity(temb)
411
- temb = self.temb.dense[1](temb)
412
- else:
413
- temb = None
414
-
415
- # downsampling
416
- hs = [self.conv_in(x)]
417
- for i_level in range(self.num_resolutions):
418
- for i_block in range(self.num_res_blocks):
419
- h = self.down[i_level].block[i_block](hs[-1], temb)
420
- if len(self.down[i_level].attn) > 0:
421
- h = self.down[i_level].attn[i_block](h)
422
- hs.append(h)
423
- if i_level != self.num_resolutions-1:
424
- hs.append(self.down[i_level].downsample(hs[-1]))
425
-
426
- # middle
427
- h = hs[-1]
428
- h = self.mid.block_1(h, temb)
429
- h = self.mid.attn_1(h)
430
- h = self.mid.block_2(h, temb)
431
-
432
- # upsampling
433
- for i_level in reversed(range(self.num_resolutions)):
434
- for i_block in range(self.num_res_blocks+1):
435
- h = self.up[i_level].block[i_block](
436
- torch.cat([h, hs.pop()], dim=1), temb)
437
- if len(self.up[i_level].attn) > 0:
438
- h = self.up[i_level].attn[i_block](h)
439
- if i_level != 0:
440
- h = self.up[i_level].upsample(h)
441
-
442
- # end
443
- h = self.norm_out(h)
444
- h = nonlinearity(h)
445
- h = self.conv_out(h)
446
- return h
447
-
448
- def get_last_layer(self):
449
- return self.conv_out.weight
450
-
451
-
452
- class Encoder(nn.Module):
453
- def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
454
- attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
455
- resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
456
- **ignore_kwargs):
457
- super().__init__()
458
- if use_linear_attn: attn_type = "linear"
459
- self.ch = ch
460
- self.temb_ch = 0
461
- self.num_resolutions = len(ch_mult)
462
- self.num_res_blocks = num_res_blocks
463
- self.resolution = resolution
464
- self.in_channels = in_channels
465
-
466
- # downsampling
467
- self.conv_in = torch.nn.Conv2d(in_channels,
468
- self.ch,
469
- kernel_size=3,
470
- stride=1,
471
- padding=1)
472
-
473
- curr_res = resolution
474
- in_ch_mult = (1,)+tuple(ch_mult)
475
- self.in_ch_mult = in_ch_mult
476
- self.down = nn.ModuleList()
477
- for i_level in range(self.num_resolutions):
478
- block = nn.ModuleList()
479
- attn = nn.ModuleList()
480
- block_in = ch*in_ch_mult[i_level]
481
- block_out = ch*ch_mult[i_level]
482
- for i_block in range(self.num_res_blocks):
483
- block.append(ResnetBlock(in_channels=block_in,
484
- out_channels=block_out,
485
- temb_channels=self.temb_ch,
486
- dropout=dropout))
487
- block_in = block_out
488
- if curr_res in attn_resolutions:
489
- attn.append(make_attn(block_in, attn_type=attn_type))
490
- down = nn.Module()
491
- down.block = block
492
- down.attn = attn
493
- if i_level != self.num_resolutions-1:
494
- down.downsample = Downsample(block_in, resamp_with_conv)
495
- curr_res = curr_res // 2
496
- self.down.append(down)
497
-
498
- # middle
499
- self.mid = nn.Module()
500
- self.mid.block_1 = ResnetBlock(in_channels=block_in,
501
- out_channels=block_in,
502
- temb_channels=self.temb_ch,
503
- dropout=dropout)
504
- self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
505
- self.mid.block_2 = ResnetBlock(in_channels=block_in,
506
- out_channels=block_in,
507
- temb_channels=self.temb_ch,
508
- dropout=dropout)
509
-
510
- # end
511
- self.norm_out = Normalize(block_in)
512
- self.conv_out = torch.nn.Conv2d(block_in,
513
- 2*z_channels if double_z else z_channels,
514
- kernel_size=3,
515
- stride=1,
516
- padding=1)
517
-
518
- def forward(self, x):
519
- # timestep embedding
520
- temb = None
521
-
522
- # downsampling
523
- hs = [self.conv_in(x)]
524
- for i_level in range(self.num_resolutions):
525
- for i_block in range(self.num_res_blocks):
526
- h = self.down[i_level].block[i_block](hs[-1], temb)
527
- if len(self.down[i_level].attn) > 0:
528
- h = self.down[i_level].attn[i_block](h)
529
- hs.append(h)
530
- if i_level != self.num_resolutions-1:
531
- hs.append(self.down[i_level].downsample(hs[-1]))
532
-
533
- # middle
534
- h = hs[-1]
535
- h = self.mid.block_1(h, temb)
536
- h = self.mid.attn_1(h)
537
- h = self.mid.block_2(h, temb)
538
-
539
- # end
540
- h = self.norm_out(h)
541
- h = nonlinearity(h)
542
- h = self.conv_out(h)
543
- return h
544
-
545
-
546
- class Decoder(nn.Module):
547
- def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
548
- attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
549
- resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
550
- attn_type="vanilla", **ignorekwargs):
551
- super().__init__()
552
- if use_linear_attn: attn_type = "linear"
553
- self.ch = ch
554
- self.temb_ch = 0
555
- self.num_resolutions = len(ch_mult)
556
- self.num_res_blocks = num_res_blocks
557
- self.resolution = resolution
558
- self.in_channels = in_channels
559
- self.give_pre_end = give_pre_end
560
- self.tanh_out = tanh_out
561
-
562
- # compute in_ch_mult, block_in and curr_res at lowest res
563
- in_ch_mult = (1,)+tuple(ch_mult)
564
- block_in = ch*ch_mult[self.num_resolutions-1]
565
- curr_res = resolution // 2**(self.num_resolutions-1)
566
- self.z_shape = (1,z_channels,curr_res,curr_res)
567
- print("Working with z of shape {} = {} dimensions.".format(
568
- self.z_shape, np.prod(self.z_shape)))
569
-
570
- # z to block_in
571
- self.conv_in = torch.nn.Conv2d(z_channels,
572
- block_in,
573
- kernel_size=3,
574
- stride=1,
575
- padding=1)
576
-
577
- # middle
578
- self.mid = nn.Module()
579
- self.mid.block_1 = ResnetBlock(in_channels=block_in,
580
- out_channels=block_in,
581
- temb_channels=self.temb_ch,
582
- dropout=dropout)
583
- self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
584
- self.mid.block_2 = ResnetBlock(in_channels=block_in,
585
- out_channels=block_in,
586
- temb_channels=self.temb_ch,
587
- dropout=dropout)
588
-
589
- # upsampling
590
- self.up = nn.ModuleList()
591
- for i_level in reversed(range(self.num_resolutions)):
592
- block = nn.ModuleList()
593
- attn = nn.ModuleList()
594
- block_out = ch*ch_mult[i_level]
595
- for i_block in range(self.num_res_blocks+1):
596
- block.append(ResnetBlock(in_channels=block_in,
597
- out_channels=block_out,
598
- temb_channels=self.temb_ch,
599
- dropout=dropout))
600
- block_in = block_out
601
- if curr_res in attn_resolutions:
602
- attn.append(make_attn(block_in, attn_type=attn_type))
603
- up = nn.Module()
604
- up.block = block
605
- up.attn = attn
606
- if i_level != 0:
607
- up.upsample = Upsample(block_in, resamp_with_conv)
608
- curr_res = curr_res * 2
609
- self.up.insert(0, up) # prepend to get consistent order
610
-
611
- # end
612
- self.norm_out = Normalize(block_in)
613
- self.conv_out = torch.nn.Conv2d(block_in,
614
- out_ch,
615
- kernel_size=3,
616
- stride=1,
617
- padding=1)
618
-
619
- def forward(self, z):
620
- #assert z.shape[1:] == self.z_shape[1:]
621
- self.last_z_shape = z.shape
622
-
623
- # timestep embedding
624
- temb = None
625
-
626
- # z to block_in
627
- h = self.conv_in(z)
628
-
629
- # middle
630
- h = self.mid.block_1(h, temb)
631
- h = self.mid.attn_1(h)
632
- h = self.mid.block_2(h, temb)
633
-
634
- # upsampling
635
- for i_level in reversed(range(self.num_resolutions)):
636
- for i_block in range(self.num_res_blocks+1):
637
- h = self.up[i_level].block[i_block](h, temb)
638
- if len(self.up[i_level].attn) > 0:
639
- h = self.up[i_level].attn[i_block](h)
640
- if i_level != 0:
641
- h = self.up[i_level].upsample(h)
642
-
643
- # end
644
- if self.give_pre_end:
645
- return h
646
-
647
- h = self.norm_out(h)
648
- h = nonlinearity(h)
649
- h = self.conv_out(h)
650
- if self.tanh_out:
651
- h = torch.tanh(h)
652
- return h
653
-
654
-
655
- class SimpleDecoder(nn.Module):
656
- def __init__(self, in_channels, out_channels, *args, **kwargs):
657
- super().__init__()
658
- self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
659
- ResnetBlock(in_channels=in_channels,
660
- out_channels=2 * in_channels,
661
- temb_channels=0, dropout=0.0),
662
- ResnetBlock(in_channels=2 * in_channels,
663
- out_channels=4 * in_channels,
664
- temb_channels=0, dropout=0.0),
665
- ResnetBlock(in_channels=4 * in_channels,
666
- out_channels=2 * in_channels,
667
- temb_channels=0, dropout=0.0),
668
- nn.Conv2d(2*in_channels, in_channels, 1),
669
- Upsample(in_channels, with_conv=True)])
670
- # end
671
- self.norm_out = Normalize(in_channels)
672
- self.conv_out = torch.nn.Conv2d(in_channels,
673
- out_channels,
674
- kernel_size=3,
675
- stride=1,
676
-                                         padding=1)
-
-     def forward(self, x):
-         for i, layer in enumerate(self.model):
-             if i in [1,2,3]:
-                 x = layer(x, None)
-             else:
-                 x = layer(x)
-
-         h = self.norm_out(x)
-         h = nonlinearity(h)
-         x = self.conv_out(h)
-         return x
-
-
- class UpsampleDecoder(nn.Module):
-     def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
-                  ch_mult=(2,2), dropout=0.0):
-         super().__init__()
-         # upsampling
-         self.temb_ch = 0
-         self.num_resolutions = len(ch_mult)
-         self.num_res_blocks = num_res_blocks
-         block_in = in_channels
-         curr_res = resolution // 2 ** (self.num_resolutions - 1)
-         self.res_blocks = nn.ModuleList()
-         self.upsample_blocks = nn.ModuleList()
-         for i_level in range(self.num_resolutions):
-             res_block = []
-             block_out = ch * ch_mult[i_level]
-             for i_block in range(self.num_res_blocks + 1):
-                 res_block.append(ResnetBlock(in_channels=block_in,
-                                              out_channels=block_out,
-                                              temb_channels=self.temb_ch,
-                                              dropout=dropout))
-                 block_in = block_out
-             self.res_blocks.append(nn.ModuleList(res_block))
-             if i_level != self.num_resolutions - 1:
-                 self.upsample_blocks.append(Upsample(block_in, True))
-                 curr_res = curr_res * 2
-
-         # end
-         self.norm_out = Normalize(block_in)
-         self.conv_out = torch.nn.Conv2d(block_in,
-                                         out_channels,
-                                         kernel_size=3,
-                                         stride=1,
-                                         padding=1)
-
-     def forward(self, x):
-         # upsampling
-         h = x
-         for k, i_level in enumerate(range(self.num_resolutions)):
-             for i_block in range(self.num_res_blocks + 1):
-                 h = self.res_blocks[i_level][i_block](h, None)
-             if i_level != self.num_resolutions - 1:
-                 h = self.upsample_blocks[k](h)
-         h = self.norm_out(h)
-         h = nonlinearity(h)
-         h = self.conv_out(h)
-         return h
-
-
- class LatentRescaler(nn.Module):
-     def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
-         super().__init__()
-         # residual block, interpolate, residual block
-         self.factor = factor
-         self.conv_in = nn.Conv2d(in_channels,
-                                  mid_channels,
-                                  kernel_size=3,
-                                  stride=1,
-                                  padding=1)
-         self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
-                                                      out_channels=mid_channels,
-                                                      temb_channels=0,
-                                                      dropout=0.0) for _ in range(depth)])
-         self.attn = AttnBlock(mid_channels)
-         self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
-                                                      out_channels=mid_channels,
-                                                      temb_channels=0,
-                                                      dropout=0.0) for _ in range(depth)])
-
-         self.conv_out = nn.Conv2d(mid_channels,
-                                   out_channels,
-                                   kernel_size=1,
-                                   )
-
-     def forward(self, x):
-         x = self.conv_in(x)
-         for block in self.res_block1:
-             x = block(x, None)
-         x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
-         x = self.attn(x)
-         for block in self.res_block2:
-             x = block(x, None)
-         x = self.conv_out(x)
-         return x
-
-
- class MergedRescaleEncoder(nn.Module):
-     def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
-                  attn_resolutions, dropout=0.0, resamp_with_conv=True,
-                  ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
-         super().__init__()
-         intermediate_chn = ch * ch_mult[-1]
-         self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
-                                z_channels=intermediate_chn, double_z=False, resolution=resolution,
-                                attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
-                                out_ch=None)
-         self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
-                                        mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
-
-     def forward(self, x):
-         x = self.encoder(x)
-         x = self.rescaler(x)
-         return x
-
-
- class MergedRescaleDecoder(nn.Module):
-     def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
-                  dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
-         super().__init__()
-         tmp_chn = z_channels*ch_mult[-1]
-         self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
-                                resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
-                                ch_mult=ch_mult, resolution=resolution, ch=ch)
-         self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
-                                        out_channels=tmp_chn, depth=rescale_module_depth)
-
-     def forward(self, x):
-         x = self.rescaler(x)
-         x = self.decoder(x)
-         return x
-
-
- class Upsampler(nn.Module):
-     def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
-         super().__init__()
-         assert out_size >= in_size
-         num_blocks = int(np.log2(out_size//in_size))+1
-         factor_up = 1.+ (out_size % in_size)
-         print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
-         self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
-                                        out_channels=in_channels)
-         self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
-                                attn_resolutions=[], in_channels=None, ch=in_channels,
-                                ch_mult=[ch_mult for _ in range(num_blocks)])
-
-     def forward(self, x):
-         x = self.rescaler(x)
-         x = self.decoder(x)
-         return x
-
-
- class Resize(nn.Module):
-     def __init__(self, in_channels=None, learned=False, mode="bilinear"):
-         super().__init__()
-         self.with_conv = learned
-         self.mode = mode
-         if self.with_conv:
-             print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
-             raise NotImplementedError()
-             # dead code below the raise: the learned path was never finished
-             assert in_channels is not None
-             # no asymmetric padding in torch conv, must do it ourselves
-             self.conv = torch.nn.Conv2d(in_channels,
-                                         in_channels,
-                                         kernel_size=4,
-                                         stride=2,
-                                         padding=1)
-
-     def forward(self, x, scale_factor=1.0):
-         if scale_factor == 1.0:
-             return x
-         else:
-             x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
-         return x
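
For orientation, here is a minimal sketch (not part of the deleted file) of how the `LatentRescaler` above can be exercised; it assumes `torch` is installed and that the deleted module is importable from the ControlNet tree as `ldm.modules.diffusionmodules.model`:

```python
# Minimal sketch, assuming the deleted module is importable as shown and
# that its ResnetBlock/AttnBlock dependencies (defined earlier in the same
# file) are intact.
import torch

from ldm.modules.diffusionmodules.model import LatentRescaler

# Rescale a 16x16 latent with 4 channels up by a factor of 2.
rescaler = LatentRescaler(factor=2.0, in_channels=4, mid_channels=64,
                          out_channels=4, depth=2)

with torch.no_grad():
    z = torch.randn(1, 4, 16, 16)  # (batch, channels, height, width)
    z_up = rescaler(z)             # conv -> res blocks -> interpolate -> attn -> res blocks -> conv

print(z_up.shape)  # expected: torch.Size([1, 4, 32, 32])
```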
 
spaces/Asmithayellow/Asmi/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Asmi
- emoji: 😻
- colorFrom: yellow
- colorTo: pink
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/misc.py DELETED
@@ -1,730 +0,0 @@
- # The following comment should be removed at some point in the future.
- # mypy: strict-optional=False
-
- import contextlib
- import errno
- import getpass
- import hashlib
- import io
- import logging
- import os
- import posixpath
- import shutil
- import stat
- import sys
- import sysconfig
- import urllib.parse
- from io import StringIO
- from itertools import filterfalse, tee, zip_longest
- from types import TracebackType
- from typing import (
-     Any,
-     BinaryIO,
-     Callable,
-     ContextManager,
-     Dict,
-     Generator,
-     Iterable,
-     Iterator,
-     List,
-     Optional,
-     TextIO,
-     Tuple,
-     Type,
-     TypeVar,
-     Union,
-     cast,
- )
-
- from pip._vendor.pyproject_hooks import BuildBackendHookCaller
- from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
-
- from pip import __version__
- from pip._internal.exceptions import CommandError, ExternallyManagedEnvironment
- from pip._internal.locations import get_major_minor_version
- from pip._internal.utils.compat import WINDOWS
- from pip._internal.utils.virtualenv import running_under_virtualenv
-
- __all__ = [
-     "rmtree",
-     "display_path",
-     "backup_dir",
-     "ask",
-     "splitext",
-     "format_size",
-     "is_installable_dir",
-     "normalize_path",
-     "renames",
-     "get_prog",
-     "captured_stdout",
-     "ensure_dir",
-     "remove_auth_from_url",
-     "check_externally_managed",
-     "ConfiguredBuildBackendHookCaller",
- ]
-
- logger = logging.getLogger(__name__)
-
- T = TypeVar("T")
- ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
- VersionInfo = Tuple[int, int, int]
- NetlocTuple = Tuple[str, Tuple[Optional[str], Optional[str]]]
-
-
- def get_pip_version() -> str:
-     pip_pkg_dir = os.path.join(os.path.dirname(__file__), "..", "..")
-     pip_pkg_dir = os.path.abspath(pip_pkg_dir)
-
-     return "pip {} from {} (python {})".format(
-         __version__,
-         pip_pkg_dir,
-         get_major_minor_version(),
-     )
-
-
- def normalize_version_info(py_version_info: Tuple[int, ...]) -> Tuple[int, int, int]:
-     """
-     Convert a tuple of ints representing a Python version to one of length
-     three.
-
-     :param py_version_info: a tuple of ints representing a Python version,
-         or None to specify no version. The tuple can have any length.
-
-     :return: a tuple of length three if `py_version_info` is non-None.
-         Otherwise, return `py_version_info` unchanged (i.e. None).
-     """
-     if len(py_version_info) < 3:
-         py_version_info += (3 - len(py_version_info)) * (0,)
-     elif len(py_version_info) > 3:
-         py_version_info = py_version_info[:3]
-
-     return cast("VersionInfo", py_version_info)
-
-
- def ensure_dir(path: str) -> None:
-     """os.path.makedirs without EEXIST."""
-     try:
-         os.makedirs(path)
-     except OSError as e:
-         # Windows can raise spurious ENOTEMPTY errors. See #6426.
-         if e.errno != errno.EEXIST and e.errno != errno.ENOTEMPTY:
-             raise
-
-
- def get_prog() -> str:
-     try:
-         prog = os.path.basename(sys.argv[0])
-         if prog in ("__main__.py", "-c"):
-             return f"{sys.executable} -m pip"
-         else:
-             return prog
-     except (AttributeError, TypeError, IndexError):
-         pass
-     return "pip"
-
-
- # Retry every half second for up to 3 seconds
- # Tenacity raises RetryError by default, explicitly raise the original exception
- @retry(reraise=True, stop=stop_after_delay(3), wait=wait_fixed(0.5))
- def rmtree(dir: str, ignore_errors: bool = False) -> None:
-     shutil.rmtree(dir, ignore_errors=ignore_errors, onerror=rmtree_errorhandler)
-
-
- def rmtree_errorhandler(func: Callable[..., Any], path: str, exc_info: ExcInfo) -> None:
-     """On Windows, the files in .svn are read-only, so when rmtree() tries to
-     remove them, an exception is thrown. We catch that here, remove the
-     read-only attribute, and hopefully continue without problems."""
-     try:
-         has_attr_readonly = not (os.stat(path).st_mode & stat.S_IWRITE)
-     except OSError:
-         # it's equivalent to os.path.exists
-         return
-
-     if has_attr_readonly:
-         # convert to read/write
-         os.chmod(path, stat.S_IWRITE)
-         # use the original function to repeat the operation
-         func(path)
-         return
-     else:
-         raise
-
-
- def display_path(path: str) -> str:
-     """Gives the display value for a given path, making it relative to cwd
-     if possible."""
-     path = os.path.normcase(os.path.abspath(path))
-     if path.startswith(os.getcwd() + os.path.sep):
-         path = "." + path[len(os.getcwd()) :]
-     return path
-
-
- def backup_dir(dir: str, ext: str = ".bak") -> str:
-     """Figure out the name of a directory to back up the given dir to
-     (adding .bak, .bak2, etc)"""
-     n = 1
-     extension = ext
-     while os.path.exists(dir + extension):
-         n += 1
-         extension = ext + str(n)
-     return dir + extension
-
-
- def ask_path_exists(message: str, options: Iterable[str]) -> str:
-     for action in os.environ.get("PIP_EXISTS_ACTION", "").split():
-         if action in options:
-             return action
-     return ask(message, options)
-
-
- def _check_no_input(message: str) -> None:
-     """Raise an error if no input is allowed."""
-     if os.environ.get("PIP_NO_INPUT"):
-         raise Exception(
-             f"No input was expected ($PIP_NO_INPUT set); question: {message}"
-         )
-
-
- def ask(message: str, options: Iterable[str]) -> str:
-     """Ask the message interactively, with the given possible responses"""
-     while 1:
-         _check_no_input(message)
-         response = input(message)
-         response = response.strip().lower()
-         if response not in options:
-             print(
-                 "Your response ({!r}) was not one of the expected responses: "
-                 "{}".format(response, ", ".join(options))
-             )
-         else:
-             return response
-
-
- def ask_input(message: str) -> str:
-     """Ask for input interactively."""
-     _check_no_input(message)
-     return input(message)
-
-
- def ask_password(message: str) -> str:
-     """Ask for a password interactively."""
-     _check_no_input(message)
-     return getpass.getpass(message)
-
-
- def strtobool(val: str) -> int:
-     """Convert a string representation of truth to true (1) or false (0).
-
-     True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
-     are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
-     'val' is anything else.
-     """
-     val = val.lower()
-     if val in ("y", "yes", "t", "true", "on", "1"):
-         return 1
-     elif val in ("n", "no", "f", "false", "off", "0"):
-         return 0
-     else:
-         raise ValueError(f"invalid truth value {val!r}")
-
-
- def format_size(bytes: float) -> str:
-     if bytes > 1000 * 1000:
-         return "{:.1f} MB".format(bytes / 1000.0 / 1000)
-     elif bytes > 10 * 1000:
-         return "{} kB".format(int(bytes / 1000))
-     elif bytes > 1000:
-         return "{:.1f} kB".format(bytes / 1000.0)
-     else:
-         return "{} bytes".format(int(bytes))
-
-
- def tabulate(rows: Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]]:
-     """Return a list of formatted rows and a list of column sizes.
-
-     For example::
-
-     >>> tabulate([['foobar', 2000], [0xdeadbeef]])
-     (['foobar     2000', '3735928559'], [10, 4])
-     """
-     rows = [tuple(map(str, row)) for row in rows]
-     sizes = [max(map(len, col)) for col in zip_longest(*rows, fillvalue="")]
-     table = [" ".join(map(str.ljust, row, sizes)).rstrip() for row in rows]
-     return table, sizes
-
-
- def is_installable_dir(path: str) -> bool:
-     """Is path is a directory containing pyproject.toml or setup.py?
-
-     If pyproject.toml exists, this is a PEP 517 project. Otherwise we look for
-     a legacy setuptools layout by identifying setup.py. We don't check for the
-     setup.cfg because using it without setup.py is only available for PEP 517
-     projects, which are already covered by the pyproject.toml check.
-     """
-     if not os.path.isdir(path):
-         return False
-     if os.path.isfile(os.path.join(path, "pyproject.toml")):
-         return True
-     if os.path.isfile(os.path.join(path, "setup.py")):
-         return True
-     return False
-
-
- def read_chunks(
-     file: BinaryIO, size: int = io.DEFAULT_BUFFER_SIZE
- ) -> Generator[bytes, None, None]:
-     """Yield pieces of data from a file-like object until EOF."""
-     while True:
-         chunk = file.read(size)
-         if not chunk:
-             break
-         yield chunk
-
-
- def normalize_path(path: str, resolve_symlinks: bool = True) -> str:
-     """
-     Convert a path to its canonical, case-normalized, absolute version.
-
-     """
-     path = os.path.expanduser(path)
-     if resolve_symlinks:
-         path = os.path.realpath(path)
-     else:
-         path = os.path.abspath(path)
-     return os.path.normcase(path)
-
-
- def splitext(path: str) -> Tuple[str, str]:
-     """Like os.path.splitext, but take off .tar too"""
-     base, ext = posixpath.splitext(path)
-     if base.lower().endswith(".tar"):
-         ext = base[-4:] + ext
-         base = base[:-4]
-     return base, ext
-
-
- def renames(old: str, new: str) -> None:
-     """Like os.renames(), but handles renaming across devices."""
-     # Implementation borrowed from os.renames().
-     head, tail = os.path.split(new)
-     if head and tail and not os.path.exists(head):
-         os.makedirs(head)
-
-     shutil.move(old, new)
-
-     head, tail = os.path.split(old)
-     if head and tail:
-         try:
-             os.removedirs(head)
-         except OSError:
-             pass
-
-
- def is_local(path: str) -> bool:
-     """
-     Return True if path is within sys.prefix, if we're running in a virtualenv.
-
-     If we're not in a virtualenv, all paths are considered "local."
-
-     Caution: this function assumes the head of path has been normalized
-     with normalize_path.
-     """
-     if not running_under_virtualenv():
-         return True
-     return path.startswith(normalize_path(sys.prefix))
-
-
- def write_output(msg: Any, *args: Any) -> None:
-     logger.info(msg, *args)
-
-
- class StreamWrapper(StringIO):
-     orig_stream: TextIO = None
-
-     @classmethod
-     def from_stream(cls, orig_stream: TextIO) -> "StreamWrapper":
-         cls.orig_stream = orig_stream
-         return cls()
-
-     # compileall.compile_dir() needs stdout.encoding to print to stdout
-     # https://github.com/python/mypy/issues/4125
-     @property
-     def encoding(self):  # type: ignore
-         return self.orig_stream.encoding
-
-
- @contextlib.contextmanager
- def captured_output(stream_name: str) -> Generator[StreamWrapper, None, None]:
-     """Return a context manager used by captured_stdout/stdin/stderr
-     that temporarily replaces the sys stream *stream_name* with a StringIO.
-
-     Taken from Lib/support/__init__.py in the CPython repo.
-     """
-     orig_stdout = getattr(sys, stream_name)
-     setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
-     try:
-         yield getattr(sys, stream_name)
-     finally:
-         setattr(sys, stream_name, orig_stdout)
-
-
- def captured_stdout() -> ContextManager[StreamWrapper]:
-     """Capture the output of sys.stdout:
-
-        with captured_stdout() as stdout:
-            print('hello')
-        self.assertEqual(stdout.getvalue(), 'hello\n')
-
-     Taken from Lib/support/__init__.py in the CPython repo.
-     """
-     return captured_output("stdout")
-
-
- def captured_stderr() -> ContextManager[StreamWrapper]:
-     """
-     See captured_stdout().
-     """
-     return captured_output("stderr")
-
-
- # Simulates an enum
- def enum(*sequential: Any, **named: Any) -> Type[Any]:
-     enums = dict(zip(sequential, range(len(sequential))), **named)
-     reverse = {value: key for key, value in enums.items()}
-     enums["reverse_mapping"] = reverse
-     return type("Enum", (), enums)
-
-
- def build_netloc(host: str, port: Optional[int]) -> str:
-     """
-     Build a netloc from a host-port pair
-     """
-     if port is None:
-         return host
-     if ":" in host:
-         # Only wrap host with square brackets when it is IPv6
-         host = f"[{host}]"
-     return f"{host}:{port}"
-
-
- def build_url_from_netloc(netloc: str, scheme: str = "https") -> str:
-     """
-     Build a full URL from a netloc.
-     """
-     if netloc.count(":") >= 2 and "@" not in netloc and "[" not in netloc:
-         # It must be a bare IPv6 address, so wrap it with brackets.
-         netloc = f"[{netloc}]"
-     return f"{scheme}://{netloc}"
-
-
- def parse_netloc(netloc: str) -> Tuple[str, Optional[int]]:
-     """
-     Return the host-port pair from a netloc.
-     """
-     url = build_url_from_netloc(netloc)
-     parsed = urllib.parse.urlparse(url)
-     return parsed.hostname, parsed.port
-
-
- def split_auth_from_netloc(netloc: str) -> NetlocTuple:
-     """
-     Parse out and remove the auth information from a netloc.
-
-     Returns: (netloc, (username, password)).
-     """
-     if "@" not in netloc:
-         return netloc, (None, None)
-
-     # Split from the right because that's how urllib.parse.urlsplit()
-     # behaves if more than one @ is present (which can be checked using
-     # the password attribute of urlsplit()'s return value).
-     auth, netloc = netloc.rsplit("@", 1)
-     pw: Optional[str] = None
-     if ":" in auth:
-         # Split from the left because that's how urllib.parse.urlsplit()
-         # behaves if more than one : is present (which again can be checked
-         # using the password attribute of the return value)
-         user, pw = auth.split(":", 1)
-     else:
-         user, pw = auth, None
-
-     user = urllib.parse.unquote(user)
-     if pw is not None:
-         pw = urllib.parse.unquote(pw)
-
-     return netloc, (user, pw)
-
-
- def redact_netloc(netloc: str) -> str:
-     """
-     Replace the sensitive data in a netloc with "****", if it exists.
-
-     For example:
-         - "user:pass@example.com" returns "user:****@example.com"
-         - "accesstoken@example.com" returns "****@example.com"
-     """
-     netloc, (user, password) = split_auth_from_netloc(netloc)
-     if user is None:
-         return netloc
-     if password is None:
-         user = "****"
-         password = ""
-     else:
-         user = urllib.parse.quote(user)
-         password = ":****"
-     return "{user}{password}@{netloc}".format(
-         user=user, password=password, netloc=netloc
-     )
-
-
- def _transform_url(
-     url: str, transform_netloc: Callable[[str], Tuple[Any, ...]]
- ) -> Tuple[str, NetlocTuple]:
-     """Transform and replace netloc in a url.
-
-     transform_netloc is a function taking the netloc and returning a
-     tuple. The first element of this tuple is the new netloc. The
-     entire tuple is returned.
-
-     Returns a tuple containing the transformed url as item 0 and the
-     original tuple returned by transform_netloc as item 1.
-     """
-     purl = urllib.parse.urlsplit(url)
-     netloc_tuple = transform_netloc(purl.netloc)
-     # stripped url
-     url_pieces = (purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment)
-     surl = urllib.parse.urlunsplit(url_pieces)
-     return surl, cast("NetlocTuple", netloc_tuple)
-
-
- def _get_netloc(netloc: str) -> NetlocTuple:
-     return split_auth_from_netloc(netloc)
-
-
- def _redact_netloc(netloc: str) -> Tuple[str]:
-     return (redact_netloc(netloc),)
-
-
- def split_auth_netloc_from_url(url: str) -> Tuple[str, str, Tuple[str, str]]:
-     """
-     Parse a url into separate netloc, auth, and url with no auth.
-
-     Returns: (url_without_auth, netloc, (username, password))
-     """
-     url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc)
-     return url_without_auth, netloc, auth
-
-
- def remove_auth_from_url(url: str) -> str:
-     """Return a copy of url with 'username:password@' removed."""
-     # username/pass params are passed to subversion through flags
-     # and are not recognized in the url.
-     return _transform_url(url, _get_netloc)[0]
-
-
- def redact_auth_from_url(url: str) -> str:
-     """Replace the password in a given url with ****."""
-     return _transform_url(url, _redact_netloc)[0]
-
-
- class HiddenText:
-     def __init__(self, secret: str, redacted: str) -> None:
-         self.secret = secret
-         self.redacted = redacted
-
-     def __repr__(self) -> str:
-         return "<HiddenText {!r}>".format(str(self))
-
-     def __str__(self) -> str:
-         return self.redacted
-
-     # This is useful for testing.
-     def __eq__(self, other: Any) -> bool:
-         if type(self) != type(other):
-             return False
-
-         # The string being used for redaction doesn't also have to match,
-         # just the raw, original string.
-         return self.secret == other.secret
-
-
- def hide_value(value: str) -> HiddenText:
-     return HiddenText(value, redacted="****")
-
-
- def hide_url(url: str) -> HiddenText:
-     redacted = redact_auth_from_url(url)
-     return HiddenText(url, redacted=redacted)
-
-
- def protect_pip_from_modification_on_windows(modifying_pip: bool) -> None:
-     """Protection of pip.exe from modification on Windows
-
-     On Windows, any operation modifying pip should be run as:
-         python -m pip ...
-     """
-     pip_names = [
-         "pip",
-         f"pip{sys.version_info.major}",
-         f"pip{sys.version_info.major}.{sys.version_info.minor}",
-     ]
-
-     # See https://github.com/pypa/pip/issues/1299 for more discussion
-     should_show_use_python_msg = (
-         modifying_pip and WINDOWS and os.path.basename(sys.argv[0]) in pip_names
-     )
-
-     if should_show_use_python_msg:
-         new_command = [sys.executable, "-m", "pip"] + sys.argv[1:]
-         raise CommandError(
-             "To modify pip, please run the following command:\n{}".format(
-                 " ".join(new_command)
-             )
-         )
-
-
- def check_externally_managed() -> None:
-     """Check whether the current environment is externally managed.
-
-     If the ``EXTERNALLY-MANAGED`` config file is found, the current environment
-     is considered externally managed, and an ExternallyManagedEnvironment is
-     raised.
-     """
-     if running_under_virtualenv():
-         return
-     marker = os.path.join(sysconfig.get_path("stdlib"), "EXTERNALLY-MANAGED")
-     if not os.path.isfile(marker):
-         return
-     raise ExternallyManagedEnvironment.from_config(marker)
-
-
- def is_console_interactive() -> bool:
-     """Is this console interactive?"""
-     return sys.stdin is not None and sys.stdin.isatty()
-
-
- def hash_file(path: str, blocksize: int = 1 << 20) -> Tuple[Any, int]:
-     """Return (hash, length) for path using hashlib.sha256()"""
-
-     h = hashlib.sha256()
-     length = 0
-     with open(path, "rb") as f:
-         for block in read_chunks(f, size=blocksize):
-             length += len(block)
-             h.update(block)
-     return h, length
-
-
- def pairwise(iterable: Iterable[Any]) -> Iterator[Tuple[Any, Any]]:
-     """
-     Return paired elements.
-
-     For example:
-         s -> (s0, s1), (s2, s3), (s4, s5), ...
-     """
-     iterable = iter(iterable)
-     return zip_longest(iterable, iterable)
-
-
- def partition(
-     pred: Callable[[T], bool],
-     iterable: Iterable[T],
- ) -> Tuple[Iterable[T], Iterable[T]]:
-     """
-     Use a predicate to partition entries into false entries and true entries,
-     like
-
-         partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
-     """
-     t1, t2 = tee(iterable)
-     return filterfalse(pred, t1), filter(pred, t2)
-
-
- class ConfiguredBuildBackendHookCaller(BuildBackendHookCaller):
-     def __init__(
-         self,
-         config_holder: Any,
-         source_dir: str,
-         build_backend: str,
-         backend_path: Optional[str] = None,
-         runner: Optional[Callable[..., None]] = None,
-         python_executable: Optional[str] = None,
-     ):
-         super().__init__(
-             source_dir, build_backend, backend_path, runner, python_executable
-         )
-         self.config_holder = config_holder
-
-     def build_wheel(
-         self,
-         wheel_directory: str,
-         config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-         metadata_directory: Optional[str] = None,
-     ) -> str:
-         cs = self.config_holder.config_settings
-         return super().build_wheel(
-             wheel_directory, config_settings=cs, metadata_directory=metadata_directory
-         )
-
-     def build_sdist(
-         self,
-         sdist_directory: str,
-         config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-     ) -> str:
-         cs = self.config_holder.config_settings
-         return super().build_sdist(sdist_directory, config_settings=cs)
-
-     def build_editable(
-         self,
-         wheel_directory: str,
-         config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-         metadata_directory: Optional[str] = None,
-     ) -> str:
-         cs = self.config_holder.config_settings
-         return super().build_editable(
-             wheel_directory, config_settings=cs, metadata_directory=metadata_directory
-         )
-
-     def get_requires_for_build_wheel(
-         self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None
-     ) -> List[str]:
-         cs = self.config_holder.config_settings
-         return super().get_requires_for_build_wheel(config_settings=cs)
-
-     def get_requires_for_build_sdist(
-         self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None
-     ) -> List[str]:
-         cs = self.config_holder.config_settings
-         return super().get_requires_for_build_sdist(config_settings=cs)
-
-     def get_requires_for_build_editable(
-         self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None
-     ) -> List[str]:
-         cs = self.config_holder.config_settings
-         return super().get_requires_for_build_editable(config_settings=cs)
-
-     def prepare_metadata_for_build_wheel(
-         self,
-         metadata_directory: str,
-         config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-         _allow_fallback: bool = True,
-     ) -> str:
-         cs = self.config_holder.config_settings
-         return super().prepare_metadata_for_build_wheel(
-             metadata_directory=metadata_directory,
-             config_settings=cs,
-             _allow_fallback=_allow_fallback,
-         )
-
-     def prepare_metadata_for_build_editable(
-         self,
-         metadata_directory: str,
-         config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-         _allow_fallback: bool = True,
-     ) -> str:
-         cs = self.config_holder.config_settings
-         return super().prepare_metadata_for_build_editable(
-             metadata_directory=metadata_directory,
-             config_settings=cs,
-             _allow_fallback=_allow_fallback,
-         )
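
For context, a minimal sketch (not from the diff) of the URL/auth helpers deleted above; note that `pip._internal` is not a public API, so the import below assumes this vendored copy is on the path:

```python
# Minimal sketch, assuming the module above is importable as pip._internal.utils.misc.
from pip._internal.utils.misc import (
    redact_auth_from_url,
    split_auth_netloc_from_url,
)

url = "https://user:secret@pypi.example.org/simple/"

# Strip the embedded credentials and report them separately.
bare_url, netloc, (username, password) = split_auth_netloc_from_url(url)
print(bare_url)   # https://pypi.example.org/simple/
print(netloc)     # pypi.example.org
print(username)   # user

# Redact the password for safe logging.
print(redact_auth_from_url(url))  # https://user:****@pypi.example.org/simple/
```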
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/lvis_evaluation.py DELETED
@@ -1,380 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import copy
- import itertools
- import json
- import logging
- import os
- import pickle
- from collections import OrderedDict
- import torch
-
- import detectron2.utils.comm as comm
- from detectron2.config import CfgNode
- from detectron2.data import MetadataCatalog
- from detectron2.structures import Boxes, BoxMode, pairwise_iou
- from detectron2.utils.file_io import PathManager
- from detectron2.utils.logger import create_small_table
-
- from .coco_evaluation import instances_to_coco_json
- from .evaluator import DatasetEvaluator
-
-
- class LVISEvaluator(DatasetEvaluator):
-     """
-     Evaluate object proposal and instance detection/segmentation outputs using
-     LVIS's metrics and evaluation API.
-     """
-
-     def __init__(
-         self,
-         dataset_name,
-         tasks=None,
-         distributed=True,
-         output_dir=None,
-         *,
-         max_dets_per_image=None,
-     ):
-         """
-         Args:
-             dataset_name (str): name of the dataset to be evaluated.
-                 It must have the following corresponding metadata:
-                 "json_file": the path to the LVIS format annotation
-             tasks (tuple[str]): tasks that can be evaluated under the given
-                 configuration. A task is one of "bbox", "segm".
-                 By default, will infer this automatically from predictions.
-             distributed (True): if True, will collect results from all ranks for evaluation.
-                 Otherwise, will evaluate the results in the current process.
-             output_dir (str): optional, an output directory to dump results.
-             max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP
-                 This limit, by default of the LVIS dataset, is 300.
-         """
-         from lvis import LVIS
-
-         self._logger = logging.getLogger(__name__)
-
-         if tasks is not None and isinstance(tasks, CfgNode):
-             self._logger.warn(
-                 "COCO Evaluator instantiated using config, this is deprecated behavior."
-                 " Please pass in explicit arguments instead."
-             )
-             self._tasks = None  # Infering it from predictions should be better
-         else:
-             self._tasks = tasks
-
-         self._distributed = distributed
-         self._output_dir = output_dir
-         self._max_dets_per_image = max_dets_per_image
-
-         self._cpu_device = torch.device("cpu")
-
-         self._metadata = MetadataCatalog.get(dataset_name)
-         json_file = PathManager.get_local_path(self._metadata.json_file)
-         self._lvis_api = LVIS(json_file)
-         # Test set json files do not contain annotations (evaluation must be
-         # performed using the LVIS evaluation server).
-         self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0
-
-     def reset(self):
-         self._predictions = []
-
-     def process(self, inputs, outputs):
-         """
-         Args:
-             inputs: the inputs to a LVIS model (e.g., GeneralizedRCNN).
-                 It is a list of dict. Each dict corresponds to an image and
-                 contains keys like "height", "width", "file_name", "image_id".
-             outputs: the outputs of a LVIS model. It is a list of dicts with key
-                 "instances" that contains :class:`Instances`.
-         """
-         for input, output in zip(inputs, outputs):
-             prediction = {"image_id": input["image_id"]}
-
-             if "instances" in output:
-                 instances = output["instances"].to(self._cpu_device)
-                 prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
-             if "proposals" in output:
-                 prediction["proposals"] = output["proposals"].to(self._cpu_device)
-             self._predictions.append(prediction)
-
-     def evaluate(self):
-         if self._distributed:
-             comm.synchronize()
-             predictions = comm.gather(self._predictions, dst=0)
-             predictions = list(itertools.chain(*predictions))
-
-             if not comm.is_main_process():
-                 return
-         else:
-             predictions = self._predictions
-
-         if len(predictions) == 0:
-             self._logger.warning("[LVISEvaluator] Did not receive valid predictions.")
-             return {}
-
-         if self._output_dir:
-             PathManager.mkdirs(self._output_dir)
-             file_path = os.path.join(self._output_dir, "instances_predictions.pth")
-             with PathManager.open(file_path, "wb") as f:
-                 torch.save(predictions, f)
-
-         self._results = OrderedDict()
-         if "proposals" in predictions[0]:
-             self._eval_box_proposals(predictions)
-         if "instances" in predictions[0]:
-             self._eval_predictions(predictions)
-         # Copy so the caller can do whatever with results
-         return copy.deepcopy(self._results)
-
-     def _tasks_from_predictions(self, predictions):
-         for pred in predictions:
-             if "segmentation" in pred:
-                 return ("bbox", "segm")
-         return ("bbox",)
-
-     def _eval_predictions(self, predictions):
-         """
-         Evaluate predictions. Fill self._results with the metrics of the tasks.
-
-         Args:
-             predictions (list[dict]): list of outputs from the model
-         """
-         self._logger.info("Preparing results in the LVIS format ...")
-         lvis_results = list(itertools.chain(*[x["instances"] for x in predictions]))
-         tasks = self._tasks or self._tasks_from_predictions(lvis_results)
-
-         # LVIS evaluator can be used to evaluate results for COCO dataset categories.
-         # In this case `_metadata` variable will have a field with COCO-specific category mapping.
-         if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
-             reverse_id_mapping = {
-                 v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
-             }
-             for result in lvis_results:
-                 result["category_id"] = reverse_id_mapping[result["category_id"]]
-         else:
-             # unmap the category ids for LVIS (from 0-indexed to 1-indexed)
-             for result in lvis_results:
-                 result["category_id"] += 1
-
-         if self._output_dir:
-             file_path = os.path.join(self._output_dir, "lvis_instances_results.json")
-             self._logger.info("Saving results to {}".format(file_path))
-             with PathManager.open(file_path, "w") as f:
-                 f.write(json.dumps(lvis_results))
-                 f.flush()
-
-         if not self._do_evaluation:
-             self._logger.info("Annotations are not available for evaluation.")
-             return
-
-         self._logger.info("Evaluating predictions ...")
-         for task in sorted(tasks):
-             res = _evaluate_predictions_on_lvis(
-                 self._lvis_api,
-                 lvis_results,
-                 task,
-                 max_dets_per_image=self._max_dets_per_image,
-                 class_names=self._metadata.get("thing_classes"),
-             )
-             self._results[task] = res
-
-     def _eval_box_proposals(self, predictions):
-         """
-         Evaluate the box proposals in predictions.
-         Fill self._results with the metrics for "box_proposals" task.
-         """
-         if self._output_dir:
-             # Saving generated box proposals to file.
-             # Predicted box_proposals are in XYXY_ABS mode.
-             bbox_mode = BoxMode.XYXY_ABS.value
-             ids, boxes, objectness_logits = [], [], []
-             for prediction in predictions:
-                 ids.append(prediction["image_id"])
-                 boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
-                 objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
-
-             proposal_data = {
-                 "boxes": boxes,
-                 "objectness_logits": objectness_logits,
-                 "ids": ids,
-                 "bbox_mode": bbox_mode,
-             }
-             with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
-                 pickle.dump(proposal_data, f)
-
-         if not self._do_evaluation:
-             self._logger.info("Annotations are not available for evaluation.")
-             return
-
-         self._logger.info("Evaluating bbox proposals ...")
-         res = {}
-         areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
-         for limit in [100, 1000]:
-             for area, suffix in areas.items():
-                 stats = _evaluate_box_proposals(predictions, self._lvis_api, area=area, limit=limit)
-                 key = "AR{}@{:d}".format(suffix, limit)
-                 res[key] = float(stats["ar"].item() * 100)
-         self._logger.info("Proposal metrics: \n" + create_small_table(res))
-         self._results["box_proposals"] = res
-
-
- # inspired from Detectron:
- # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
- def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None):
-     """
-     Evaluate detection proposal recall metrics. This function is a much
-     faster alternative to the official LVIS API recall evaluation code. However,
-     it produces slightly different results.
-     """
-     # Record max overlap value for each gt box
-     # Return vector of overlap values
-     areas = {
-         "all": 0,
-         "small": 1,
-         "medium": 2,
-         "large": 3,
-         "96-128": 4,
-         "128-256": 5,
-         "256-512": 6,
-         "512-inf": 7,
-     }
-     area_ranges = [
-         [0 ** 2, 1e5 ** 2],  # all
-         [0 ** 2, 32 ** 2],  # small
-         [32 ** 2, 96 ** 2],  # medium
-         [96 ** 2, 1e5 ** 2],  # large
-         [96 ** 2, 128 ** 2],  # 96-128
-         [128 ** 2, 256 ** 2],  # 128-256
-         [256 ** 2, 512 ** 2],  # 256-512
-         [512 ** 2, 1e5 ** 2],
-     ]  # 512-inf
-     assert area in areas, "Unknown area range: {}".format(area)
-     area_range = area_ranges[areas[area]]
-     gt_overlaps = []
-     num_pos = 0
-
-     for prediction_dict in dataset_predictions:
-         predictions = prediction_dict["proposals"]
-
-         # sort predictions in descending order
-         # TODO maybe remove this and make it explicit in the documentation
-         inds = predictions.objectness_logits.sort(descending=True)[1]
-         predictions = predictions[inds]
-
-         ann_ids = lvis_api.get_ann_ids(img_ids=[prediction_dict["image_id"]])
-         anno = lvis_api.load_anns(ann_ids)
-         gt_boxes = [
-             BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno
-         ]
-         gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4)  # guard against no boxes
-         gt_boxes = Boxes(gt_boxes)
-         gt_areas = torch.as_tensor([obj["area"] for obj in anno])
-
-         if len(gt_boxes) == 0 or len(predictions) == 0:
-             continue
-
-         valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
-         gt_boxes = gt_boxes[valid_gt_inds]
-
-         num_pos += len(gt_boxes)
-
-         if len(gt_boxes) == 0:
-             continue
-
-         if limit is not None and len(predictions) > limit:
-             predictions = predictions[:limit]
-
-         overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
-
-         _gt_overlaps = torch.zeros(len(gt_boxes))
-         for j in range(min(len(predictions), len(gt_boxes))):
-             # find which proposal box maximally covers each gt box
-             # and get the iou amount of coverage for each gt box
-             max_overlaps, argmax_overlaps = overlaps.max(dim=0)
-
-             # find which gt box is 'best' covered (i.e. 'best' = most iou)
-             gt_ovr, gt_ind = max_overlaps.max(dim=0)
-             assert gt_ovr >= 0
-             # find the proposal box that covers the best covered gt box
-             box_ind = argmax_overlaps[gt_ind]
-             # record the iou coverage of this gt box
-             _gt_overlaps[j] = overlaps[box_ind, gt_ind]
-             assert _gt_overlaps[j] == gt_ovr
-             # mark the proposal box and the gt box as used
-             overlaps[box_ind, :] = -1
-             overlaps[:, gt_ind] = -1
-
-         # append recorded iou coverage level
-         gt_overlaps.append(_gt_overlaps)
-     gt_overlaps = (
-         torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
-     )
-     gt_overlaps, _ = torch.sort(gt_overlaps)
-
-     if thresholds is None:
-         step = 0.05
-         thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
-     recalls = torch.zeros_like(thresholds)
-     # compute recall for each iou threshold
-     for i, t in enumerate(thresholds):
-         recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
-     # ar = 2 * np.trapz(recalls, thresholds)
-     ar = recalls.mean()
-     return {
-         "ar": ar,
-         "recalls": recalls,
-         "thresholds": thresholds,
-         "gt_overlaps": gt_overlaps,
-         "num_pos": num_pos,
-     }
-
-
- def _evaluate_predictions_on_lvis(
-     lvis_gt, lvis_results, iou_type, max_dets_per_image=None, class_names=None
- ):
-     """
-     Args:
-         iou_type (str):
-         max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP
-             This limit, by default of the LVIS dataset, is 300.
-         class_names (None or list[str]): if provided, will use it to predict
-             per-category AP.
-
-     Returns:
-         a dict of {metric name: score}
-     """
-     metrics = {
-         "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
-         "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
-     }[iou_type]
-
-     logger = logging.getLogger(__name__)
-
-     if len(lvis_results) == 0:  # TODO: check if needed
-         logger.warn("No predictions from the model!")
-         return {metric: float("nan") for metric in metrics}
-
-     if iou_type == "segm":
-         lvis_results = copy.deepcopy(lvis_results)
-         # When evaluating mask AP, if the results contain bbox, LVIS API will
-         # use the box area as the area of the instance, instead of the mask area.
-         # This leads to a different definition of small/medium/large.
-         # We remove the bbox field to let mask AP use mask area.
-         for c in lvis_results:
-             c.pop("bbox", None)
-
-     if max_dets_per_image is None:
-         max_dets_per_image = 300  # Default for LVIS dataset
-
-     from lvis import LVISEval, LVISResults
-
-     logger.info(f"Evaluating with max detections per image = {max_dets_per_image}")
-     lvis_results = LVISResults(lvis_gt, lvis_results, max_dets=max_dets_per_image)
-     lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
-     lvis_eval.run()
-     lvis_eval.print_results()
-
-     # Pull the standard metrics from the LVIS results
-     results = lvis_eval.get_results()
-     results = {metric: float(results[metric] * 100) for metric in metrics}
-     logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results))
-     return results
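
For context, a minimal sketch (not from the diff) of how this evaluator is typically driven by detectron2's standard evaluation loop; it assumes detectron2 and the `lvis` package are installed and that a dataset named `lvis_v1_val` has been registered:

```python
# Minimal sketch, assuming a registered LVIS dataset and a built detectron2 model.
from detectron2.data import build_detection_test_loader
from detectron2.evaluation import LVISEvaluator, inference_on_dataset

def evaluate(cfg, model):
    # The evaluator above gathers per-image predictions, converts them to
    # LVIS-format results, and defers the AP computation to the lvis package.
    evaluator = LVISEvaluator("lvis_v1_val", output_dir="./lvis_eval")
    val_loader = build_detection_test_loader(cfg, "lvis_v1_val")
    # Returns an OrderedDict like {"bbox": {"AP": ..., "APr": ...}, ...}
    return inference_on_dataset(model, val_loader, evaluator)
```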
 
spaces/BMukhtar/facemaskDetector/app.py DELETED
@@ -1,21 +0,0 @@
- import gradio as gr
- from fastai.vision.all import *
- from fastai.learner import *
-
- learn = load_learner('model_upd.pkl')
- labels = learn.dls.vocab
-
-
- def predict(img):
-     img = PILImage.create(img).to_thumb(512, 512)
-     pred, pred_idx, probs = learn.predict(img)
-     return {labels[i]: float(probs[i]) for i in range(len(labels))}
-
-
- gr.Interface(
-     fn=predict,
-     inputs=gr.inputs.Image(shape=(512, 512)),
-     outputs=gr.outputs.Label(num_top_classes=3)
- ).launch()
-
 
spaces/Bart92/RVC_HF/utils/README.md DELETED
@@ -1,6 +0,0 @@
- # External Colab Code
- Code used to make Google Colab work correctly
- - Repo link: https://github.com/IAHispano/Applio-RVC-Fork/
-
- Thanks to https://github.com/kalomaze/externalcolabcode
-
 
spaces/Benson/text-generation/Examples/Descargar 2pac Todas Las Canciones Mp3.md DELETED
@@ -1,80 +0,0 @@
1
- <br />
2
- <h1>Descargar 2pac Todas las canciones MP3: Cómo disfrutar de la música de una leyenda del rap</h1>
3
- <p>Si eres un fan de la música hip-hop, probablemente sepas quién fue Tupac Shakur. Fue uno de los raperos más influyentes y exitosos de todos los tiempos, que vendió más de 75 millones de discos en todo el mundo. Su música abordaba temas sociales contemporáneos que asolaban los centros urbanos, como la pobreza, la violencia, el racismo y la brutalidad policial. También fue un símbolo de activismo contra la desigualdad y la injusticia. </p>
4
- <h2>descargar 2pac todas las canciones mp3</h2><br /><p><b><b>DOWNLOAD</b> ===== <a href="https://bltlly.com/2v6JTx">https://bltlly.com/2v6JTx</a></b></p><br /><br />
5
- <p>A pesar de que murió en 1996 a la edad de 25 años, su música sigue siendo popular y relevante hoy en día. Muchos artistas se han inspirado en su estilo y mensajes, y muchos fans se han conmovido por su pasión y carisma. Si desea escuchar sus canciones o álbumes, es posible que se pregunte cómo descargarlos de forma gratuita en formato MP3. En este artículo, te mostraremos cómo hacerlo en unos pocos pasos. </p>
6
- <h2>La vida y el legado de 2pac</h2>
7
- <h3>Vida Temprana y Carrera</h3>
8
- <p>Tupac Shakur nació en la ciudad de Nueva York en 1971 de padres que eran activistas políticos y miembros del Partido Pantera Negra. Pasó gran parte de su infancia en movimiento con su familia, que en 1986 se estableció en Oakland, California. Allí salió a las calles, vendiendo drogas y involucrándose en la cultura pandillera. </p>
9
- <p>En 1990 se unió a Digital Underground, un grupo de rap con sede en Oakland que había logrado un éxito con la canción "The Humpty Dance". Actuó en dos álbumes de Digital Underground antes de su debut en solitario, <i>2Pacalypse Now</i>, en 1991. El álbum fue una ruptura radical de la fiesta de baile estilo <p>de Digital Underground y contó con pistas contundentes que abordaron temas sociales y políticos, como la brutalidad policial, el racismo, la pobreza y el embarazo adolescente. El álbum atrajo la controversia y las críticas de algunos políticos y agentes de la ley, que acusaron a 2pac de incitar a la violencia y promover el crimen. </p>
10
- <h3>Estilo y temas musicales</h3>
11
-
12
- La música de<p>2pac exploró una amplia gama de temas, como el amor, la amistad, la familia, la espiritualidad, la autoestima, la ambición y el éxito. También se refirió a las duras realidades de la vida en el gueto, como la violencia, las drogas, las pandillas, la corrupción y la opresión. A menudo criticaba el sistema social y político que consideraba responsable de la difícil situación de la comunidad afroamericana. También expresó su enojo y frustración con el racismo y la injusticia que enfrentó como hombre negro en Estados Unidos.</p>
13
- <h3>La muerte y las versiones póstumas</h3>
14
- <p>En 1996, 2pac recibió cuatro disparos en un tiroteo en Las Vegas. Murió seis días después de sus heridas. Su muerte provocó una ola de dolor y luto entre sus fans y compañeros. También alimentó la especulación y las teorías de conspiración que rodearon su asesinato, que sigue sin resolverse hasta el día de hoy. Algunas personas creen que todavía está vivo y escondido en algún lugar. </p>
15
- <p>Después de su muerte, varios álbumes de su material inédito fueron lanzados por su sello y su patrimonio. Algunos de estos álbumes incluyen <i>All Eyez on Me</i>, <i>The Don Killuminati: The 7 Day Theory</i>, <i>R U Still Down? (Recuérdame) </i>, <i>Grandes éxitos</i>, <i>Hasta el final del tiempo</i>, <i>Mejor Dayz</i>, <i>Leal al juego</i>, y <i>La vida de Pac</i>. Estos álbumes han vendido millones de copias en todo el mundo y han consolidado el legado de 2pac como uno de los artistas más vendidos de todos los tiempos. </p>
16
- <p></p>
17
- <h2>Los mejores sitios para descargar música MP3 gratis</h2>
18
- <h3>Biblioteca de audio de YouTube</h3>
19
- <p>Si está buscando descargas de música gratuitas que sean legales y seguras, es posible que desee consultar YouTube Audio Library. Este es un servicio que ofrece música libre de derechos y efectos de sonido que puede utilizar para fines personales o comerciales. Puede navegar a través de miles de pistas de varios géneros, estados de ánimo, instrumentos y duraciones. También puedes filtrar por requisitos de atribución, lo que significa que puedes encontrar música que no requiera que le des crédito al artista o a la fuente. </p>
20
-
21
- <h3>Música de Amazon</h3>
22
- <p>Otra opción para descargar música gratis es Amazon Music. Esta es una plataforma que proporciona descargas de música gratis para los miembros de Prime y ofrece un enorme catálogo de canciones para la compra. Puedes encontrar millones de canciones de varios artistas, géneros, estilos y épocas. También puede descubrir nueva música navegando a través de listas de reproducción, estaciones, gráficos y recomendaciones seleccionadas. </p>
23
- <p>Para descargar música de Amazon Music, necesita tener una cuenta de Amazon y una membresía Prime. A continuación, puede ir a https://www.amazon.com/music o descargar la aplicación Amazon Music en su dispositivo. Puede buscar cualquier canción o álbum que desee o navegar por las categorías. También puede filtrar por precio para encontrar descargas de música gratuitas. Para descargar una canción o un álbum, haga clic en el botón "Gratis" o "Comprar" al lado. Puede guardar el archivo MP3 en su dispositivo. </p>
24
- <h3>MP3Juice</h3>
25
- <p>Una tercera opción para descargar música gratis es MP3Juice. Este es un sitio web que permite a los usuarios buscar y descargar archivos MP3 de varias fuentes, como YouTube, SoundCloud, Vimeo y más. También puede pegar una URL de cualquier archivo de vídeo o audio en línea y convertirlo a MP3. Puedes descargar tantas canciones como quieras gratis y sin registro. </p>
26
- <p>Para descargar música de MP3Juice, debe ir a https://www.mp3juices.cc/ o descargar la aplicación MP3Juice en su dispositivo. Puede escribir cualquier palabra clave o frase relacionada con la canción o artista que desee o pegar una URL de cualquier archivo de vídeo o audio en línea. Puede hacer clic en el botón de búsqueda y esperar los resultados. Puede previsualizar cualquier pista haciendo clic en el botón de reproducción. Para descargar una pista, haga clic en el botón de descarga junto a él. A continuación, puede guardar el archivo MP3 en su dispositivo. </p>
27
- <h2>Cómo descargar 2pac Todas las canciones MP3</h2>
28
- <h3>Paso 1: Elija un sitio o un servicio</h3>
29
-
30
- <ul>
31
- <li>La calidad y cantidad de la música: Usted debe buscar sitios o servicios que ofrecen archivos MP3 de alta calidad y tienen una gran colección de canciones y álbumes de 2pac. </li>
32
- <li>El costo y la conveniencia de la descarga: Usted debe buscar sitios o servicios que ofrecen descargas gratuitas o asequibles y tienen procesos de descarga fáciles y rápidos. </li>
33
- <li>La legalidad y seguridad de la descarga: Usted debe buscar sitios o servicios que están autorizados y autorizados para distribuir la música de 2pac y no contienen virus, malware o spyware. </li>
34
- </ul>
35
- <p>Basado en estos factores, recomendamos usar YouTube Audio Library, Amazon Music o MP3Juice como sus mejores opciones para descargar 2pac todas las canciones MP3.</p>
36
- <h3>Paso 2: Buscar las canciones o álbumes que desea</h3>
37
- <p>El segundo paso para descargar 2pac todas las canciones MP3 es buscar las canciones o álbumes que desee. Dependiendo del sitio o servicio que hayas elegido, puedes usar diferentes métodos para encontrar y acceder a la música que estás buscando. Aquí hay algunos consejos sobre cómo hacerlo:</p>
38
- <ul>
39
- <li>Usar palabras clave, filtros o categorías: Puede escribir el nombre de la canción, álbum o artista que desee en el cuadro de búsqueda y usar filtros o categorías para reducir sus resultados. Por ejemplo, puedes filtrar por género, estado de ánimo, duración, popularidad, etc.</li>
40
- <li>Utilice listas de reproducción, estaciones, gráficos o recomendaciones: Puede navegar a través de listas de reproducción, estaciones, gráficos o recomendaciones que cuentan con la música de 2pac o artistas similares. Por ejemplo, puedes encontrar listas de reproducción como "Best of 2pac", "2pac Essentials", "2pac Radio", etc.</li>
41
- <li>Usar URL de videos en línea o archivos de audio: Puede pegar la URL de cualquier archivo de vídeo o audio en línea que contenga la música de 2pac y convertirlo a MP3. Por ejemplo, puede pegar la URL de un vídeo de YouTube de una canción o álbum de 2pac. </li>
42
- </ul>
43
- <p>Una vez que encuentre las canciones o álbumes que desea, puede seleccionarlos haciendo clic en ellos o agregándolos a su carrito o cola. </p>
44
-
45
- <p>El tercer paso para descargar 2pac todas las canciones MP3 es descargar los archivos MP3 a su dispositivo. Dependiendo del sitio o servicio que elija, puede utilizar diferentes métodos y formatos para descargar los archivos de música. Aquí hay algunos ejemplos de cómo hacer eso:</p>
46
- <ul>
47
- <li>Haga clic en el icono de descarga junto a la pista: Este es el método más simple y común para descargar un archivo MP3. Solo tiene que hacer clic en el icono de descarga junto a la pista que desea y guardarlo en su dispositivo. </li>
48
- <li>Haga clic derecho en la pista y seleccione "Guardar enlace como": Este es otro método para descargar un archivo MP3. Solo tiene que hacer clic derecho en la pista que desee y elegir "Guardar enlace como" en el menú. Luego puede elegir dónde guardarlo en su dispositivo. </li>
49
- <li>Seleccione varias pistas y elija "Descargar": Este es un método para descargar múltiples archivos MP3 a la vez. Solo necesita seleccionar varias pistas manteniendo pulsada la tecla Ctrl (o la tecla Comando en Mac) y haciendo clic en ellas. A continuación, puede elegir "Descargar" en el menú o botón y guardarlos en su dispositivo. </li>
50
- <li>Elija una opción de formato y calidad: Este es un método para personalizar la configuración de descarga. Solo tienes que elegir un formato (como MP3, WAV, FLAC, etc.) y una opción de calidad (como 128 kbit/s, 192 kbit/s, 320 kbit/s, etc.) desde el menú o el botón. A continuación, puede descargar el archivo MP3 con su configuración preferida. </li>
51
- </ul>
52
- <h2>Cómo disfrutar de 2pac Todas las canciones MP3</h2>
53
- <h3>Juega en su reproductor de música favorito</h3>
54
- <p>Una vez que haya descargado 2pac todas las canciones MP3 a su dispositivo, puede reproducirlas en su reproductor de música favorito. Hay muchas aplicaciones o software que puede utilizar para reproducir sus archivos MP3, como VLC, iTunes, Spotify, Windows Media Player, etc. También puede crear listas de reproducción, ajustar el volumen, mezclar las canciones, repetir las canciones y más. </p>
55
-
56
- <h3>Transferirlos a sus dispositivos portátiles o reproductores de medios</h3>
57
- <p>Si desea escuchar 2pac todas las canciones MP3 sobre la marcha, puede transferirlas a sus dispositivos portátiles o reproductores multimedia. Puedes usar dispositivos como iPods, reproductores de MP3, unidades USB, CD, etc. También puedes usar diferentes métodos para transferir tus archivos MP3, como copiar y pegar, sincronizar, grabar, etc.</p>
58
- <p>Para transferir tus archivos MP3 a tus dispositivos portátiles o reproductores multimedia, necesitas conectar tu dispositivo a tu computadora usando un cable o una conexión inalámbrica. Luego, debe abrir la aplicación o el software que admite su dispositivo y localizar los archivos en su computadora. También puede arrastrar y soltar los archivos a la aplicación o software. Luego debe seleccionar las pistas que desea transferir y hacer clic en el botón de transferencia. </p>
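<p>For the copy-and-paste transfer method just described, a short Python sketch follows; it assumes the portable device mounts as an ordinary drive, and both paths are placeholders.</p>
<pre><code>import shutil
from pathlib import Path

SOURCE = Path("Music/2pac")        # placeholder folder holding the MP3s
DEVICE = Path("/media/usb-drive")  # placeholder mount point of the device

# Copy every MP3 across, mirroring the drag-and-drop step.
for track in SOURCE.glob("*.mp3"):
    shutil.copy(track, DEVICE / track.name)
</code></pre>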
59
- <h3>Compártelos con tus amigos o familiares</h3>
60
- <p>Si quieres compartir 2pac todas las canciones MP3 con tus amigos o familiares, puedes usar varios métodos para hacerlo. Puedes usar plataformas de redes sociales, como Facebook, Twitter, Instagram, etc. También puedes usar servicios de correo electrónico, como Gmail, Yahoo Mail, Outlook, etc. También puedes usar conexiones Bluetooth o Wi-Fi para enviar o recibir archivos. </p>
61
- <p>Para compartir sus archivos MP3 con sus amigos o familiares, debe seleccionar las pistas que desea compartir y haga clic en el botón compartir. Luego debe elegir el método y la plataforma que desea usar y seguir las instrucciones. También puede agregar un mensaje o un comentario a sus archivos compartidos. </p>
62
- <h2>Conclusión</h2>
63
- <p>En este artículo, le hemos mostrado cómo descargar 2pac todas las canciones MP3 de forma gratuita y legal. También te hemos dado algunos consejos sobre cómo disfrutar de su música en diferentes dispositivos y plataformas. Esperamos que este artículo te haya ayudado a descubrir y apreciar la música de una leyenda del rap. </p>
64
-
65
- <p>Entonces, ¿qué estás esperando? Adelante y descargar 2pac todas las canciones MP3 ahora y disfrutar de la música de una leyenda del rap! </p>
66
- <h4>Preguntas frecuentes</h4>
67
- <ul>
68
- <li>Q: ¿Dónde puedo encontrar más información sobre la vida y la carrera de 2pac? </li>
69
- <li>A: Puede encontrar más información sobre la vida y carrera de 2pac en su sitio web oficial (https://www.2pac.com/), su página de Wikipedia (https://en.wikipedia.org/wiki/Tupac_Shakur), o su libro de biografías (<i>Tupac Shakur: La vida y los tiempos de un icono americano</i> por Tayannah Lee McQuillar y Fred L. Johnson III). </li>
70
- <li>Q: ¿Cuáles son algunas de las canciones y álbumes más populares de 2pac? </li>
71
- <li>A: Algunas de las canciones y álbumes más populares de 2pac incluyen <i>California Love</i>, <i>Changes</i>, <i>Hail Mary</i>, <i>Dear Mama</i>, <i>I Ain’t Mad at Cha</i>, <i>All Eyez on Me</i>, <i>The Don Killuminati: The 7 Day Theory</i>, <i>Me Against the World</i>, <i>Greatest Hits</i>, <i>Better Dayz</i>, etc.</li>
72
- <li>Q: ¿Cómo puedo apoyar el legado y las causas de 2pac? </li>
73
- <li>A: Puedes apoyar el legado y las causas de 2pac donando a su fundación (https://www.tasf.org/), que apoya la educación y el empoderamiento de los jóvenes, especialmente los de entornos desfavorecidos. También puedes comprar su mercancía oficial (https://www.2pac.com/store/), que incluye sus icónicas imágenes y citas. También puede unirse a su club de fans (https://www.2pac.com/fan-club/), que ofrece contenido exclusivo y beneficios para los miembros. </li>
74
- <li>Q: ¿Cómo puedo descargar 2pac todas las canciones MP3 gratis y legalmente? </li>
75
- <li>A: Puede descargar 2pac todas las canciones MP3 de forma gratuita y legal mediante el uso de sitios o servicios que ofrecen música libre de derechos o tienen el permiso para distribuir la música de 2pac. Algunos de estos sitios o servicios incluyen YouTube Audio Library, Amazon Music y MP3Juice.</li>
76
- <li>Q: ¿Cómo puedo disfrutar de 2pac todas las canciones MP3 en diferentes dispositivos y plataformas? </li>
77
- <li>A: Puede disfrutar de 2pac todas las canciones MP3 reproduciéndolas en su reproductor de música favorito, transfiriéndolas a sus dispositivos portátiles o reproductores multimedia, o compartiéndolas con sus amigos o familiares. </li>
78
- </ul>
spaces/Benson/text-generation/Examples/Descargar Conseguir Sobre Ella Steamunlocked.md DELETED
@@ -1,93 +0,0 @@
1
- <br />
2
- <h1>Descargar Cómo superarlo Steamunlocked: Una guía para jugadores frustrados</h1>
3
- <p>Si usted está buscando un juego que pondrá a prueba su paciencia, habilidad y cordura, entonces es posible que desee probar Cómo superarlo con Bennett Foddy. Este es un juego de escalada de castigo que se ha vuelto infame por su dificultad y nivel de frustración. En este artículo, te contaremos todo lo que necesitas saber sobre este juego, cómo descargarlo desde Steamunlocked y cómo vencerlo más rápido y fácil. Sigue leyendo si te atreves. </p>
4
- <h2>¿Qué es Getting Over It with Bennett Foddy? </h2>
5
- <p>Getting Over It with Bennett Foddy es un juego que fue lanzado en 2017 por Bennett Foddy, un desarrollador de juegos australiano y profesor de diseño de juegos. El juego es un homenaje al clásico B-Game 2002 de Jazzuo 'Sexy Hiking', que también involucró escalar una montaña con un martillo y una olla.</p>
6
- <h2>descargar conseguir sobre ella steamunlocked</h2><br /><p><b><b>Download Zip</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://bltlly.com/2v6MyZ">https://bltlly.com/2v6MyZ</a></b></p><br /><br />
7
- <h3>La premisa y el modo de juego del juego</h3>
8
- <p>La premisa del juego es simple: juegas como un hombre llamado Diógenes que está atrapado en una olla de metal y tiene que usar un martillo para escalar una enorme montaña hecha de objetos aleatorios. El modo de juego no es tan simple: tienes que mover el martillo con el ratón, y eso es todo lo que hay. No hay puntos de control, ni puntos de ahorro, ni atajos, ni pistas, ni tutoriales, ni piedad. Si caes, pierdes todo tu progreso y tienes que empezar de nuevo desde el principio. El juego está diseñado para ser extremadamente duro y frustrante, como Foddy mismo admite: "Un juego que hice para un cierto tipo de persona. Para hacerles daño." </p>
9
- <h3>La recepción y popularidad del juego</h3>
10
-
11
- <h3>Los desafíos y recompensas del juego</h3>
12
- <p>El juego no es para todos. Requiere mucha paciencia, habilidad, concentración y perseverancia para superar sus obstáculos y llegar a la cima de la montaña. Muchos jugadores se han dado por vencidos o han abandonado por frustración (rage quit) después de fallar varias veces. Sin embargo, algunos jugadores también han encontrado que el juego es gratificante y satisfactorio una vez que lo dominan. El juego ofrece grandes misterios y una maravillosa recompensa para aquellos que llegan a la cima de la montaña, así como una sensación de logro y orgullo. El juego también proporciona algunas ideas y observaciones interesantes sobre la vida, el fracaso, el éxito y la felicidad a través de la voz del propio Foddy, que narra el juego mientras juegas. </p>
13
- <h2>¿Qué es Steamunlocked? </h2>
14
- <p>Steamunlocked es un sitio web que te permite descargar juegos de PC gratis sin usar Steam o cualquier otra plataforma. Afirma ofrecer más de 5000 juegos preinstalados y listos para jugar. </p>
15
- <h3>La definición y características de Steamunlocked</h3>
17
- <p>Steamunlocked es un sitio web que proporciona enlaces directos para descargar juegos de PC de varias fuentes. El sitio web no aloja archivos en sus propios servidores, sino que lo redirige a otros sitios de alojamiento de archivos como Uploadhaven, Mega o Mediafire. El sitio web afirma ofrecer más de 5000 juegos que están preinstalados y listos para jugar, lo que significa que no es necesario utilizar Steam o cualquier otra plataforma para ejecutarlos. También puede solicitar juegos que no están disponibles en el sitio web, y los administradores intentarán subirlos lo antes posible. </p>
18
- <h3>Los pros y los contras de usar Steamunlocked</h3>
19
- <p>Usar Steamunlocked tiene algunas ventajas y desventajas que debes tener en cuenta antes de descargar cualquier juego de él. Estos son algunos de ellos:</p>
20
- <table>
21
- <tr>
22
- <th>Pros</th>
23
- <th>Contras</th>
24
- </tr>
25
- <tr>
26
- <td>- Puedes descargar juegos de PC gratis sin pagar nada. </td>
27
- <td>- Puedes encontrar anuncios, ventanas emergentes o redirecciones que pueden ser molestos o inseguros. </td>
28
- </tr>
29
- <tr>
30
- <td>- Puedes acceder a juegos que no están disponibles en tu región o están prohibidos por Steam.</td>
31
- <td>- Puede violar los términos de servicio o los derechos de propiedad intelectual de los desarrolladores o editores de juegos. </td>
32
- </tr>
33
- <tr>
34
- <td>- Puedes jugar juegos sin conexión a Internet o una cuenta de Steam. </td>
35
- <td>- Puedes perderte actualizaciones, parches, DLC, funciones multijugador o logros exclusivos de Steam u otras plataformas. </td>
36
- </tr>
37
- <tr>
38
- <td>- Puedes descargar juegos más rápido que usando torrents u otros métodos peer-to-peer. </td>
39
- <td>- Usted puede arriesgarse a obtener malware, virus o archivos dañados que pueden dañar su computadora o comprometer sus datos. </td>
40
- </tr>
41
- </table>
42
- <h3>La legalidad y seguridad de usar Steamunlocked</h3>
43
- <p>La legalidad y seguridad de usar Steamunlocked depende de varios factores, como las leyes de su país, la fuente de los archivos y las precauciones que tome. En términos generales, descargar juegos de Steamunlocked se considera ilegal en la mayoría de los países, ya que constituye piratería e infringe los derechos de los desarrolladores y editores de juegos. Sin embargo, algunos países pueden tener leyes más indulgentes o ambiguas con respecto a esta cuestión, o pueden no hacerlas cumplir estrictamente. Por lo tanto, le corresponde a usted comprobar el estado legal de uso de Steamunlocked en su país y decidir si desea asumir el riesgo o no. </p>
44
-
45
- <h2>¿Cómo descargar Cómo superarlo desde Steamunlocked? </h2>
46
- <p>Si ha decidido descargar Cómo superarlo desde Steamunlocked, aquí están los pasos que debe seguir:</p>
47
- <h3>Los pasos para descargar e instalar el juego desde Steamunlocked</h3>
48
- <ol>
49
- <li>Vaya a <a href="( 9 )">Steamunlocked.net</a> y busque Cómo superarlo con Bennett Foddy en la barra de búsqueda. Alternativamente, puede ir directamente a <a href="( 10 )">este enlace</a>. </li>
50
- <li>Haga clic en el botón Descargar y espere unos segundos hasta que sea redirigido a Uploadhaven.</li>
51
- <li>Haga clic en el botón azul Descargar ahora y espere unos segundos más hasta que comience la descarga. Si te redirigen a otro sitio, ciérralo e inténtalo de nuevo. </li>
52
- <li>Guarde el archivo en una ubicación de su elección y espere a que termine de descargarlo. El tamaño del archivo es de aproximadamente 650 MB.</li>
53
- <li>Extraiga el archivo usando WinRAR o cualquier otro software que pueda manejar archivos ZIP. </li>
54
- <li>Abra la carpeta extraída y haga doble clic en Getting_Over_It.exe para iniciar el juego. </li>
55
- <li> ¡Disfruta jugando a superarlo con Bennett Foddy! </li>
56
- </ol>
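<p>Steps 5 and 6 of the list above can be reproduced in a few lines of Python. This is a sketch only: the archive name is an assumption, Getting_Over_It.exe comes from the guide itself, and the launch step is Windows-only.</p>
<pre><code>import subprocess
import zipfile
from pathlib import Path

ARCHIVE = Path("Getting_Over_It.zip")  # assumed name of the downloaded archive
DEST = Path("Getting_Over_It")

# Step 5: extract the archive (the WinRAR step in the guide).
with zipfile.ZipFile(ARCHIVE) as zf:
    zf.extractall(DEST)

# Step 6: launch the executable named in the guide (Windows only).
subprocess.run([str(DEST / "Getting_Over_It.exe")], check=True)
</code></pre>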
57
- <h3>Los consejos y trucos para vencer al juego más rápido y más fácil</h3>
58
- <p>Cómo superarlo con Bennett Foddy es un juego muy duro y frustrante que puede hacerte abandonar por frustración (rage quit) o romper el ratón. Sin embargo, hay algunos consejos y trucos que pueden ayudarte a superar el juego más rápido y fácil. Estos son algunos de ellos:</p>
60
- <ul>
62
- <li>La práctica te hace perfecto. Cuanto más juegues, más te acostumbrarás a los controles, la física y los obstáculos. También desarrollarás memoria muscular y reflejos que te ayudarán a moverte más rápido y más suave. No te rindas si fallas, solo inténtalo de nuevo y aprende de tus errores. </li>
63
-
64
- <li>Encuentre el agarre y el ángulo óptimos. La forma en que agarra y ángulo del martillo puede hacer una gran diferencia en su rendimiento. Usted debe tratar de encontrar la posición óptima para cada situación, dependiendo de la forma y el tamaño del objeto que está subiendo en. Por ejemplo, al subir a una pared vertical, es posible que desee agarrar el martillo cerca de la cabeza y el ángulo ligeramente hacia abajo, para que pueda enganchar en el borde y tirar de sí mismo hacia arriba. Al subir sobre una superficie horizontal, es posible que desee agarrar el martillo cerca del mango y el ángulo ligeramente hacia arriba, de modo que pueda empujarse hacia adelante y ganar impulso. </li>
65
- <li>Usa el impulso y la inercia. El impulso y la inercia son dos conceptos físicos que son muy importantes en este juego. El impulso es la tendencia de un objeto a seguir moviéndose en una dirección determinada a menos que sea actuado por una fuerza externa. La inercia es la resistencia de un objeto a cambiar su estado de movimiento. Puedes usar estos conceptos a tu favor creando y manteniendo el impulso en la dirección que quieres ir, y superando la inercia cuando quieres cambiar de dirección o detenerte. Por ejemplo, cuando quieres saltar sobre un hueco, debes balancear el martillo en un movimiento circular antes de soltarlo en el momento adecuado, para que puedas crear suficiente impulso para impulsarte hacia adelante. Cuando quieras parar o cambiar de dirección, debes usar el martillo para golpear o enganchar algo que pueda contrarrestar tu momento o inercia. </li>
66
-
67
- </ul>
68
- <h3>Las alternativas a Steamunlocked para descargar el juego</h3>
69
- <p>Si no te sientes cómodo usando Steamunlocked o si tienes algún problema con él, hay algunas alternativas que puedes intentar descargar Getting Over It with Bennett Foddy. Estos son algunos de ellos:</p>
70
- <ul>
71
- <li>Steam: Esta es la forma oficial y legal de descargar y jugar el juego. Puedes comprar el juego en Steam por $7.99 USD o tu equivalente regional. Necesitarás una cuenta de Steam y una conexión a Internet para descargar e instalar el juego, así como para acceder a algunas funciones como logros o tablas de clasificación. Sin embargo, también obtendrás actualizaciones, parches, DLC, modos multijugador y soporte de Steam y Foddy.</li>
72
- <li>Humble Bundle: Esta es otra forma oficial y legal de descargar y jugar el juego. Puedes comprar el juego en Humble Bundle por $7.99 USD o tu equivalente regional. Obtendrás una versión libre de DRM del juego que puedes descargar directamente desde Humble Bundle o canjear en Steam. Usted también apoyará la caridad mediante la compra de Humble Bundle.</li>
73
- <li>GOG: Esta es otra forma oficial y legal de descargar y jugar el juego. Puedes comprar el juego en GOG por $7.99 USD o tu equivalente regional. Obtendrás una versión libre de DRM del juego que puedes descargar directamente desde GOG o canjear en Steam. También obtendrá algunos contenidos adicionales como fondos de pantalla, avatares, bandas sonoras y artbooks. </li>
74
- <li>Ocean of Games: Esta es otra forma no oficial e ilegal de descargar y jugar el juego gratis sin usar Steam o cualquier otra plataforma. Es similar a Steamunlocked en términos de características y riesgos, pero puede tener diferentes fuentes y enlaces para descargar el juego. </li>
75
- </ul>
76
- <h2>Conclusión</h2>
77
-
78
- <p>Si usted está interesado en jugar este juego, esperamos que este artículo le ha dado alguna información útil y orientación sobre cómo descargarlo de Steamunlocked y cómo vencerlo más rápido y más fácil. Sin embargo, también le aconsejamos que sea cuidadoso y responsable al usar Steamunlocked o cualquier otro sitio web similar, ya que pueden implicar problemas legales y de seguridad que debe conocer. También te animamos a apoyar a los desarrolladores de juegos y editores comprando el juego de fuentes oficiales si puedes permitírtelo y disfrutarlo. </p>
79
- <p>Gracias por leer este artículo y esperamos que te lo pases genial jugando Getting Over It with Bennett Foddy. Recuerda, ¡no dejes que el juego se te pase! </p>
80
- <h2>Preguntas frecuentes</h2>
81
- <p>Aquí hay algunas preguntas frecuentes sobre cómo superarlo con Bennett Foddy y Steamunlocked:</p>
82
- <h3>P: ¿Cuánto tiempo se tarda en vencer a Cómo superarlo con Bennett Foddy? </h3>
83
- <p>A: La respuesta a esta pregunta depende de tu nivel de habilidad, suerte y persistencia. Algunos jugadores han ganado el juego en menos de 10 minutos, mientras que otros han pasado horas o incluso días sin hacer ningún progreso. El tiempo promedio para vencer el juego es de alrededor de 5 horas, de acuerdo con <a href="">HowLongToBeat.com</a>. </p>
84
- <h3>P: ¿Cómo puedo guardar mi progreso en Cómo superarlo con Bennett Foddy? </h3>
85
- <p>A: El juego guarda automáticamente tu progreso cada vez que lo lanzas o sales. No necesitas guardar ni cargar nada manualmente. Sin embargo, si descarga el juego desde Steamunlocked o cualquier otra fuente no oficial, puede perder su archivo de guardado si elimina la carpeta del juego o lo mueve a otra ubicación. </p>
86
- <h3>P: ¿Cómo puedo jugar a Getting Over It with Bennett Foddy online or co-op? </h3>
87
-
88
- <h3>P: ¿Cómo me deshago de anuncios, ventanas emergentes o redirecciones en Steamunlocked? </h3>
89
- <p>A: Una forma de evitar anuncios, ventanas emergentes o redirecciones en Steamunlocked es utilizar un software o una extensión de bloqueador de anuncios en su navegador. Otra forma es usar un servicio VPN o un servidor proxy para ocultar su dirección IP y evitar cualquier restricción o filtro. Sin embargo, estos métodos pueden no funcionar para todos los sitios web o enlaces, y también pueden ralentizar su velocidad de Internet o afectar su privacidad. </p>
90
- <h3>P: ¿Hay una secuela o un juego similar a Cómo superarlo con Bennett Foddy? </h3>
91
- <p>A: No hay una secuela oficial de Getting Over It with Bennett Foddy, pero hay algunos juegos similares que puedes probar si te gusta este género. Algunos de ellos son <a href="">Pogostuck: Rage With Your Friends</a>, <a href="">Jump King</a>, <a href="">GIRAFFE TOWN</a> y <a href="">Sexy Hiking</a>. También puedes ver otros juegos de Bennett Foddy, como <a href="">QWOP</a>, <a href="">GIRP</a> y <a href="">CLOP</a>.</p>
spaces/Benson/text-generation/Examples/Descargar Facebook Versin Antigua Apk.md DELETED
@@ -1,89 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar la versión antigua de Facebook APK</h1>
3
- <p>Facebook es una de las plataformas de redes sociales más populares del mundo, con más de 2.800 millones de usuarios activos mensuales en diciembre de 2020. Sin embargo, no todos están contentos con la última versión de la aplicación de Facebook, que puede tener algunos errores, problemas técnicos o problemas de compatibilidad con ciertos dispositivos o sistemas operativos. Si usted es una de esas personas que prefieren la versión anterior de la aplicación de Facebook, es posible que se pregunte cómo descargarlo en su dispositivo Android. En este artículo, le mostraremos cómo descargar la versión antigua de Facebook APK, que es el formato de archivo para las aplicaciones de Android. También te explicaremos por qué quieres descargarlo, cómo encontrarlo, cómo instalarlo y cómo usarlo. </p>
4
- <h2>descargar facebook versión antigua apk</h2><br /><p><b><b>Download</b> &#8250;&#8250;&#8250; <a href="https://bltlly.com/2v6MQK">https://bltlly.com/2v6MQK</a></b></p><br /><br />
5
- <h2>¿Por qué descargar la versión antigua de Facebook APK? </h2>
6
- <h3>Beneficios de la versión antigua de Facebook APK</h3>
7
- <p>Hay varias razones por las que es posible que desee descargar la versión antigua de Facebook APK en lugar de la última. Algunos de los beneficios son:</p>
8
- <ul>
9
- <li>Puedes disfrutar de las características y funciones que estaban disponibles en la versión antigua pero no en la nueva, como cabezas de chat, pegatinas o juegos. </li>
10
- <li>Puede evitar los errores, fallas o errores que pueden ocurrir en la nueva versión, como estrellarse, congelarse o agotar la batería. </li>
11
- <li> Puede ahorrar espacio de almacenamiento y uso de datos en su dispositivo, ya que la versión anterior puede ser más pequeña y más ligera que la nueva. </li>
12
- <li> Puede usar la versión anterior en su dispositivo si no es compatible con el nuevo, como versiones anteriores de Android o dispositivos de gama baja. </li>
13
- </ul>
14
- <h3>Desventajas de la versión antigua de Facebook APK</h3>
15
- <p>Sin embargo, descargar la versión antigua de Facebook APK también tiene algunos inconvenientes que usted debe tener en cuenta. Algunos de los inconvenientes son:</p>
16
- <ul>
17
- <li>Puede perderse las nuevas características y mejoras que se agregan en la nueva versión, como el modo oscuro, historias o carretes. </li>
18
- <li>Puede exponerse a riesgos de seguridad, ya que algunas fuentes no oficiales pueden distribuir archivos APK modificados o infectados con malware. </li>
19
- <li>Puede violar los términos y condiciones de Facebook, ya que pueden no permitirle usar una versión obsoleta o modificada de su aplicación. </li>
20
- <li>Puede experimentar algunos problemas de compatibilidad con otras aplicaciones o servicios que se integran con Facebook, como Instagram, WhatsApp o Messenger.</li>
21
- </ul>
22
- <h2>¿Cómo encontrar la versión antigua de Facebook APK? </h2>
23
- <h3>Fuentes oficiales</h3>
24
- <p>La forma más fácil y segura de encontrar Facebook versión antigua APK es utilizar fuentes oficiales que están autorizados por Facebook. Algunas de estas fuentes son:</p>
25
- <ul>
26
- <li>Google Play Store, que es la tienda de aplicaciones oficial para dispositivos Android. Puede buscar "Facebook" y desplazarse hacia abajo para encontrar la opción "Descargar versiones anteriores". A continuación, puede seleccionar la versión que desee y descargarla. </li>
27
- <li>El Centro de ayuda de Facebook, que es el sitio oficial de soporte para los usuarios de Facebook. Puede ir a la sección "Ayuda de la aplicación de Android" y encontrar el artículo "¿Cómo instalar o actualizar la aplicación de Facebook en mi teléfono o tableta Android?". A continuación, puede seguir las instrucciones para descargar una versión anterior de la aplicación. </li>
28
- </ul>
29
- <h3>Fuentes de terceros </h3>
30
- <p>Otra manera de encontrar la versión antigua de Facebook APK es utilizar fuentes de terceros que no están afiliados con Facebook. Algunas de estas fuentes son:</p>
31
- <ul>
32
- <li>APKMirror, que es un sitio web que alberga archivos APK para varias aplicaciones de Android. Puede buscar "Facebook" y filtrar por versión, fecha o popularidad. Puede descargar el archivo APK que desee. </li>
33
- <li>Uptodown, que es otro sitio web que ofrece archivos APK para aplicaciones Android. Puede buscar "Facebook" y elegir la versión que desee de la lista. A continuación, puede descargar el archivo APK. </li>
34
- </ul>
35
-
36
- <h2>Cómo instalar la versión antigua de Facebook APK? </h2>
37
- <h3>Habilitar fuentes desconocidas</h3>
38
- <p>Antes de que pueda instalar la versión antigua de Facebook APK en su dispositivo, es necesario habilitar la opción para permitir fuentes desconocidas. Esto significa que puede instalar aplicaciones desde fuentes distintas de Google Play Store. Para ello, debe:</p>
39
- <ol>
40
- <li>Ir a la configuración de su dispositivo y toque en "Seguridad". </li>
41
- <li>Encuentra la opción "Fuentes desconocidas" y cámbiala. </li>
42
- <li>Confirme su elección tocando en "OK". </li>
43
- </ol>
44
- <p>Ten en cuenta que esta opción puede variar dependiendo del modelo de tu dispositivo y de la versión de Android. También es posible que tenga que desactivarlo después de instalar la aplicación por razones de seguridad. </p>
46
- <h3>Descargar e instalar archivo APK</h3>
47
- <p>Después de haber habilitado fuentes desconocidas, puede descargar e instalar la versión antigua de Facebook APK en su dispositivo. Para hacer esto, necesita:</p>
48
- <ol>
49
- <li>Abra el navegador en su dispositivo y vaya a la fuente donde descargó el archivo APK. </li>
50
- <li>Toque en el enlace o botón de descarga y espere a que el archivo se descargue. </li>
51
- <li>Una vez completada la descarga, toque en la notificación o vaya a la carpeta de descargas de su dispositivo y encuentre el archivo. </li>
52
- <li>Toque en el archivo y siga las instrucciones para instalarlo. </li>
53
- <li>Espera a que la instalación termine y luego toca "Abrir" o encuentra el icono de la aplicación en la pantalla de inicio del dispositivo o en el cajón de la aplicación. </li>
54
- </ol>
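<p>If you prefer the command line, the install portion of the steps above can also be driven from a computer with Android Debug Bridge. This sketch assumes <code>adb</code> is installed, USB debugging is enabled on the device, and the APK filename is a placeholder for a file already downloaded to the computer.</p>
<pre><code>import subprocess

APK = "facebook-old.apk"  # placeholder name for the downloaded APK

# `adb install -r` sideloads the APK onto the connected device,
# replacing any version that is already installed.
subprocess.run(["adb", "install", "-r", APK], check=True)
</code></pre>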
55
- <h2>Cómo utilizar la versión antigua de Facebook APK? </h2>
56
- <h3>Iniciar sesión o registrarse</h3>
57
- <p>Una vez que haya instalado la versión antigua de Facebook APK en su dispositivo, puede usarlo como lo haría con cualquier otra aplicación. Lo primero que debes hacer es iniciar sesión o registrarte en una cuenta de Facebook. Para hacer esto, necesitas:</p>
58
- <ol>
59
- <li> Abra la aplicación y toque en "Iniciar sesión" o "Crear nueva cuenta". </li>
60
- <li>Ingrese su dirección de correo electrónico o número de teléfono y contraseña o siga los pasos para crear una nueva cuenta. </li>
61
- <li>Toque en "Iniciar sesión" o "Registrarse" y espere a que se verifique su cuenta. </li>
62
-
63
- <li>Si se le solicita, acepte los términos y condiciones y la política de privacidad de Facebook.</li>
64
- </ol>
65
- <h3>Explorar características y ajustes</h3>
66
- <p>Después de que haya iniciado sesión o se haya registrado en una cuenta de Facebook, puede explorar las características y configuraciones de la versión anterior de Facebook APK. Algunas de las características y configuraciones son:</p>
67
- <ul>
68
- <li>El feed de noticias, donde puedes ver publicaciones de tus amigos, páginas, grupos u otras fuentes que sigues. </li>
69
- <li>El perfil, donde se puede ver su información personal, fotos, vídeos, mensajes, amigos, y otros detalles. </li>
70
- <li>El mensajero, donde puedes chatear con tus amigos, enviar pegatinas, mensajes de voz, fotos, videos u otros archivos. </li>
71
- <li>Las notificaciones, donde puedes ver alertas de tus amigos, páginas, grupos, eventos u otras actividades. </li>
72
- <li>El menú, donde se puede acceder a otras características como mercado, reloj, juegos, citas, o ajustes. </li>
73
- <li>La configuración, donde puede personalizar las preferencias de su cuenta, como seguridad, privacidad, notificaciones, idioma o ayuda. </li>
74
- </ul>
75
- <h2>Conclusión</h2>
76
- <p>En conclusión, la descarga de la versión antigua de Facebook APK es una manera de disfrutar de las viejas características y funciones de la aplicación de Facebook que pueden no estar disponibles en la nueva versión. Sin embargo, también tiene algunos inconvenientes, como perderse las nuevas características y mejoras, exponerse a riesgos de seguridad, violar los términos y condiciones de Facebook o experimentar problemas de compatibilidad. Por lo tanto, debe sopesar los pros y los contras de la descarga de la versión antigua de Facebook APK antes de decidir hacerlo. También debe tener cuidado al encontrar, instalar y usar la versión antigua de Facebook APK, ya que puede no ser confiable, seguro o legal. Esperamos que este artículo le ha ayudado a entender cómo descargar la versión antigua de Facebook APK y qué esperar de ella. </p>
77
- <h2>Preguntas frecuentes</h2>
78
- <h3>¿Es segura la versión antigua de Facebook APK? </h3>
79
- <p>A: Depende de la fuente. Si la descarga de fuentes oficiales como Google Play Store, es generalmente segura; si la descarga de fuentes de terceros, puede exponerse a malware, virus u otros riesgos, por lo que debe verificar los archivos antes de instalarlos. </p>
80
- <h3>¿Cuál es la diferencia entre Facebook y Facebook Lite? </h3>
81
- <p>Facebook Lite es una versión de la aplicación de Facebook que está diseñada para dispositivos de gama baja o conexiones a Internet lentas. Tiene un tamaño más pequeño y consume menos datos que la aplicación regular de Facebook. También tiene menos características y funciones que la aplicación regular de Facebook, como historias, carretes o mercado. Sin embargo, todavía te permite conectarte con tus amigos, publicar actualizaciones, chatear con messenger y ver videos. </p>
82
- <h3>Cómo actualizar la versión antigua de Facebook APK? </h3>
83
- <p>Para actualizar la versión antigua de Facebook APK, es necesario descargar e instalar la última versión de la aplicación de la Google Play Store u otras fuentes oficiales. También puede comprobar si hay actualizaciones dentro de la aplicación yendo al menú y tocando en "Configuración" y luego "Actualizaciones de la aplicación". Sin embargo, la actualización de la versión antigua de Facebook APK puede sobrescribir las viejas características y funciones que te gustaban con las nuevas que no te gustan. </p>
84
- <h3> ¿Cómo desinstalar la versión antigua de Facebook APK? </h3>
85
- <p>Para desinstalar la versión antigua de Facebook APK, es necesario ir a la configuración de su dispositivo y toque en "Aplicaciones" o "Aplicaciones". A continuación, puede encontrar la aplicación de Facebook y toque en ella. A continuación, puede tocar en "Desinstalar" y confirmar su elección pulsando en "Aceptar". También puede desinstalar la versión antigua de Facebook APK presionando durante mucho tiempo el icono de la aplicación en la pantalla de inicio del dispositivo o cajón de aplicaciones y arrastrarlo a la papelera. </p>
86
- <h3>¿Cómo contactar al soporte de Facebook? </h3>
87
- <p>Si usted tiene alguna pregunta, problema, o retroalimentación sobre la versión antigua de Facebook APK o cualquier otro producto o servicio de Facebook, puede ponerse en contacto con el soporte de Facebook yendo al menú y tocando en "Ayuda y soporte". A continuación, puede elegir entre varias opciones como "Centro de ayuda", "Informar de un problema", "Ayuda de la comunidad" o "Bandeja de entrada de soporte". También puede visitar el sitio web del Centro de ayuda de Facebook o el sitio web de la Comunidad de ayuda de Facebook para obtener más información y asistencia. </p>
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/candidate.py DELETED
@@ -1,34 +0,0 @@
1
- from pip._vendor.packaging.version import parse as parse_version
2
-
3
- from pip._internal.models.link import Link
4
- from pip._internal.utils.models import KeyBasedCompareMixin
5
-
6
-
7
- class InstallationCandidate(KeyBasedCompareMixin):
8
- """Represents a potential "candidate" for installation."""
9
-
10
- __slots__ = ["name", "version", "link"]
11
-
12
- def __init__(self, name: str, version: str, link: Link) -> None:
13
- self.name = name
14
- self.version = parse_version(version)
15
- self.link = link
16
-
17
- super().__init__(
18
- key=(self.name, self.version, self.link),
19
- defining_class=InstallationCandidate,
20
- )
21
-
22
- def __repr__(self) -> str:
23
- return "<InstallationCandidate({!r}, {!r}, {!r})>".format(
24
- self.name,
25
- self.version,
26
- self.link,
27
- )
28
-
29
- def __str__(self) -> str:
30
- return "{!r} candidate (version {} at {})".format(
31
- self.name,
32
- self.version,
33
- self.link,
34
- )
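# A minimal sketch of how the InstallationCandidate class above could be
# exercised, assuming a pip checkout where these internal modules are
# importable; pip's internals are not a stable public API.
from pip._internal.models.link import Link
from pip._internal.models.candidate import InstallationCandidate

old = InstallationCandidate("demo", "1.0", Link("https://example.com/demo-1.0.tar.gz"))
new = InstallationCandidate("demo", "2.0", Link("https://example.com/demo-2.0.tar.gz"))

# KeyBasedCompareMixin keys comparisons on (name, version, link), and
# parse_version makes the version field properly orderable.
assert new > old
print(new)  # 'demo' candidate (version 2.0 at https://example.com/demo-2.0.tar.gz)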
spaces/BirdL/DONOTUSEDemo/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: DONOTUSEDemo
3
- emoji: 🐢
4
- colorFrom: gray
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.47.0
8
- app_file: app.py
9
- pinned: false
10
- tags:
11
- - not-for-all-audiences
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Bonosa2/parrot-chat-bot/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Parrot Chat Bot
3
- emoji: 🏆
4
- colorFrom: purple
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.29.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/meta_arch/build.py DELETED
@@ -1,19 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- from detectron2.utils.registry import Registry
3
-
4
- META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip
5
- META_ARCH_REGISTRY.__doc__ = """
6
- Registry for meta-architectures, i.e. the whole model.
7
-
8
- The registered object will be called with `obj(cfg)`
9
- and expected to return a `nn.Module` object.
10
- """
11
-
12
-
13
- def build_model(cfg):
14
- """
15
- Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
16
- Note that it does not load any weights from ``cfg``.
17
- """
18
- meta_arch = cfg.MODEL.META_ARCHITECTURE
19
- return META_ARCH_REGISTRY.get(meta_arch)(cfg)
spaces/CVPR/LIVE/thrust/thrust/detail/alignment.h DELETED
@@ -1,230 +0,0 @@
1
- /*
2
- * Copyright 2017 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- /*! \file alignment.h
18
- * \brief Type-alignment utilities.
19
- */
20
-
21
- #pragma once
22
-
23
- #include <thrust/detail/config.h>
24
- #include <thrust/detail/type_traits.h> // For `integral_constant`.
25
-
26
- #include <cstddef> // For `std::size_t` and `std::max_align_t`.
27
-
28
- #if THRUST_CPP_DIALECT >= 2011
29
- #include <type_traits> // For `std::alignment_of` and `std::aligned_storage`.
30
- #endif
31
-
32
- namespace thrust
33
- {
34
- namespace detail
35
- {
36
-
37
- /// \p THRUST_ALIGNOF is a macro that takes a single type-id as a parameter,
38
- /// and returns the alignment requirement of the type in bytes.
39
- ///
40
- /// It is an approximation of C++11's `alignof` operator.
41
- ///
42
- /// Note: MSVC does not allow the builtin used to implement this to be placed
43
- /// inside of a `__declspec(align(#))` attribute. As a workaround, you can
44
- /// assign the result of \p THRUST_ALIGNOF to a variable and pass the variable
45
- /// as the argument to `__declspec(align(#))`.
46
- #if THRUST_CPP_DIALECT >= 2011
47
- #define THRUST_ALIGNOF(x) alignof(x)
48
- #else
49
- #define THRUST_ALIGNOF(x) __alignof(x)
50
- #endif
51
-
52
- /// \p alignment_of provides the member constant `value` which is equal to the
53
- /// alignment requirement of the type `T`, as if obtained by a C++11 `alignof`
54
- /// expression.
55
- ///
56
- /// It is an implementation of C++11's \p std::alignment_of.
57
- #if THRUST_CPP_DIALECT >= 2011
58
- template <typename T>
59
- using alignment_of = std::alignment_of<T>;
60
- #else
61
- template <typename T>
62
- struct alignment_of;
63
-
64
- template <typename T, std::size_t size_diff>
65
- struct alignment_of_helper
66
- {
67
- static const std::size_t value =
68
- integral_constant<std::size_t, size_diff>::value;
69
- };
70
-
71
- template <typename T>
72
- struct alignment_of_helper<T, 0>
73
- {
74
- static const std::size_t value = alignment_of<T>::value;
75
- };
76
-
77
- template <typename T>
78
- struct alignment_of
79
- {
80
- private:
81
- struct impl
82
- {
83
- T x;
84
- char c;
85
- };
86
-
87
- public:
88
- static const std::size_t value =
89
- alignment_of_helper<impl, sizeof(impl) - sizeof(T)>::value;
90
- };
91
- #endif
92
-
93
- /// \p aligned_type provides the nested type `type`, which is a trivial
94
- /// type whose alignment requirement is a divisor of `Align`.
95
- ///
96
- /// The behavior is undefined if `Align` is not a power of 2.
97
- template <std::size_t Align>
98
- struct aligned_type;
99
-
100
- #if THRUST_CPP_DIALECT >= 2011 \
101
- && (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) \
102
- && (THRUST_GCC_VERSION >= 40800)
103
- // C++11 implementation, excluding GCC 4.7, which doesn't have `alignas`.
104
- template <std::size_t Align>
105
- struct aligned_type
106
- {
107
- struct alignas(Align) type {};
108
- };
109
- #elif (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) \
110
- || ( (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) \
111
- && (THRUST_GCC_VERSION < 40600))
112
- // C++03 implementation for MSVC and GCC <= 4.5.
113
- //
114
- // We have to implement `aligned_type` with specializations for MSVC
115
- // and GCC 4.2.x and older because they require literals as arguments to
116
- // their alignment attribute.
117
-
118
- #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC)
119
- // MSVC implementation.
120
- #define THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION(X) \
121
- template <> \
122
- struct aligned_type<X> \
123
- { \
124
- __declspec(align(X)) struct type {}; \
125
- }; \
126
- /**/
127
- #else
128
- // GCC <= 4.2 implementation.
129
- #define THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION(X) \
130
- template <> \
131
- struct aligned_type<X> \
132
- { \
133
- struct type {} __attribute__((aligned(X))); \
134
- }; \
135
- /**/
136
- #endif
137
-
138
- THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION(1);
139
- THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION(2);
140
- THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION(4);
141
- THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION(8);
142
- THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION(16);
143
- THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION(32);
144
- THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION(64);
145
- THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION(128);
146
-
147
- #undef THRUST_DEFINE_ALIGNED_TYPE_SPECIALIZATION
148
- #else
149
- // C++03 implementation for GCC > 4.5, Clang, PGI, ICPC, and xlC.
150
- template <std::size_t Align>
151
- struct aligned_type
152
- {
153
- struct type {} __attribute__((aligned(Align)));
154
- };
155
- #endif
156
-
157
- /// \p aligned_storage provides the nested type `type`, which is a trivial type
158
- /// suitable for use as uninitialized storage for any object whose size is at
159
- /// most `Len` bytes and whose alignment requirement is a divisor of `Align`.
160
- ///
161
- /// The behavior is undefined if `Len` is 0 or `Align` is not a power of 2.
162
- ///
163
- /// It is an implementation of C++11's \p std::aligned_storage.
164
- #if THRUST_CPP_DIALECT >= 2011
165
- template <std::size_t Len, std::size_t Align>
166
- using aligned_storage = std::aligned_storage<Len, Align>;
167
- #else
168
- template <std::size_t Len, std::size_t Align>
169
- struct aligned_storage
170
- {
171
- union type
172
- {
173
- unsigned char data[Len];
174
- // We put this into the union in case the alignment requirement of
175
- // an array of `unsigned char` of length `Len` is greater than
176
- // `Align`.
177
-
178
- typename aligned_type<Align>::type align;
179
- };
180
- };
181
- #endif
182
-
183
- /// \p max_align_t is a trivial type whose alignment requirement is at least as
184
- /// strict (as large) as that of every scalar type.
185
- ///
186
- /// It is an implementation of C++11's \p std::max_align_t.
187
- #if THRUST_CPP_DIALECT >= 2011 \
188
- && (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) \
189
- && (THRUST_GCC_VERSION >= 40900)
190
- // GCC 4.7 and 4.8 don't have `std::max_align_t`.
191
- using max_align_t = std::max_align_t;
192
- #else
193
- union max_align_t
194
- {
195
- // These cannot be private because C++03 POD types cannot have private
196
- // data members.
197
- char c;
198
- short s;
199
- int i;
200
- long l;
201
- float f;
202
- double d;
203
- long long ll;
204
- long double ld;
205
- void* p;
206
- };
207
- #endif
208
-
209
- /// \p aligned_reinterpret_cast `reinterpret_cast`s \p u of type \p U to `void*`
210
- /// and then `reinterpret_cast`s the result to \p T. The indirection through
211
- /// `void*` suppresses compiler warnings when the alignment requirement of \p *u
212
- /// is less than the alignment requirement of \p *t. The caller of
213
- /// \p aligned_reinterpret_cast is responsible for ensuring that the alignment
214
- /// requirements are actually satisified.
215
- template <typename T, typename U>
216
- __host__ __device__
217
- T aligned_reinterpret_cast(U u)
218
- {
219
- return reinterpret_cast<T>(reinterpret_cast<void*>(u));
220
- }
221
-
222
- __host__ __device__
223
- inline std::size_t aligned_storage_size(std::size_t n, std::size_t align)
224
- {
225
- return ((n + align - 1) / align) * align;
226
- }
227
-
228
- } // end namespace detail
229
- } // end namespace thrust
230
-
spaces/CVPR/WALT/mmdet/apis/__init__.py DELETED
@@ -1,10 +0,0 @@
1
- from .inference import (async_inference_detector, inference_detector,
2
- init_detector, show_result_pyplot)
3
- from .test import multi_gpu_test, single_gpu_test
4
- from .train import get_root_logger, set_random_seed, train_detector
5
-
6
- __all__ = [
7
- 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector',
8
- 'async_inference_detector', 'inference_detector', 'show_result_pyplot',
9
- 'multi_gpu_test', 'single_gpu_test'
10
- ]
spaces/CVPR/monoscene_lite/monoscene/CRP3D.py DELETED
@@ -1,97 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from monoscene.modules import (
4
- Process,
5
- ASPP,
6
- )
7
-
8
-
9
- class CPMegaVoxels(nn.Module):
10
- def __init__(self, feature, size, n_relations=4, bn_momentum=0.0003):
11
- super().__init__()
12
- self.size = size
13
- self.n_relations = n_relations
14
- print("n_relations", self.n_relations)
15
- self.flatten_size = size[0] * size[1] * size[2]
16
- self.feature = feature
17
- self.context_feature = feature * 2
18
- self.flatten_context_size = (size[0] // 2) * (size[1] // 2) * (size[2] // 2)
19
- padding = ((size[0] + 1) % 2, (size[1] + 1) % 2, (size[2] + 1) % 2)
20
-
21
- self.mega_context = nn.Sequential(
22
- nn.Conv3d(
23
- feature, self.context_feature, stride=2, padding=padding, kernel_size=3
24
- ),
25
- )
26
- self.flatten_context_size = (size[0] // 2) * (size[1] // 2) * (size[2] // 2)
27
-
28
- self.context_prior_logits = nn.ModuleList(
29
- [
30
- nn.Sequential(
31
- nn.Conv3d(
32
- self.feature,
33
- self.flatten_context_size,
34
- padding=0,
35
- kernel_size=1,
36
- ),
37
- )
38
- for i in range(n_relations)
39
- ]
40
- )
41
- self.aspp = ASPP(feature, [1, 2, 3])
42
-
43
- self.resize = nn.Sequential(
44
- nn.Conv3d(
45
- self.context_feature * self.n_relations + feature,
46
- feature,
47
- kernel_size=1,
48
- padding=0,
49
- bias=False,
50
- ),
51
- Process(feature, nn.BatchNorm3d, bn_momentum, dilations=[1]),
52
- )
53
-
54
- def forward(self, input):
55
- ret = {}
56
- bs = input.shape[0]
57
-
58
- x_agg = self.aspp(input)
59
-
60
- # get the mega context
61
- x_mega_context_raw = self.mega_context(x_agg)
62
- x_mega_context = x_mega_context_raw.reshape(bs, self.context_feature, -1)
63
- x_mega_context = x_mega_context.permute(0, 2, 1)
64
-
65
- # get context prior map
66
- x_context_prior_logits = []
67
- x_context_rels = []
68
- for rel in range(self.n_relations):
69
-
70
- # Compute the relation matrices
71
- x_context_prior_logit = self.context_prior_logits[rel](x_agg)
72
- x_context_prior_logit = x_context_prior_logit.reshape(
73
- bs, self.flatten_context_size, self.flatten_size
74
- )
75
- x_context_prior_logits.append(x_context_prior_logit.unsqueeze(1))
76
-
77
- x_context_prior_logit = x_context_prior_logit.permute(0, 2, 1)
78
- x_context_prior = torch.sigmoid(x_context_prior_logit)
79
-
80
- # Multiply the relation matrices with the mega context to gather context features
81
- x_context_rel = torch.bmm(x_context_prior, x_mega_context) # bs, N, f
82
- x_context_rels.append(x_context_rel)
83
-
84
- x_context = torch.cat(x_context_rels, dim=2)
85
- x_context = x_context.permute(0, 2, 1)
86
- x_context = x_context.reshape(
87
- bs, x_context.shape[1], self.size[0], self.size[1], self.size[2]
88
- )
89
-
90
- x = torch.cat([input, x_context], dim=1)
91
- x = self.resize(x)
92
-
93
- x_context_prior_logits = torch.cat(x_context_prior_logits, dim=1)
94
- ret["P_logits"] = x_context_prior_logits
95
- ret["x"] = x
96
-
97
- return ret
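# A minimal smoke test for the CPMegaVoxels module above, assuming the
# monoscene package and PyTorch are installed; the feature width and the
# 8x8x8 grid are arbitrary choices for illustration.
import torch
from monoscene.CRP3D import CPMegaVoxels

model = CPMegaVoxels(feature=32, size=(8, 8, 8), n_relations=4)
x = torch.randn(1, 32, 8, 8, 8)  # (batch, feature, X, Y, Z)

out = model(x)
print(out["x"].shape)         # refined features: (1, 32, 8, 8, 8)
print(out["P_logits"].shape)  # relation logits: (1, 4, 64, 512)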
spaces/ChandraMohanNayal/AutoGPT/tests/browse_tests.py DELETED
@@ -1,26 +0,0 @@
1
- import os
2
- import sys
3
- import unittest
4
-
5
- from bs4 import BeautifulSoup
6
-
7
- sys.path.append(os.path.abspath("../scripts"))
8
-
9
- from browse import extract_hyperlinks
10
-
11
-
12
- class TestBrowseLinks(unittest.TestCase):
13
- def test_extract_hyperlinks(self):
14
- body = """
15
- <body>
16
- <a href="https://google.com">Google</a>
17
- <a href="foo.html">Foo</a>
18
- <div>Some other crap</div>
19
- </body>
20
- """
21
- soup = BeautifulSoup(body, "html.parser")
22
- links = extract_hyperlinks(soup, "http://example.com")
23
- self.assertEqual(
24
- links,
25
- [("Google", "https://google.com"), ("Foo", "http://example.com/foo.html")],
26
- )
spaces/CikeyQI/Yunzai/Yunzai/lib/plugins/loader.js DELETED
@@ -1,872 +0,0 @@
1
- import util from "node:util"
2
- import fs from "node:fs"
3
- import lodash from "lodash"
4
- import cfg from "../config/config.js"
5
- import plugin from "./plugin.js"
6
- import schedule from "node-schedule"
7
- import { segment } from "oicq"
8
- import chokidar from "chokidar"
9
- import moment from "moment"
10
- import path from "node:path"
11
- import common from "../common/common.js"
12
- import Runtime from "./runtime.js"
13
- import Handler from './handler.js'
14
-
15
- /** 全局变量 plugin */
16
- global.plugin = plugin
17
- global.segment = segment
18
-
19
- /**
20
- * 加载插件
21
- */
22
- class PluginsLoader {
23
- constructor() {
24
- this.priority = []
25
- this.handler = {}
26
- this.task = []
27
- this.dir = "./plugins"
28
-
29
- /** 命令冷却cd */
30
- this.groupCD = {}
31
- this.singleCD = {}
32
-
33
- /** 插件监听 */
34
- this.watcher = {}
35
-
36
- this.msgThrottle = {}
37
-
38
- /** 星铁命令前缀 */
39
- this.srReg = /^#?(\*|星铁|星轨|穹轨|星穹|崩铁|星穹铁道|崩坏星穹铁道|铁道)+/
40
- }
41
-
42
- /**
43
- * 监听事件加载
44
- * @param isRefresh 是否刷新
45
- */
46
- async load(isRefresh = false) {
47
- this.delCount()
48
- if (!lodash.isEmpty(this.priority) && !isRefresh) return
49
-
50
- const files = this.getPlugins()
51
-
52
- logger.info("-----------")
53
- logger.info("加载插件中...")
54
-
55
- let pluCount = 0
56
-
57
- let packageErr = []
58
- for (let File of files) {
59
- try {
60
- let tmp = await import(File.path)
61
- let apps = tmp
62
- if (tmp.apps) {
63
- apps = { ...tmp.apps }
64
- }
65
- lodash.forEach(apps, (p, i) => {
66
- if (!p.prototype) return
67
- pluCount++
68
- /* eslint-disable new-cap */
69
- let plugin = new p()
70
- logger.debug(`载入插件 [${File.name}][${plugin.name}]`)
71
- /** 执行初始化 */
72
- this.runInit(plugin)
73
- /** 初始化定时任务 */
74
- this.collectTask(plugin.task)
75
- this.priority.push({
76
- class: p,
77
- key: File.name,
78
- name: plugin.name,
79
- priority: plugin.priority
80
- })
81
- if (plugin.handler) {
82
- lodash.forEach(plugin.handler, ({ fn, key, priority }) => {
83
- Handler.add({
84
- ns: plugin.namespace || File.name,
85
- key: key,
86
- self: plugin,
87
- property: priority || plugin.priority || 500,
88
- fn: plugin[fn]
89
- })
90
- })
91
- }
92
- })
93
- } catch (error) {
94
- if (error.stack.includes("Cannot find package")) {
95
- packageErr.push({ error, File })
96
- } else {
97
- logger.error(`载入插件错误:${logger.red(File.name)}`)
98
- logger.error(decodeURI(error.stack))
99
- }
100
- }
101
- }
102
-
103
- this.packageTips(packageErr)
104
- this.creatTask()
105
-
106
- logger.info(`加载定时任务[${this.task.length}个]`)
107
- logger.info(`加载插件[${pluCount}个]`)
108
-
109
- /** 优先级排序 */
110
- this.priority = lodash.orderBy(this.priority, ["priority"], ["asc"])
111
- }
112
-
113
- async runInit(plugin) {
114
- plugin.init && plugin.init()
115
- }
116
-
117
- packageTips(packageErr) {
118
- if (!packageErr || packageErr.length <= 0) return
119
- logger.mark("--------插件载入错误--------")
120
- packageErr.forEach(v => {
121
- let pack = v.error.stack.match(/'(.+?)'/g)[0].replace(/'/g, "")
122
- logger.mark(`${v.File.name} 缺少依赖:${logger.red(pack)}`)
123
- logger.mark(`新增插件后请执行安装命令:${logger.red("pnpm i")} 安装依赖`)
124
- logger.mark("如安装后仍未解决可联系插件作者解决")
125
- })
126
- // logger.error("或者使用其他包管理工具安装依赖")
127
- logger.mark("---------------------")
128
- }
129
-
130
- getPlugins() {
131
- let ignore = ["index.js"]
132
- let files = fs.readdirSync(this.dir, { withFileTypes: true })
133
- let ret = []
134
- for (let val of files) {
135
- let filepath = "../../plugins/" + val.name
136
- let tmp = {
137
- name: val.name,
138
- }
139
- if (val.isFile()) {
140
- if (!val.name.endsWith(".js")) continue
141
- if (ignore.includes(val.name)) continue
142
- tmp.path = filepath
143
- ret.push(tmp)
144
- continue
145
- }
146
-
147
- if (fs.existsSync(`${this.dir}/${val.name}/index.js`)) {
148
- tmp.path = filepath + "/index.js"
149
- ret.push(tmp)
150
- continue
151
- }
152
-
153
- let apps = fs.readdirSync(`${this.dir}/${val.name}`, { withFileTypes: true })
154
- for (let app of apps) {
155
- if (!app.name.endsWith(".js")) continue
156
- if (ignore.includes(app.name)) continue
157
-
158
- ret.push({
159
- name: `${val.name}/${app.name}`,
160
- path: `../../plugins/${val.name}/${app.name}`
161
- })
162
-
163
- /** 监听热更新 */
164
- this.watch(val.name, app.name)
165
- }
166
- }
167
-
168
- return ret
169
- }
170
-
171
- /**
172
- * 处理事件
173
- *
174
- * 参数文档 https://github.com/TimeRainStarSky/Yunzai/tree/docs
175
- * @param e 事件
176
- */
177
- async deal(e) {
178
- /** 检查黑白名单 */
179
- if (!this.checkBlack(e)) return
180
- /** 冷却 */
181
- if (!this.checkLimit(e)) return
182
- /** 处理事件 */
183
- this.dealEvent(e)
184
- /** 处理消息 */
185
- this.dealMsg(e)
186
- /** 处理回复 */
187
- this.reply(e)
188
- /** 过滤事件 */
189
- let priority = []
190
- /** 注册runtime */
191
- await Runtime.init(e)
192
-
193
- this.priority.forEach(v => {
194
- let p = new v.class(e)
195
- p.e = e
196
- /** 判断是否启用功能 */
197
- if (!this.checkDisable(e, p)) return
198
- /** 过滤事件 */
199
- if (!this.filtEvent(e, p)) return
200
- priority.push(p)
201
- })
202
-
203
- for (let plugin of priority) {
204
- /** 上下文hook */
205
- if (plugin.getContext) {
206
- let context = plugin.getContext()
207
- if (!lodash.isEmpty(context)) {
208
- for (let fnc in context) {
209
- plugin[fnc](context[fnc])
210
- }
211
- return
212
- }
213
- }
214
-
215
- /** 群上下文hook */
216
- if (plugin.getContextGroup) {
217
- let context = plugin.getContextGroup()
218
- if (!lodash.isEmpty(context)) {
219
- for (let fnc in context) {
220
- plugin[fnc](context[fnc])
221
- }
222
- return
223
- }
224
- }
225
- }
226
-
227
- /** 是否只关注主动at */
228
- if (!this.onlyReplyAt(e)) return
229
-
230
- // 判断是否是星铁命令,若是星铁命令则标准化处理
231
- // e.isSr = true,且命令标准化为 #星铁 开头
232
- if (this.srReg.test(e.msg)) {
233
- e.isSr = true
234
- e.msg = e.msg.replace(this.srReg, "#星铁")
235
- }
236
-
237
- /** accept */
238
- for (let plugin of priority) {
239
- /** accept hook */
240
- if (plugin.accept) {
241
- let res = plugin.accept(e)
242
-
243
- if (util.types.isPromise(res)) res = await res
244
-
245
- if (res === "return") return
246
-
247
- if (res) break
248
- }
249
- }
250
-
251
- /* eslint-disable no-labels */
252
- a: for (let plugin of priority) {
253
- /** 正则匹配 */
254
- if (plugin.rule) {
255
- for (let v of plugin.rule) {
256
- /** 判断事件 */
257
- if (v.event && !this.filtEvent(e, v)) continue
258
-
259
- if (new RegExp(v.reg).test(e.msg)) {
260
- e.logFnc = `[${plugin.name}][${v.fnc}]`
261
-
262
- if (v.log !== false) {
263
- logger.mark(`${e.logFnc}${e.logText} ${lodash.truncate(e.msg, { length: 80 })}`)
264
- }
265
-
266
- /** 判断权限 */
267
- if (!this.filtPermission(e, v)) break a
268
-
269
- try {
270
- let res = plugin[v.fnc] && plugin[v.fnc](e)
271
-
272
- let start = Date.now()
273
-
274
- if (util.types.isPromise(res)) res = await res
275
-
276
- if (res !== false) {
277
- /** 设置冷却cd */
278
- this.setLimit(e)
279
- if (v.log !== false) {
280
- logger.mark(`${e.logFnc} ${lodash.truncate(e.msg, { length: 80 })} 处理完成 ${Date.now() - start}ms`)
281
- }
282
- break a
283
- }
284
- } catch (error) {
285
- logger.error(`${e.logFnc}`)
286
- logger.error(error.stack)
287
- break a
288
- }
289
- }
290
- }
291
- }
292
- }
293
- }
294
-
295
- /** 过滤事件 */
296
- filtEvent(e, v) {
297
- if (!v.event) return false
298
- let event = v.event.split(".")
299
- let eventMap = {
300
- message: ["post_type", "message_type", "sub_type"],
301
- notice: ["post_type", "notice_type", "sub_type"],
302
- request: ["post_type", "request_type", "sub_type"]
303
- }
304
- let newEvent = []
305
- event.forEach((val, index) => {
306
- if (val === "*") {
307
- newEvent.push(val)
308
- } else if (eventMap[e.post_type]) {
309
- newEvent.push(e[eventMap[e.post_type][index]])
310
- }
311
- })
312
- newEvent = newEvent.join(".")
313
-
314
- return v.event === newEvent
315
- }
316
-
317
- /** 判断权限 */
318
- filtPermission(e, v) {
319
- if (v.permission == "all" || !v.permission) return true
320
-
321
- if (v.permission == "master") {
322
- if (e.isMaster) {
323
- return true
324
- } else {
325
- e.reply("暂无权限,只有主人才能操作")
326
- return false
327
- }
328
- }
329
-
330
- if (e.isGroup) {
331
- if (!e.member?._info) {
332
- e.reply("数据加载中,请稍后再试")
333
- return false
334
- }
335
- if (v.permission == "owner") {
336
- if (!e.member.is_owner) {
337
- e.reply("暂无权限,只有群主才能操作")
338
- return false
339
- }
340
- }
341
- if (v.permission == "admin") {
342
- if (!e.member.is_admin) {
343
- e.reply("暂无权限,只有管理员才能操作")
344
- return false
345
- }
346
- }
347
- }
348
-
349
- return true
350
- }
351
-
352
- dealEvent(e) {
353
- if (!e.friend && e.user_id) e.friend = e.bot.pickFriend(e.user_id)
354
- if (!e.group && e.group_id) e.group = e.bot.pickGroup(e.group_id)
355
- if (!e.member && e.group && e.user_id) e.member = e.group.pickMember(e.user_id)
356
- for (const i of [e.friend, e.group, e.member]) {
357
- if (typeof i != "object") continue
358
- if (!i.makeForwardMsg) i.makeForwardMsg = Bot.makeForwardMsg
359
- if (!i.sendForwardMsg) i.sendForwardMsg = msg => Bot.sendForwardMsg(msg => i.sendMsg(msg), msg)
360
- if (!i.getInfo) i.getInfo = () => i
361
- }
362
- }
363
-
364
- /**
365
- * 处理消息,加入自定义字段
366
- * @param e.msg 文本消息,多行会自动拼接
367
- * @param e.img 图片消息数组
368
- * @param e.atBot 是否at机器人
369
- * @param e.at 是否at,多个at 以最后的为准
370
- * @param e.file 接受到的文件
371
- * @param e.isPrivate 是否私聊
372
- * @param e.isGroup 是否群聊
373
- * @param e.isMaster 是否管理员
374
- * @param e.logText 日志用户字符串
375
- * @param e.logFnc 日志方法字符串
376
- */
377
- dealMsg(e) {
378
- if (e.message) for (const i of e.message) {
379
- switch (i.type) {
380
- case "text":
381
- if (!e.msg) e.msg = ""
382
- if (i.text) e.msg += i.text.replace(/^\s*[##井]+\s*/, "#").replace(/^\s*[\\**※]+\s*/, "*").trim()
383
- break
384
- case "image":
385
- if (Array.isArray(e.img))
386
- e.img.push(i.url)
387
- else
388
- e.img = [i.url]
389
- break
390
- case "at":
391
- if (i.qq == e.self_id)
392
- e.atBot = true
393
- else
394
- e.at = i.qq
395
- break
396
- case "reply":
397
- e.reply_id = i.id
398
- if (e.group?.getMsg)
399
- e.getReply = () => e.group.getMsg(e.reply_id)
400
- else if (e.friend?.getMsg)
401
- e.getReply = () => e.friend.getMsg(e.reply_id)
402
- break
403
- case "file":
404
- e.file = i
405
- break
406
- }
407
- }
408
-
409
- e.logText = ""
410
-
411
- if (e.message_type == "private" || e.notice_type == "friend") {
412
- e.isPrivate = true
413
-
414
- if (e.sender) {
415
- e.sender.card = e.sender.nickname
416
- } else {
417
- e.sender = {
418
- card: e.friend?.nickname,
419
- nickname: e.friend?.nickname
420
- }
421
- }
422
-
423
- e.logText = `[${e.sender?.nickname ? `${e.sender.nickname}(${e.user_id})` : e.user_id}]`
424
- }
425
-
426
- if (e.message_type == "group" || e.notice_type == "group") {
427
- e.isGroup = true
428
- if (e.sender) {
429
- e.sender.card = e.sender.card || e.sender.nickname
430
- } else if (e.member) {
431
- e.sender = {
432
- card: e.member.card || e.member.nickname
433
- }
434
- } else if (e.nickname) {
435
- e.sender = {
436
- card: e.nickname,
437
- nickname: e.nickname
438
- }
439
- } else {
440
- e.sender = {
441
- card: "",
442
- nickname: ""
443
- }
444
- }
445
-
446
- if (!e.group_name) e.group_name = e.group?.name
447
-
448
- e.logText = `[${e.group_name ? `${e.group_name}(${e.group_id})` : e.group_id}, ${e.sender?.nickname ? `${e.sender.nickname}(${e.user_id})` : e.user_id}]`
449
- }
450
-
451
- if (e.user_id && cfg.master[e.self_id]?.includes(String(e.user_id))) {
452
- e.isMaster = true
453
- }
454
-
455
- /** 只关注主动at msg处理 */
456
- if (e.msg && e.isGroup) {
457
- let groupCfg = cfg.getGroup(e.self_id, e.group_id)
458
- let alias = groupCfg.botAlias
459
- if (!Array.isArray(alias)) {
460
- alias = [alias]
461
- }
462
- for (let name of alias) {
463
- if (e.msg.startsWith(name)) {
464
- e.msg = lodash.trimStart(e.msg, name).trim()
465
- e.hasAlias = true
466
- break
467
- }
468
- }
469
- }
470
- }
471
-
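Note: the "text" case in dealMsg canonicalizes command prefixes, so full-width or repeated markers all collapse to a single half-width "#" or "*" before rule regexes run. A standalone sketch of the same two replacements:

// Same replacements as the "text" branch above, extracted for illustration.
const normalize = text => text
  .replace(/^\s*[##井]+\s*/, "#")    // full-width #, 井, repeats -> "#"
  .replace(/^\s*[\\**※]+\s*/, "*")   // backslash, full-width *, ※ -> "*"
  .trim()

console.log(normalize(" ## 帮助"))  // "#帮助"
console.log(normalize("※ 十连"))    // "*十连"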
-  /** Wrap reply and catch send failures */
-  reply(e) {
-    if (e.reply)
-      e.replyNew = e.reply
-    else
-      e.replyNew = msg => {
-        if (e.isGroup) {
-          if (e.group?.sendMsg) {
-            return e.group.sendMsg(msg)
-          } else {
-            return e.bot.pickGroup(e.group_id).sendMsg(msg)
-          }
-        } else {
-          if (e.friend?.sendMsg) {
-            return e.friend.sendMsg(msg)
-          } else {
-            return e.bot.pickFriend(e.user_id).sendMsg(msg)
-          }
-        }
-      }
-
-    /**
-     * @param msg message to send
-     * @param quote whether to quote-reply
-     * @param data.recallMsg recall delay in seconds (0-120); 0 means no recall
-     * @param data.at whether to at-mention the user
-     */
-    e.reply = async (msg = "", quote = false, data = {}) => {
-      if (!msg) return false
-
-      let { recallMsg = 0, at = "" } = data
-
-      if (at) {
-        if (at === true)
-          at = e.user_id
-        if (Array.isArray(msg))
-          msg.unshift(segment.at(at))
-        else
-          msg = [segment.at(at), msg]
-      }
-
-      if (quote && e.message_id) {
-        if (Array.isArray(msg))
-          msg.unshift(segment.reply(e.message_id))
-        else
-          msg = [segment.reply(e.message_id), msg]
-      }
-
-      let res
-      try {
-        res = await e.replyNew(msg)
-      } catch (err) {
-        if (typeof msg != "string")
-          msg = lodash.truncate(JSON.stringify(msg), { length: 300 })
-        logger.error(`发送消息错误:${msg}`)
-        logger.error(err)
-      }
-
-      if (recallMsg > 0 && res?.message_id) {
-        if (e.group?.recallMsg)
-          setTimeout(() => {
-            e.group.recallMsg(res.message_id)
-            if (e.message_id)
-              e.group.recallMsg(e.message_id)
-          }, recallMsg * 1000)
-        else if (e.friend?.recallMsg)
-          setTimeout(() => {
-            e.friend.recallMsg(res.message_id)
-            if (e.message_id)
-              e.friend.recallMsg(e.message_id)
-          }, recallMsg * 1000)
-      }
-
-      this.count(e, msg)
-      return res
-    }
-  }
-
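Note: from a plugin's perspective the wrapped e.reply above composes the quote segment, the at segment, the send, and the delayed recall in one call. A hypothetical invocation:

// Hypothetical plugin code: quote the trigger message, at-mention the sender,
// and recall both the reply and the trigger after 30 seconds.
await e.reply("操作完成", true, { at: true, recallMsg: 30 })
// What is actually sent is roughly:
//   [segment.reply(e.message_id), segment.at(e.user_id), "操作完成"]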
-  count(e, msg) {
-    let screenshot = false
-    if (msg && msg?.file)
-      screenshot = true
-
-    this.saveCount("sendMsg")
-    if (screenshot)
-      this.saveCount("screenshot")
-
-    if (e.group_id) {
-      this.saveCount("sendMsg", e.group_id)
-      if (screenshot)
-        this.saveCount("screenshot", e.group_id)
-    }
-  }
-
-  saveCount(type, groupId = "") {
-    let key = "Yz:count:"
-
-    if (groupId) {
-      key += `group:${groupId}:`
-    }
-
-    let dayKey = `${key}${type}:day:${moment().format("MMDD")}`
-    let monthKey = `${key}${type}:month:${Number(moment().month()) + 1}`
-    let totalKey = `${key}${type}:total`
-
-    redis.incr(dayKey)
-    redis.incr(monthKey)
-    if (!groupId) redis.incr(totalKey)
-    redis.expire(dayKey, 3600 * 24 * 30)
-    redis.expire(monthKey, 3600 * 24 * 30)
-  }
-
-  delCount() {
-    let key = "Yz:count:"
-    redis.set(`${key}sendMsg:total`, "0")
-    redis.set(`${key}screenshot:total`, "0")
-  }
-
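Note: saveCount fans one event out into day/month/total counters. For a message sent on Jan 31 in group 12345 (both made up), the keys look like:

const keys = [
  "Yz:count:sendMsg:day:0131",             // day counter, 30-day TTL
  "Yz:count:sendMsg:month:1",              // month counter, 30-day TTL
  "Yz:count:sendMsg:total",                // global total, no expiry
  "Yz:count:group:12345:sendMsg:day:0131"  // per-group day counter (groups get no total)
]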
-  /** Collect scheduled tasks */
-  collectTask(task) {
-    if (Array.isArray(task)) {
-      task.forEach((val) => {
-        if (!val.cron) return
-        if (!val.name) throw new Error("插件任务名称错误")
-        this.task.push(val)
-      })
-    } else {
-      if (task.fnc && task.cron) {
-        if (!task.name) throw new Error("插件任务名称错误")
-        this.task.push(task)
-      }
-    }
-  }
-
-  /** Create scheduled tasks */
-  creatTask() {
-    if (process.argv[1].includes("test")) return
-    this.task.forEach((val) => {
-      val.job = schedule.scheduleJob(val.cron, async () => {
-        try {
-          if (val.log === true) {
-            logger.mark(`开始定时任务:${val.name}`)
-          }
-          let res = val.fnc()
-          if (util.types.isPromise(res)) res = await res
-          if (val.log === true) {
-            logger.mark(`定时任务完成:${val.name}`)
-          }
-        } catch (error) {
-          logger.error(`定时任务报错:${val.name}`)
-          logger.error(error)
-        }
-      })
-    })
-  }
-
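Note: a task object that survives collectTask and gets scheduled by creatTask needs at least name, cron and fnc. A hypothetical example (the cron expression is illustrative):

// Hypothetical task as accepted by collectTask/creatTask above.
const task = {
  name: "daily-report",            // required: a missing name throws
  cron: "0 30 8 * * *",            // node-schedule expression (seconds field included)
  fnc: async () => { /* work */ }, // sync or async; promises are awaited
  log: true                        // opt in to the start/finish log lines
}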
-  /** Check command cooldown */
-  checkLimit(e) {
-    /** Bot is muted in this group */
-    if (e.isGroup && e?.group?.mute_left > 0) return false
-    if (!e.message || e.isPrivate) return true
-
-    let config = cfg.getGroup(e.self_id, e.group_id)
-
-    if (config.groupCD && this.groupCD[e.group_id]) {
-      return false
-    }
-    if (config.singleCD && this.singleCD[`${e.group_id}.${e.user_id}`]) {
-      return false
-    }
-
-    let { msgThrottle } = this
-
-    let msgId = e.user_id + ':' + e.raw_message
-    if (msgThrottle[msgId]) {
-      return false
-    }
-    msgThrottle[msgId] = true
-    setTimeout(() => {
-      delete msgThrottle[msgId]
-    }, 200)
-
-    return true
-  }
-
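Note: the last block of checkLimit is a dedupe rather than a cooldown: the same user repeating the same raw text within 200 ms is dropped. Reduced sketch:

// Reduced sketch of the duplicate-message throttle above.
const msgThrottle = {}
function accept(userId, rawMessage) {
  const msgId = `${userId}:${rawMessage}`
  if (msgThrottle[msgId]) return false             // duplicate inside the window
  msgThrottle[msgId] = true
  setTimeout(() => delete msgThrottle[msgId], 200)
  return true
}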
-  /** Set command cooldown */
-  setLimit(e) {
-    if (!e.message || e.isPrivate) return
-    let config = cfg.getGroup(e.self_id, e.group_id)
-
-    if (config.groupCD) {
-      this.groupCD[e.group_id] = true
-      setTimeout(() => {
-        delete this.groupCD[e.group_id]
-      }, config.groupCD)
-    }
-    if (config.singleCD) {
-      let key = `${e.group_id}.${e.user_id}`
-      this.singleCD[key] = true
-      setTimeout(() => {
-        delete this.singleCD[key]
-      }, config.singleCD)
-    }
-  }
-
-  /** Whether to respond only when explicitly at-mentioned */
-  onlyReplyAt(e) {
-    if (!e.message || e.isPrivate) return true
-
-    let groupCfg = cfg.getGroup(e.self_id, e.group_id)
-
-    if (groupCfg.onlyReplyAt != 1 || !groupCfg.botAlias) return true
-
-    /** Bot was at-mentioned */
-    if (e.atBot) return true
-
-    /** Message carries the bot alias prefix */
-    if (e.hasAlias) return true
-
-    return false
-  }
-
-  /** Check black/white lists */
-  checkBlack(e) {
-    let other = cfg.getOther()
-
-    if (e.test) return true
-
-    /** Blacklisted QQ */
-    if (other.blackQQ?.length && other.blackQQ.includes(Number(e.user_id) || String(e.user_id))) {
-      return false
-    }
-
-    if (e.group_id) {
-      /** Whitelisted groups */
-      if (other.whiteGroup?.length) {
-        if (other.whiteGroup.includes(Number(e.group_id) || String(e.group_id))) return true
-        return false
-      }
-      /** Blacklisted groups */
-      if (other.blackGroup?.length && other.blackGroup.includes(Number(e.group_id) || String(e.group_id))) {
-        return false
-      }
-    }
-
-    return true
-  }
-
-  /** Check whether the feature is enabled */
-  checkDisable(e, p) {
-    let groupCfg = cfg.getGroup(e.self_id, e.group_id)
-    if (!lodash.isEmpty(groupCfg.enable)) {
-      if (groupCfg.enable.includes(p.name)) {
-        return true
-      }
-      // logger.debug(`${e.logText}[${p.name}]功能已禁用`)
-      return false
-    }
-
-    if (!lodash.isEmpty(groupCfg.disable)) {
-      if (groupCfg.disable.includes(p.name)) {
-        // logger.debug(`${e.logText}[${p.name}]功能已禁用`)
-        return false
-      }
-
-      return true
-    }
-    return true
-  }
-
-  /** Watch for hot reload */
-  watch(dirName, appName) {
-    this.watchDir(dirName)
-    if (this.watcher[`${dirName}.${appName}`]) return
-
-    let file = `./plugins/${dirName}/${appName}`
-    const watcher = chokidar.watch(file)
-    let key = `${dirName}/${appName}`
-
-    /** Watch for modifications */
-    watcher.on("change", async path => {
-      logger.mark(`[修改插件][${dirName}][${appName}]`)
-
-      let tmp = {}
-      try {
-        tmp = await import(`../../plugins/${dirName}/${appName}?${moment().format("x")}`)
-      } catch (error) {
-        logger.error(`载入插件错误:${logger.red(dirName + "/" + appName)}`)
-        logger.error(decodeURI(error.stack))
-        return
-      }
-
-      if (tmp.apps) tmp = { ...tmp.apps }
-      lodash.forEach(tmp, (p) => {
-        /* eslint-disable new-cap */
-        let plugin = new p()
-        for (let i in this.priority) {
-          if (this.priority[i].key == key) {
-            this.priority[i].class = p
-            this.priority[i].priority = plugin.priority
-          }
-        }
-
-        if (plugin.handler) {
-          lodash.forEach(plugin.handler, ({ fn, key, priority }) => {
-            Handler.add({
-              ns: plugin.namespace || File.name,
-              key: key,
-              self: plugin,
-              property: priority || plugin.priority || 500,
-              fn: plugin[fn]
-            })
-          })
-        }
-      })
-
-      this.priority = lodash.orderBy(this.priority, ["priority"], ["asc"])
-    })
-
-    /** Watch for deletions */
-    watcher.on("unlink", async path => {
-      logger.mark(`[卸载插件][${dirName}][${appName}]`)
-      for (let i in this.priority) {
-        if (this.priority[i].key == key) {
-          this.priority.splice(i, 1)
-          /** Stop watching for updates */
-          this.watcher[`${dirName}.${appName}`].removeAllListeners("change")
-          break
-        }
-      }
-    })
-
-    this.watcher[`${dirName}.${appName}`] = watcher
-  }
-
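Note: the change handler re-imports the module with a timestamp query string because Node's ESM loader caches modules by URL; a new query makes a new URL and forces fresh evaluation. Minimal sketch (the path is made up):

// Cache-busting re-import as used above; each distinct query string
// yields a freshly evaluated module.
const freshImport = file => import(`${file}?${Date.now()}`)
const mod = await freshImport("../../plugins/example/app.js")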
-  /** Watch a folder for updates */
-  watchDir(dirName) {
-    if (this.watcher[dirName]) return
-
-    let file = `./plugins/${dirName}/`
-    const watcher = chokidar.watch(file)
-
-    /** Hot reload */
-    setTimeout(() => {
-      /** New file added */
-      watcher.on("add", async PluPath => {
-        let appName = path.basename(PluPath)
-        if (!appName.endsWith(".js")) return
-        if (!fs.existsSync(`${this.dir}/${dirName}/${appName}`)) return
-
-        let key = `${dirName}/${appName}`
-
-        this.watch(dirName, appName)
-
-        /** Too fast; delay a little */
-        await common.sleep(500)
-
-        logger.mark(`[新增插件][${dirName}][${appName}]`)
-        let tmp = {}
-        try {
-          tmp = await import(`../../plugins/${dirName}/${appName}?${moment().format("X")}`)
-        } catch (error) {
-          logger.error(`载入插件错误:${logger.red(dirName + "/" + appName)}`)
-          logger.error(decodeURI(error.stack))
-          return
-        }
-
-        if (tmp.apps) tmp = { ...tmp.apps }
-
-        lodash.forEach(tmp, (p) => {
-          if (!p.prototype) {
-            logger.error(`[载入失败][${dirName}][${appName}] 格式错误已跳过`)
-            return
-          }
-          /* eslint-disable new-cap */
-          let plugin = new p()
-
-          for (let i in this.priority) {
-            if (this.priority[i].key == key) {
-              return
-            }
-          }
-
-          this.priority.push({
-            class: p,
-            key,
-            name: plugin.name,
-            priority: plugin.priority
-          })
-        })
-
-        /** Sort by priority */
-        this.priority = lodash.orderBy(this.priority, ["priority"], ["asc"])
-      })
-    }, 500)
-
-    this.watcher[dirName] = watcher
-  }
- }
-
- export default new PluginsLoader()
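Note: pulling the checks together, the loader consumes plugin instances whose rule entries drive the dispatch loop, filtEvent and filtPermission. A hypothetical minimal shape, with field names taken from what this file reads and all values illustrative:

// Hypothetical plugin instance as read by deal/filtEvent/filtPermission.
const examplePlugin = {
  name: "example",
  priority: 500,              // lower sorts earlier (orderBy ..., "asc")
  rule: [{
    reg: "^#ping$",           // tested against the normalized e.msg
    fnc: "ping",              // invoked as plugin[v.fnc](e)
    event: "message.group",   // compared by filtEvent; "*" wildcards allowed
    permission: "admin",      // "all" | "master" | "owner" | "admin"
    log: true                 // keep the completion log line
  }],
  async ping(e) {
    await e.reply("pong", true)
    return true               // returning false falls through to the next rule
  }
}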
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/caffe_.py DELETED
@@ -1,70 +0,0 @@
- # encoding=utf-8
-
- import util
-
-
- def get_data(net, name):
-     import caffe
-     if isinstance(net, caffe._caffe.Solver):
-         net = net.net
-     return net.blobs[name].data[...]
-
-
- def get_params(net, name=None):
-     import caffe
-     if isinstance(net, caffe._caffe.Solver):
-         net = net.net
-     params = net.params[name]
-     p = []
-     for param in params:
-         p.append(param.data[...])
-     return p
-
-
- def draw_log(log_path, output_names, show=False, save_path=None, from_to=None, smooth=False):
-     pattern = "Train net output: word_bbox_loc_loss = "
-     log_path = util.io.get_absolute_path(log_path)
-     f = open(log_path, 'r')
-     iterations = []
-     outputs = {}
-     plt = util.plt.plt
-     for line in f.readlines():
-         if util.str.contains(line, 'Iteration') and util.str.contains(line, 'loss = '):
-             print(line)
-             s = line.split('Iteration')[-1]
-             iter_num = util.str.find_all(s, r'\d+')[0]
-             iter_num = int(iter_num)
-             iterations.append(iter_num)
-
-         if util.str.contains(line, "Train net output #"):
-             s = util.str.split(line, r'Train net output #\d+\:')[-1]
-             s = s.split('(')[0]
-             output = util.str.find_all(s, r'\d*\.*\d+e*\-*\d*\.*\d*')[-1]
-             output = eval(output)
-             output = float(output)
-             for name in output_names:
-                 ptr = ' ' + name + ' ='
-                 if util.str.contains(line, ptr):
-                     if name not in outputs:
-                         outputs[name] = []
-                     print(line)
-                     print('\t', iter_num, name, output)
-                     outputs[name].append(output)
-     if len(outputs) == 0:
-         print('No output named:', output_names)
-         return
-     for name in outputs:
-         output = outputs[name]
-         if smooth:
-             output = util.np.smooth(output)
-         start = 0
-         end = len(output)
-
-         if from_to is not None:
-             start = from_to[0]
-             end = from_to[1]
-         line_style = util.plt.get_random_line_style()
-         plt.plot(iterations[start:end], output[start:end], line_style, label=name)
-
-     plt.legend()
-
-     if save_path is not None:
-         util.plt.save_image(save_path)
-     if show:
-         util.plt.show()
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ufoLib/filenames.py DELETED
@@ -1,291 +0,0 @@
- """
- User name to file name conversion.
- This was taken from the UFO 3 spec.
- """
-
- # Restrictions are taken mostly from
- # https://docs.microsoft.com/en-gb/windows/win32/fileio/naming-a-file#naming-conventions.
- #
- # 1. Integer value zero, sometimes referred to as the ASCII NUL character.
- # 2. Characters whose integer representations are in the range 1 to 31,
- #    inclusive.
- # 3. Various characters that (mostly) Windows and POSIX-y filesystems don't
- #    allow, plus "(" and ")", as per the specification.
- illegalCharacters = {
-     "\x00",
-     "\x01",
-     "\x02",
-     "\x03",
-     "\x04",
-     "\x05",
-     "\x06",
-     "\x07",
-     "\x08",
-     "\t",
-     "\n",
-     "\x0b",
-     "\x0c",
-     "\r",
-     "\x0e",
-     "\x0f",
-     "\x10",
-     "\x11",
-     "\x12",
-     "\x13",
-     "\x14",
-     "\x15",
-     "\x16",
-     "\x17",
-     "\x18",
-     "\x19",
-     "\x1a",
-     "\x1b",
-     "\x1c",
-     "\x1d",
-     "\x1e",
-     "\x1f",
-     '"',
-     "*",
-     "+",
-     "/",
-     ":",
-     "<",
-     ">",
-     "?",
-     "[",
-     "\\",
-     "]",
-     "(",
-     ")",
-     "|",
-     "\x7f",
- }
- reservedFileNames = {
-     "aux",
-     "clock$",
-     "com1",
-     "com2",
-     "com3",
-     "com4",
-     "com5",
-     "com6",
-     "com7",
-     "com8",
-     "com9",
-     "con",
-     "lpt1",
-     "lpt2",
-     "lpt3",
-     "lpt4",
-     "lpt5",
-     "lpt6",
-     "lpt7",
-     "lpt8",
-     "lpt9",
-     "nul",
-     "prn",
- }
- maxFileNameLength = 255
-
-
- class NameTranslationError(Exception):
-     pass
-
-
- def userNameToFileName(userName: str, existing=(), prefix="", suffix=""):
-     """
-     `existing` should be a set-like object.
-
-     >>> userNameToFileName("a") == "a"
-     True
-     >>> userNameToFileName("A") == "A_"
-     True
-     >>> userNameToFileName("AE") == "A_E_"
-     True
-     >>> userNameToFileName("Ae") == "A_e"
-     True
-     >>> userNameToFileName("ae") == "ae"
-     True
-     >>> userNameToFileName("aE") == "aE_"
-     True
-     >>> userNameToFileName("a.alt") == "a.alt"
-     True
-     >>> userNameToFileName("A.alt") == "A_.alt"
-     True
-     >>> userNameToFileName("A.Alt") == "A_.A_lt"
-     True
-     >>> userNameToFileName("A.aLt") == "A_.aL_t"
-     True
-     >>> userNameToFileName(u"A.alT") == "A_.alT_"
-     True
-     >>> userNameToFileName("T_H") == "T__H_"
-     True
-     >>> userNameToFileName("T_h") == "T__h"
-     True
-     >>> userNameToFileName("t_h") == "t_h"
-     True
-     >>> userNameToFileName("F_F_I") == "F__F__I_"
-     True
-     >>> userNameToFileName("f_f_i") == "f_f_i"
-     True
-     >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
-     True
-     >>> userNameToFileName(".notdef") == "_notdef"
-     True
-     >>> userNameToFileName("con") == "_con"
-     True
-     >>> userNameToFileName("CON") == "C_O_N_"
-     True
-     >>> userNameToFileName("con.alt") == "_con.alt"
-     True
-     >>> userNameToFileName("alt.con") == "alt._con"
-     True
-     """
-     # the incoming name must be a string
-     if not isinstance(userName, str):
-         raise ValueError("The value for userName must be a string.")
-     # establish the prefix and suffix lengths
-     prefixLength = len(prefix)
-     suffixLength = len(suffix)
-     # replace an initial period with an _
-     # if no prefix is to be added
-     if not prefix and userName[0] == ".":
-         userName = "_" + userName[1:]
-     # filter the user name
-     filteredUserName = []
-     for character in userName:
-         # replace illegal characters with _
-         if character in illegalCharacters:
-             character = "_"
-         # add _ to all non-lower characters
-         elif character != character.lower():
-             character += "_"
-         filteredUserName.append(character)
-     userName = "".join(filteredUserName)
-     # clip to 255
-     sliceLength = maxFileNameLength - prefixLength - suffixLength
-     userName = userName[:sliceLength]
-     # test for illegal file names
-     parts = []
-     for part in userName.split("."):
-         if part.lower() in reservedFileNames:
-             part = "_" + part
-         parts.append(part)
-     userName = ".".join(parts)
-     # test for clash
-     fullName = prefix + userName + suffix
-     if fullName.lower() in existing:
-         fullName = handleClash1(userName, existing, prefix, suffix)
-     # finished
-     return fullName
-
-
- def handleClash1(userName, existing=[], prefix="", suffix=""):
-     """
-     existing should be a case-insensitive list
-     of all existing file names.
-
-     >>> prefix = ("0" * 5) + "."
-     >>> suffix = "." + ("0" * 10)
-     >>> existing = ["a" * 5]
-
-     >>> e = list(existing)
-     >>> handleClash1(userName="A" * 5, existing=e,
-     ...     prefix=prefix, suffix=suffix) == (
-     ...     '00000.AAAAA000000000000001.0000000000')
-     True
-
-     >>> e = list(existing)
-     >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
-     >>> handleClash1(userName="A" * 5, existing=e,
-     ...     prefix=prefix, suffix=suffix) == (
-     ...     '00000.AAAAA000000000000002.0000000000')
-     True
-
-     >>> e = list(existing)
-     >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
-     >>> handleClash1(userName="A" * 5, existing=e,
-     ...     prefix=prefix, suffix=suffix) == (
-     ...     '00000.AAAAA000000000000001.0000000000')
-     True
-     """
-     # if the prefix length + user name length + suffix length + 15 is at
-     # or past the maximum length, slice 15 characters off of the user name
-     prefixLength = len(prefix)
-     suffixLength = len(suffix)
-     if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
-         l = prefixLength + len(userName) + suffixLength + 15
-         sliceLength = maxFileNameLength - l
-         userName = userName[:sliceLength]
-     finalName = None
-     # try to add numbers to create a unique name
-     counter = 1
-     while finalName is None:
-         name = userName + str(counter).zfill(15)
-         fullName = prefix + name + suffix
-         if fullName.lower() not in existing:
-             finalName = fullName
-             break
-         else:
-             counter += 1
-         if counter >= 999999999999999:
-             break
-     # if there is a clash, go to the next fallback
-     if finalName is None:
-         finalName = handleClash2(existing, prefix, suffix)
-     # finished
-     return finalName
-
-
- def handleClash2(existing=[], prefix="", suffix=""):
-     """
-     existing should be a case-insensitive list
-     of all existing file names.
-
-     >>> prefix = ("0" * 5) + "."
-     >>> suffix = "." + ("0" * 10)
-     >>> existing = [prefix + str(i) + suffix for i in range(100)]
-
-     >>> e = list(existing)
-     >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
-     ...     '00000.100.0000000000')
-     True
-
-     >>> e = list(existing)
-     >>> e.remove(prefix + "1" + suffix)
-     >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
-     ...     '00000.1.0000000000')
-     True
-
-     >>> e = list(existing)
-     >>> e.remove(prefix + "2" + suffix)
-     >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
-     ...     '00000.2.0000000000')
-     True
-     """
-     # calculate the longest possible string
-     maxLength = maxFileNameLength - len(prefix) - len(suffix)
-     maxValue = int("9" * maxLength)
-     # try to find a number
-     finalName = None
-     counter = 1
-     while finalName is None:
-         fullName = prefix + str(counter) + suffix
-         if fullName.lower() not in existing:
-             finalName = fullName
-             break
-         else:
-             counter += 1
-         if counter >= maxValue:
-             break
-     # raise an error if nothing has been found
-     if finalName is None:
-         raise NameTranslationError("No unique name could be found.")
-     # finished
-     return finalName
-
-
- if __name__ == "__main__":
-     import doctest
-
-     doctest.testmod()
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-1d65707a.js DELETED
@@ -1,16 +0,0 @@
- const VERSION_RE = new RegExp("3.37.0/", "g");function import_fix(mod, base) {const url = new URL(mod, base); return import(`https://gradio.s3-us-west-2.amazonaws.com/3.37.0/${url.pathname?.startsWith('/') ? url.pathname.substring(1).replace(VERSION_RE, "") : url.pathname.replace(VERSION_RE, "")}`);}(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const i of document.querySelectorAll('link[rel="modulepreload"]'))r(i);new MutationObserver(i=>{for(const o of i)if(o.type==="childList")for(const s of o.addedNodes)s.tagName==="LINK"&&s.rel==="modulepreload"&&r(s)}).observe(document,{childList:!0,subtree:!0});function n(i){const o={};return i.integrity&&(o.integrity=i.integrity),i.referrerPolicy&&(o.referrerPolicy=i.referrerPolicy),i.crossOrigin==="use-credentials"?o.credentials="include":i.crossOrigin==="anonymous"?o.credentials="omit":o.credentials="same-origin",o}function r(i){if(i.ep)return;i.ep=!0;const o=n(i);fetch(i.href,o)}})();var ei=typeof globalThis<"u"?globalThis:typeof window<"u"?window:typeof global<"u"?global:typeof self<"u"?self:{};function zn(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var Ye={},Pe={},_t={exports:{}},R=String,tn=function(){return{isColorSupported:!1,reset:R,bold:R,dim:R,italic:R,underline:R,inverse:R,hidden:R,strikethrough:R,black:R,red:R,green:R,yellow:R,blue:R,magenta:R,cyan:R,white:R,gray:R,bgBlack:R,bgRed:R,bgGreen:R,bgYellow:R,bgBlue:R,bgMagenta:R,bgCyan:R,bgWhite:R}};_t.exports=tn();_t.exports.createColors=tn;var xn=_t.exports;Object.defineProperty(Pe,"__esModule",{value:!0});Pe.dim=En;Pe.default=void 0;var le=An(xn);function An(e){return e&&e.__esModule?e:{default:e}}let kt=new Set;function nt(e,t,n){typeof process<"u"&&{}.JEST_WORKER_ID||n&&kt.has(n)||(n&&kt.add(n),console.warn(""),t.forEach(r=>console.warn(e,"-",r)))}function En(e){return le.default.dim(e)}var Sn={info(e,t){nt(le.default.bold(le.default.cyan("info")),...Array.isArray(e)?[e]:[t,e])},warn(e,t){nt(le.default.bold(le.default.yellow("warn")),...Array.isArray(e)?[e]:[t,e])},risk(e,t){nt(le.default.bold(le.default.magenta("risk")),...Array.isArray(e)?[e]:[t,e])}};Pe.default=Sn;Object.defineProperty(Ye,"__esModule",{value:!0});Ye.default=void 0;var Nn=qn(Pe);function qn(e){return e&&e.__esModule?e:{default:e}}function Ne({version:e,from:t,to:n}){Nn.default.warn(`${t}-color-renamed`,[`As of Tailwind CSS ${e}, \`${t}\` has been renamed to \`${n}\`.`,"Update your configuration file to silence this warning."])}var 
Cn={inherit:"inherit",current:"currentColor",transparent:"transparent",black:"#000",white:"#fff",slate:{50:"#f8fafc",100:"#f1f5f9",200:"#e2e8f0",300:"#cbd5e1",400:"#94a3b8",500:"#64748b",600:"#475569",700:"#334155",800:"#1e293b",900:"#0f172a"},gray:{50:"#f9fafb",100:"#f3f4f6",200:"#e5e7eb",300:"#d1d5db",400:"#9ca3af",500:"#6b7280",600:"#4b5563",700:"#374151",800:"#1f2937",900:"#111827"},zinc:{50:"#fafafa",100:"#f4f4f5",200:"#e4e4e7",300:"#d4d4d8",400:"#a1a1aa",500:"#71717a",600:"#52525b",700:"#3f3f46",800:"#27272a",900:"#18181b"},neutral:{50:"#fafafa",100:"#f5f5f5",200:"#e5e5e5",300:"#d4d4d4",400:"#a3a3a3",500:"#737373",600:"#525252",700:"#404040",800:"#262626",900:"#171717"},stone:{50:"#fafaf9",100:"#f5f5f4",200:"#e7e5e4",300:"#d6d3d1",400:"#a8a29e",500:"#78716c",600:"#57534e",700:"#44403c",800:"#292524",900:"#1c1917"},red:{50:"#fef2f2",100:"#fee2e2",200:"#fecaca",300:"#fca5a5",400:"#f87171",500:"#ef4444",600:"#dc2626",700:"#b91c1c",800:"#991b1b",900:"#7f1d1d"},orange:{50:"#fff7ed",100:"#ffedd5",200:"#fed7aa",300:"#fdba74",400:"#fb923c",500:"#f97316",600:"#ea580c",700:"#c2410c",800:"#9a3412",900:"#7c2d12"},amber:{50:"#fffbeb",100:"#fef3c7",200:"#fde68a",300:"#fcd34d",400:"#fbbf24",500:"#f59e0b",600:"#d97706",700:"#b45309",800:"#92400e",900:"#78350f"},yellow:{50:"#fefce8",100:"#fef9c3",200:"#fef08a",300:"#fde047",400:"#facc15",500:"#eab308",600:"#ca8a04",700:"#a16207",800:"#854d0e",900:"#713f12"},lime:{50:"#f7fee7",100:"#ecfccb",200:"#d9f99d",300:"#bef264",400:"#a3e635",500:"#84cc16",600:"#65a30d",700:"#4d7c0f",800:"#3f6212",900:"#365314"},green:{50:"#f0fdf4",100:"#dcfce7",200:"#bbf7d0",300:"#86efac",400:"#4ade80",500:"#22c55e",600:"#16a34a",700:"#15803d",800:"#166534",900:"#14532d"},emerald:{50:"#ecfdf5",100:"#d1fae5",200:"#a7f3d0",300:"#6ee7b7",400:"#34d399",500:"#10b981",600:"#059669",700:"#047857",800:"#065f46",900:"#064e3b"},teal:{50:"#f0fdfa",100:"#ccfbf1",200:"#99f6e4",300:"#5eead4",400:"#2dd4bf",500:"#14b8a6",600:"#0d9488",700:"#0f766e",800:"#115e59",900:"#134e4a"},cyan:{50:"#ecfeff",100:"#cffafe",200:"#a5f3fc",300:"#67e8f9",400:"#22d3ee",500:"#06b6d4",600:"#0891b2",700:"#0e7490",800:"#155e75",900:"#164e63"},sky:{50:"#f0f9ff",100:"#e0f2fe",200:"#bae6fd",300:"#7dd3fc",400:"#38bdf8",500:"#0ea5e9",600:"#0284c7",700:"#0369a1",800:"#075985",900:"#0c4a6e"},blue:{50:"#eff6ff",100:"#dbeafe",200:"#bfdbfe",300:"#93c5fd",400:"#60a5fa",500:"#3b82f6",600:"#2563eb",700:"#1d4ed8",800:"#1e40af",900:"#1e3a8a"},indigo:{50:"#eef2ff",100:"#e0e7ff",200:"#c7d2fe",300:"#a5b4fc",400:"#818cf8",500:"#6366f1",600:"#4f46e5",700:"#4338ca",800:"#3730a3",900:"#312e81"},violet:{50:"#f5f3ff",100:"#ede9fe",200:"#ddd6fe",300:"#c4b5fd",400:"#a78bfa",500:"#8b5cf6",600:"#7c3aed",700:"#6d28d9",800:"#5b21b6",900:"#4c1d95"},purple:{50:"#faf5ff",100:"#f3e8ff",200:"#e9d5ff",300:"#d8b4fe",400:"#c084fc",500:"#a855f7",600:"#9333ea",700:"#7e22ce",800:"#6b21a8",900:"#581c87"},fuchsia:{50:"#fdf4ff",100:"#fae8ff",200:"#f5d0fe",300:"#f0abfc",400:"#e879f9",500:"#d946ef",600:"#c026d3",700:"#a21caf",800:"#86198f",900:"#701a75"},pink:{50:"#fdf2f8",100:"#fce7f3",200:"#fbcfe8",300:"#f9a8d4",400:"#f472b6",500:"#ec4899",600:"#db2777",700:"#be185d",800:"#9d174d",900:"#831843"},rose:{50:"#fff1f2",100:"#ffe4e6",200:"#fecdd3",300:"#fda4af",400:"#fb7185",500:"#f43f5e",600:"#e11d48",700:"#be123c",800:"#9f1239",900:"#881337"},get lightBlue(){return Ne({version:"v2.2",from:"lightBlue",to:"sky"}),this.sky},get warmGray(){return Ne({version:"v3.0",from:"warmGray",to:"stone"}),this.stone},get trueGray(){return 
Ne({version:"v3.0",from:"trueGray",to:"neutral"}),this.neutral},get coolGray(){return Ne({version:"v3.0",from:"coolGray",to:"gray"}),this.gray},get blueGray(){return Ne({version:"v3.0",from:"blueGray",to:"slate"}),this.slate}};Ye.default=Cn;let rt=Ye;var Ln=(rt.__esModule?rt:{default:rt}).default;const zt=zn(Ln),ti=["red","green","blue","yellow","purple","teal","orange","cyan","lime","pink"],Mn=[{color:"red",primary:600,secondary:100},{color:"green",primary:600,secondary:100},{color:"blue",primary:600,secondary:100},{color:"yellow",primary:500,secondary:100},{color:"purple",primary:600,secondary:100},{color:"teal",primary:600,secondary:100},{color:"orange",primary:600,secondary:100},{color:"cyan",primary:600,secondary:100},{color:"lime",primary:500,secondary:100},{color:"pink",primary:600,secondary:100}],ni=Mn.reduce((e,{color:t,primary:n,secondary:r})=>({...e,[t]:{primary:zt[t][n],secondary:zt[t][r]}}),{}),On="modulepreload",Pn=function(e){return"https://gradio.s3-us-west-2.amazonaws.com/3.37.0/"+e},xt={},Ge=function(t,n,r){if(!n||n.length===0)return t();const i=document.getElementsByTagName("link");return Promise.all(n.map(o=>{if(o=Pn(o),o in xt)return;xt[o]=!0;const s=o.endsWith(".css"),a=s?'[rel="stylesheet"]':"";if(!!r)for(let f=i.length-1;f>=0;f--){const u=i[f];if(u.href===o&&(!s||u.rel==="stylesheet"))return}else if(document.querySelector(`link[href="${o}"]${a}`))return;const l=document.createElement("link");if(l.rel=s?"stylesheet":On,s||(l.as="script",l.crossOrigin=""),l.href=o,document.head.appendChild(l),s)return new Promise((f,u)=>{l.addEventListener("load",f),l.addEventListener("error",()=>u(new Error(`Unable to preload CSS for ${o}`)))})})).then(()=>t())};var it=new Intl.Collator(0,{numeric:1}).compare;function At(e,t,n){return e=e.split("."),t=t.split("."),it(e[0],t[0])||it(e[1],t[1])||(t[2]=t.slice(2).join("."),n=/[.-]/.test(e[2]=e.slice(2).join(".")),n==/[.-]/.test(t[2])?it(e[2],t[2]):n?-1:1)}function ot(e){if(e.startsWith("http")){const{protocol:t,host:n}=new URL(e);return n.endsWith("hf.space")?{ws_protocol:"wss",host:n,http_protocol:t}:{ws_protocol:t==="https:"?"wss":"ws",http_protocol:t,host:n}}return{ws_protocol:"wss",http_protocol:"https:",host:e}}const nn=/^[^\/]*\/[^\/]*$/,Tn=/.*hf\.space\/{0,1}$/;async function Bn(e,t){const n={};t&&(n.Authorization=`Bearer ${t}`);const r=e.trim();if(nn.test(r))try{const i=await fetch(`https://huggingface.co/api/spaces/${r}/host`,{headers:n});if(i.status!==200)throw new Error("Space metadata could not be loaded.");const o=(await i.json()).host;return{space_id:e,...ot(o)}}catch(i){throw new Error("Space metadata could not be loaded."+i.message)}if(Tn.test(r)){const{ws_protocol:i,http_protocol:o,host:s}=ot(r);return{space_id:s.replace(".hf.space",""),ws_protocol:i,http_protocol:o,host:s}}return{space_id:!1,...ot(r)}}function Fn(e){let t={};return e.forEach(({api_name:n},r)=>{n&&(t[n]=r)}),t}const Rn=/^(?=[^]*\b[dD]iscussions{0,1}\b)(?=[^]*\b[dD]isabled\b)[^]*$/;async function Et(e){try{const n=(await fetch(`https://huggingface.co/api/spaces/${e}/discussions`,{method:"HEAD"})).headers.get("x-error-message");return!(n&&Rn.test(n))}catch{return!1}}const Dn="This application is too busy. 
Keep trying!",Re="Connection errored out.";let rn;function In(e){return{post_data:t,upload_files:n,client:r,handle_blob:i};async function t(o,s,a){const c={"Content-Type":"application/json"};a&&(c.Authorization=`Bearer ${a}`);try{var l=await e(o,{method:"POST",body:JSON.stringify(s),headers:c})}catch{return[{error:Re},500]}return[await l.json(),l.status]}async function n(o,s,a){const c={};a&&(c.Authorization=`Bearer ${a}`);const l=new FormData;s.forEach(g=>{l.append("files",g)});try{var f=await e(`${o}/upload`,{method:"POST",body:l,headers:c})}catch{return{error:Re}}return{files:await f.json()}}async function r(o,s={normalise_files:!0}){return new Promise(async a=>{const{status_callback:c,hf_token:l,normalise_files:f}=s,u={predict:M,submit:U,view_api:ee},g=f??!0;if(typeof window>"u"||!("WebSocket"in window)){const C=await Ge(()=>import("./wrapper-6f348d45-38be7a64.js"),["assets/wrapper-6f348d45-38be7a64.js","assets/__vite-browser-external-b25bb000.js"]);rn=(await Ge(()=>import("./__vite-browser-external-b25bb000.js"),[])).Blob,global.WebSocket=C.WebSocket}const{ws_protocol:h,http_protocol:d,host:w,space_id:b}=await Bn(o,l),z=Math.random().toString(36).substring(2),N={};let p,x={},E=!1;l&&b&&(E=await Un(b,l));async function T(C){p=C,x=Fn(C?.dependencies||[]);try{A=await ee(p)}catch(D){console.error(`Could not get api details: ${D.message}`)}return{config:p,...u}}let A;async function L(C){if(c&&c(C),C.status==="running")try{p=await Mt(e,`${d}//${w}`,l);const D=await T(p);a(D)}catch(D){console.error(D),c&&c({status:"error",message:"Could not load this space.",load_status:"error",detail:"NOT_FOUND"})}}try{p=await Mt(e,`${d}//${w}`,l);const C=await T(p);a(C)}catch(C){console.error(C),b?ft(b,nn.test(b)?"space_name":"subdomain",L):c&&c({status:"error",message:"Could not load this space.",load_status:"error",detail:"NOT_FOUND"})}function M(C,D,te){let q=!1,W=!1;return new Promise((j,I)=>{const ie=U(C,D,te);ie.on("data",Z=>{q=!0,W&&ie.destroy(),j(Z)}).on("status",Z=>{Z.stage==="error"&&I(Z),Z.stage==="complete"&&q&&ie.destroy(),Z.stage==="complete"&&(W=!0)})})}function U(C,D,te){let q,W;if(typeof C=="number")q=C,W=A.unnamed_endpoints[q];else{const G=C.replace(/^\//,"");q=x[G],W=A.named_endpoints[C.trim()]}if(typeof q!="number")throw new Error("There is no endpoint matching that name of fn_index matching that number.");let j;const I=typeof C=="number"?"/predict":C;let ie,Z=!1;const y={};i(`${d}//${w+p.path}`,D,W,l).then(G=>{if(ie={data:G||[],event_data:te,fn_index:q},Vn(q,p))X({type:"status",endpoint:I,stage:"pending",queue:!1,fn_index:q,time:new Date}),t(`${d}//${w+p.path}/run${I.startsWith("/")?I:`/${I}`}`,{...ie,session_hash:z},l).then(([m,F])=>{const P=g?qt(m.data,W,p.root,p.root_url):m.data;F==200?(X({type:"data",endpoint:I,fn_index:q,data:P,time:new Date}),X({type:"status",endpoint:I,fn_index:q,stage:"complete",eta:m.average_duration,queue:!1,time:new Date})):X({type:"status",stage:"error",endpoint:I,fn_index:q,message:m.error,queue:!1,time:new Date})}).catch(m=>{X({type:"status",stage:"error",message:m.message,endpoint:I,fn_index:q,queue:!1,time:new Date})});else{X({type:"status",stage:"pending",queue:!0,endpoint:I,fn_index:q,time:new Date});let m=new URL(`${h}://${w}${p.path}
- /queue/join`);E&&m.searchParams.set("__sign",E),j=new WebSocket(m),j.onclose=F=>{F.wasClean||X({type:"status",stage:"error",broken:!0,message:Re,queue:!0,endpoint:I,fn_index:q,time:new Date})},j.onmessage=function(F){const P=JSON.parse(F.data),{type:Y,status:se,data:Se}=Wn(P,N[q]);if(Y==="update"&&se&&!Z)X({type:"status",endpoint:I,fn_index:q,time:new Date,...se}),se.stage==="error"&&j.close();else if(Y==="hash"){j.send(JSON.stringify({fn_index:q,session_hash:z}));return}else Y==="data"?j.send(JSON.stringify({...ie,session_hash:z})):Y==="complete"?Z=se:Y==="log"?X({type:"log",log:Se.log,level:Se.level,endpoint:I,fn_index:q}):Y==="generating"&&X({type:"status",time:new Date,...se,stage:se?.stage,queue:!0,endpoint:I,fn_index:q});Se&&(X({type:"data",time:new Date,data:g?qt(Se.data,W,p.root,p.root_url):Se.data,endpoint:I,fn_index:q}),Z&&(X({type:"status",time:new Date,...Z,stage:se?.stage,queue:!0,endpoint:I,fn_index:q}),j.close()))},At(p.version||"2.0.0","3.6")<0&&addEventListener("open",()=>j.send(JSON.stringify({hash:z})))}});function X(G){const F=y[G.type]||[];F?.forEach(P=>P(G))}function xe(G,m){const F=y,P=F[G]||[];return F[G]=P,P?.push(m),{on:xe,off:ge,cancel:Ae,destroy:Ee}}function ge(G,m){const F=y;let P=F[G]||[];return P=P?.filter(Y=>Y!==m),F[G]=P,{on:xe,off:ge,cancel:Ae,destroy:Ee}}async function Ae(){const G={stage:"complete",queue:!1,time:new Date};Z=G,X({...G,type:"status",endpoint:I,fn_index:q}),j&&j.readyState===0?j.addEventListener("open",()=>{j.close()}):j.close();try{await e(`${d}//${w+p.path}/reset`,{headers:{"Content-Type":"application/json"},method:"POST",body:JSON.stringify({fn_index:q,session_hash:z})})}catch{console.warn("The `/reset` endpoint could not be called. Subsequent endpoint results may be unreliable.")}}function Ee(){for(const G in y)y[G].forEach(m=>{ge(G,m)})}return{on:xe,off:ge,cancel:Ae,destroy:Ee}}async function ee(C){if(A)return A;const D={"Content-Type":"application/json"};l&&(D.Authorization=`Bearer ${l}`);let te;if(At(C.version||"2.0.0","3.30")<0?te=await e("https://gradio-space-api-fetcher-v2.hf.space/api",{method:"POST",body:JSON.stringify({serialize:!1,config:JSON.stringify(C)}),headers:D}):te=await e(`${C.root}/info`,{headers:D}),!te.ok)throw new Error(Re);let q=await te.json();return"api"in q&&(q=q.api),q.named_endpoints["/predict"]&&!q.unnamed_endpoints[0]&&(q.unnamed_endpoints[0]=q.named_endpoints["/predict"]),jn(q,C,x)}})}async function i(o,s,a,c){const l=await ct(s,void 0,[],!0,a);return Promise.all(l.map(async({path:f,blob:u,data:g,type:h})=>{if(u){const d=(await n(o,[u],c)).files[0];return{path:f,file_url:d,type:h}}else return{path:f,base64:g,type:h}})).then(f=>(f.forEach(({path:u,file_url:g,base64:h,type:d})=>{if(h)st(s,h,u);else if(d==="Gallery")st(s,g,u);else if(g){const w={is_file:!0,name:`${g}`,data:null};st(s,w,u)}}),s))}}const{post_data:ri,upload_files:St,client:Nt,handle_blob:ii}=In(fetch);function qt(e,t,n,r){return e.map((i,o)=>{var s,a,c,l;return((a=(s=t.returns)==null?void 0:s[o])==null?void 0:a.component)==="File"?Ce(i,n,r):((l=(c=t.returns)==null?void 0:c[o])==null?void 0:l.component)==="Gallery"?i.map(f=>Array.isArray(f)?[Ce(f[0],n,r),f[1]]:[Ce(f,n,r),null]):typeof i=="object"&&i.is_file?Ce(i,n,r):i})}function Ce(e,t,n){if(e==null)return null;if(typeof e=="string")return{name:"file_data",data:e};if(Array.isArray(e)){const r=[];for(const i of e)i===null?r.push(null):r.push(Ce(i,t,n));return r}else e.is_file&&(n?e.data="/proxy="+n+"file="+e.name:e.data=t+"/file="+e.name);return e}function 
Ct(e,t,n,r){switch(e.type){case"string":return"string";case"boolean":return"boolean";case"number":return"number"}if(n==="JSONSerializable"||n==="StringSerializable")return"any";if(n==="ListStringSerializable")return"string[]";if(t==="Image")return r==="parameter"?"Blob | File | Buffer":"string";if(n==="FileSerializable")return e?.type==="array"?r==="parameter"?"(Blob | File | Buffer)[]":"{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}[]":r==="parameter"?"Blob | File | Buffer":"{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}";if(n==="GallerySerializable")return r==="parameter"?"[(Blob | File | Buffer), (string | null)][]":"[{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}, (string | null))][]"}function Lt(e,t){return t==="GallerySerializable"?"array of [file, label] tuples":t==="ListStringSerializable"?"array of strings":t==="FileSerializable"?"array of files or single file":e.description}function jn(e,t,n){const r={named_endpoints:{},unnamed_endpoints:{}};for(const i in e){const o=e[i];for(const s in o){const a=t.dependencies[s]?s:n[s.replace("/","")],c=o[s];r[i][s]={},r[i][s].parameters={},r[i][s].returns={},r[i][s].type=t.dependencies[a].types,r[i][s].parameters=c.parameters.map(({label:l,component:f,type:u,serializer:g})=>({label:l,component:f,type:Ct(u,f,g,"parameter"),description:Lt(u,g)})),r[i][s].returns=c.returns.map(({label:l,component:f,type:u,serializer:g})=>({label:l,component:f,type:Ct(u,f,g,"return"),description:Lt(u,g)}))}}return r}async function Un(e,t){try{return(await(await fetch(`https://huggingface.co/api/spaces/${e}/jwt`,{headers:{Authorization:`Bearer ${t}`}})).json()).token||!1}catch(n){return console.error(n),!1}}function st(e,t,n){for(;n.length>1;)e=e[n.shift()];e[n.shift()]=t}async function ct(e,t=void 0,n=[],r=!1,i=void 0){if(Array.isArray(e)){let o=[];return await Promise.all(e.map(async(s,a)=>{var c;let l=n.slice();l.push(a);const f=await ct(e[a],r?((c=i?.parameters[a])==null?void 0:c.component)||void 0:t,l,!1,i);o=o.concat(f)})),o}else if(globalThis.Buffer&&e instanceof globalThis.Buffer){const o=t==="Image";return[{path:n,blob:o?!1:new rn([e]),data:o?`${e.toString("base64")}`:!1,type:t}]}else if(e instanceof Blob||typeof window<"u"&&e instanceof File)if(t==="Image"){let o;if(typeof window<"u")o=await Gn(e);else{const s=await e.arrayBuffer();o=Buffer.from(s).toString("base64")}return[{path:n,data:o,type:t}]}else return[{path:n,blob:e,type:t}];else if(typeof e=="object"){let o=[];for(let s in e)if(e.hasOwnProperty(s)){let a=n.slice();a.push(s),o=o.concat(await ct(e[s],void 0,a,!1,i))}return o}else return[]}function Gn(e){return new Promise((t,n)=>{const r=new FileReader;r.onloadend=()=>t(r.result),r.readAsDataURL(e)})}function Vn(e,t){var n,r,i,o;return!(((r=(n=t?.dependencies)==null?void 0:n[e])==null?void 0:r.queue)===null?t.enable_queue:(o=(i=t?.dependencies)==null?void 0:i[e])!=null&&o.queue)||!1}async function Mt(e,t,n){const r={};if(n&&(r.Authorization=`Bearer ${n}`),typeof window<"u"&&window.gradio_config&&location.origin!=="http://localhost:9876"){const i=window.gradio_config.root,o=window.gradio_config;return o.root=t+o.root,{...o,path:i}}else if(t){let i=await e(`${t}/config`,{headers:r});if(i.status===200){const o=await i.json();return o.path=o.path??"",o.root=t,o}else throw new Error("Could not get config.")}throw new Error("No config or app endpoint found")}async function ft(e,t,n){let 
r=t==="subdomain"?`https://huggingface.co/api/spaces/by-subdomain/${e}`:`https://huggingface.co/api/spaces/${e}`,i,o;try{if(i=await fetch(r),o=i.status,o!==200)throw new Error;i=await i.json()}catch{n({status:"error",load_status:"error",message:"Could not get space status",detail:"NOT_FOUND"});return}if(!i||o!==200)return;const{runtime:{stage:s},id:a}=i;switch(s){case"STOPPED":case"SLEEPING":n({status:"sleeping",load_status:"pending",message:"Space is asleep. Waking it up...",detail:s}),setTimeout(()=>{ft(e,t,n)},1e3);break;case"PAUSED":n({status:"paused",load_status:"error",message:"This space has been paused by the author. If you would like to try this demo, consider duplicating the space.",detail:s,discussions_enabled:await Et(a)});break;case"RUNNING":case"RUNNING_BUILDING":n({status:"running",load_status:"complete",message:"",detail:s});break;case"BUILDING":n({status:"building",load_status:"pending",message:"Space is building...",detail:s}),setTimeout(()=>{ft(e,t,n)},1e3);break;default:n({status:"space_error",load_status:"error",message:"This space is experiencing an issue.",detail:s,discussions_enabled:await Et(a)});break}}function Wn(e,t){switch(e.msg){case"send_data":return{type:"data"};case"send_hash":return{type:"hash"};case"queue_full":return{type:"update",status:{queue:!0,message:Dn,stage:"error",code:e.code,success:e.success}};case"estimation":return{type:"update",status:{queue:!0,stage:t||"pending",code:e.code,size:e.queue_size,position:e.rank,eta:e.rank_eta,success:e.success}};case"progress":return{type:"update",status:{queue:!0,stage:"pending",code:e.code,progress_data:e.progress_data,success:e.success}};case"log":return{type:"log",data:e};case"process_generating":return{type:"generating",status:{queue:!0,message:e.success?null:e.output.error,stage:e.success?"generating":"error",code:e.code,progress_data:e.progress_data,eta:e.average_duration},data:e.success?e.output:null};case"process_completed":return"error"in e.output?{type:"update",status:{queue:!0,message:e.output.error,stage:"error",code:e.code,success:e.success}}:{type:"complete",status:{queue:!0,message:e.success?void 0:e.output.error,stage:e.success?"complete":"error",code:e.code,progress_data:e.progress_data,eta:e.output.average_duration},data:e.success?e.output:null};case"process_starts":return{type:"update",status:{queue:!0,stage:"pending",code:e.code,size:e.rank,position:0,success:e.success}}}return{type:"none",status:{stage:"error",queue:!0}}}function ut(e,t){if(document.querySelector(`link[href='${e}']`))return Promise.resolve();const r=document.createElement("link");return r.rel="stylesheet",r.href=e,t.appendChild(r),new Promise((i,o)=>{r.addEventListener("load",()=>i()),r.addEventListener("error",()=>{console.error(`Unable to preload CSS for ${e}`),i()})})}function V(){}const bt=e=>e;function on(e,t){for(const n in t)e[n]=t[n];return e}function sn(e){return e()}function Ot(){return Object.create(null)}function ae(e){e.forEach(sn)}function ue(e){return typeof e=="function"}function Te(e,t){return e!=e?t==t:e!==t||e&&typeof e=="object"||typeof e=="function"}let De;function Hn(e,t){return De||(De=document.createElement("a")),De.href=t,e===De.href}function Jn(e){return Object.keys(e).length===0}function an(e,...t){if(e==null){for(const r of t)r(void 0);return V}const n=e.subscribe(...t);return n.unsubscribe?()=>n.unsubscribe():n}function Ve(e,t,n){e.$$.on_destroy.push(an(t,n))}function ln(e,t,n,r){if(e){const i=cn(e,t,n,r);return e[0](i)}}function cn(e,t,n,r){return 
e[1]&&r?on(n.ctx.slice(),e[1](r(t))):n.ctx}function fn(e,t,n,r){if(e[2]&&r){const i=e[2](r(n));if(t.dirty===void 0)return i;if(typeof i=="object"){const o=[],s=Math.max(t.dirty.length,i.length);for(let a=0;a<s;a+=1)o[a]=t.dirty[a]|i[a];return o}return t.dirty|i}return t.dirty}function un(e,t,n,r,i,o){if(i){const s=cn(t,n,r,o);e.p(s,i)}}function dn(e){if(e.ctx.length>32){const t=[],n=e.ctx.length/32;for(let r=0;r<n;r++)t[r]=-1;return t}return-1}function oi(e){return e??""}function si(e,t,n){return e.set(n),t}function ai(e){return e&&ue(e.destroy)?e.destroy:V}function li(e){const t=typeof e=="string"&&e.match(/^\s*(-?[\d.]+)([^\s]*)\s*$/);return t?[parseFloat(t[1]),t[2]||"px"]:[e,"px"]}const pn=typeof window<"u";let Me=pn?()=>window.performance.now():()=>Date.now(),wt=pn?e=>requestAnimationFrame(e):V;const we=new Set;function gn(e){we.forEach(t=>{t.c(e)||(we.delete(t),t.f())}),we.size!==0&&wt(gn)}function $e(e){let t;return we.size===0&&wt(gn),{promise:new Promise(n=>{we.add(t={c:e,f:n})}),abort(){we.delete(t)}}}const Zn=typeof window<"u"?window:typeof globalThis<"u"?globalThis:global;"WeakMap"in Zn;function S(e,t){e.appendChild(t)}function hn(e){if(!e)return document;const t=e.getRootNode?e.getRootNode():e.ownerDocument;return t&&t.host?t:e.ownerDocument}function Qn(e){const t=B("style");return t.textContent="/* empty */",Kn(hn(e),t),t.sheet}function Kn(e,t){return S(e.head||e,t),t.sheet}function k(e,t,n){e.insertBefore(t,n||null)}function v(e){e.parentNode&&e.parentNode.removeChild(e)}function mn(e,t){for(let n=0;n<e.length;n+=1)e[n]&&e[n].d(t)}function B(e){return document.createElement(e)}function re(e){return document.createElementNS("http://www.w3.org/2000/svg",e)}function O(e){return document.createTextNode(e)}function $(){return O(" ")}function de(){return O("")}function Pt(e,t,n,r){return e.addEventListener(t,n,r),()=>e.removeEventListener(t,n,r)}function ci(e){return function(t){return t.preventDefault(),e.call(this,t)}}function fi(e){return function(t){return t.stopPropagation(),e.call(this,t)}}function _(e,t,n){n==null?e.removeAttribute(t):e.getAttribute(t)!==n&&e.setAttribute(t,n)}const Xn=["width","height"];function Yn(e,t){const n=Object.getOwnPropertyDescriptors(e.__proto__);for(const r in t)t[r]==null?e.removeAttribute(r):r==="style"?e.style.cssText=t[r]:r==="__value"?e.value=e[r]=t[r]:n[r]&&n[r].set&&Xn.indexOf(r)===-1?e[r]=t[r]:_(e,r,t[r])}function $n(e,t){Object.keys(t).forEach(n=>{er(e,n,t[n])})}function er(e,t,n){t in e?e[t]=typeof e[t]=="boolean"&&n===""?!0:n:_(e,t,n)}function ui(e){return/-/.test(e)?$n:Yn}function di(e){let t;return{p(...n){t=n,t.forEach(r=>e.push(r))},r(){t.forEach(n=>e.splice(e.indexOf(n),1))}}}function pi(e){return e===""?null:+e}function tr(e){return Array.from(e.childNodes)}function ne(e,t){t=""+t,e.data!==t&&(e.data=t)}function gi(e,t){e.value=t??""}function Q(e,t,n,r){n==null?e.style.removeProperty(t):e.style.setProperty(t,n,r?"important":"")}let Ie;function nr(){if(Ie===void 0){Ie=!1;try{typeof window<"u"&&window.parent&&window.parent.document}catch{Ie=!0}}return Ie}function hi(e,t){getComputedStyle(e).position==="static"&&(e.style.position="relative");const r=B("iframe");r.setAttribute("style","display: block; position: absolute; top: 0; left: 0; width: 100%; height: 100%; overflow: hidden; border: 0; opacity: 0; pointer-events: none; z-index: -1;"),r.setAttribute("aria-hidden","true"),r.tabIndex=-1;const i=nr();let o;return 
i?(r.src="data:text/html,<script>onresize=function(){parent.postMessage(0,'*')}<\/script>",o=Pt(window,"message",s=>{s.source===r.contentWindow&&t()})):(r.src="about:blank",r.onload=()=>{o=Pt(r.contentWindow,"resize",t),t()}),S(e,r),()=>{(i||o&&r.contentWindow)&&o(),v(r)}}function H(e,t,n){e.classList.toggle(t,!!n)}function _n(e,t,{bubbles:n=!1,cancelable:r=!1}={}){return new CustomEvent(e,{detail:t,bubbles:n,cancelable:r})}class mi{is_svg=!1;e=void 0;n=void 0;t=void 0;a=void 0;constructor(t=!1){this.is_svg=t,this.e=this.n=null}c(t){this.h(t)}m(t,n,r=null){this.e||(this.is_svg?this.e=re(n.nodeName):this.e=B(n.nodeType===11?"TEMPLATE":n.nodeName),this.t=n.tagName!=="TEMPLATE"?n:n.content,this.c(t)),this.i(r)}h(t){this.e.innerHTML=t,this.n=Array.from(this.e.nodeName==="TEMPLATE"?this.e.content.childNodes:this.e.childNodes)}i(t){for(let n=0;n<this.n.length;n+=1)k(this.t,this.n[n],t)}p(t){this.d(),this.h(t),this.i(this.a)}d(){this.n.forEach(v)}}function _i(e,t){return new e(t)}const We=new Map;let He=0;function rr(e){let t=5381,n=e.length;for(;n--;)t=(t<<5)-t^e.charCodeAt(n);return t>>>0}function ir(e,t){const n={stylesheet:Qn(t),rules:{}};return We.set(e,n),n}function Je(e,t,n,r,i,o,s,a=0){const c=16.666/r;let l=`{
- `;for(let b=0;b<=1;b+=c){const z=t+(n-t)*o(b);l+=b*100+`%{${s(z,1-z)}}
- `}const f=l+`100% {${s(n,1-n)}}
- }`,u=`__svelte_${rr(f)}_${a}`,g=hn(e),{stylesheet:h,rules:d}=We.get(g)||ir(g,e);d[u]||(d[u]=!0,h.insertRule(`@keyframes ${u} ${f}`,h.cssRules.length));const w=e.style.animation||"";return e.style.animation=`${w?`${w}, `:""}${u} ${r}ms linear ${i}ms 1 both`,He+=1,u}function Ze(e,t){const n=(e.style.animation||"").split(", "),r=n.filter(t?o=>o.indexOf(t)<0:o=>o.indexOf("__svelte")===-1),i=n.length-r.length;i&&(e.style.animation=r.join(", "),He-=i,He||or())}function or(){wt(()=>{He||(We.forEach(e=>{const{ownerNode:t}=e.stylesheet;t&&v(t)}),We.clear())})}let Oe;function Le(e){Oe=e}function pe(){if(!Oe)throw new Error("Function called outside component initialization");return Oe}function bi(e){pe().$$.before_update.push(e)}function dt(e){pe().$$.on_mount.push(e)}function wi(e){pe().$$.after_update.push(e)}function sr(e){pe().$$.on_destroy.push(e)}function yi(){const e=pe();return(t,n,{cancelable:r=!1}={})=>{const i=e.$$.callbacks[t];if(i){const o=_n(t,n,{cancelable:r});return i.slice().forEach(s=>{s.call(e,o)}),!o.defaultPrevented}return!0}}function ar(e,t){return pe().$$.context.set(e,t),t}function vi(e){return pe().$$.context.get(e)}function ki(e,t){const n=e.$$.callbacks[t.type];n&&n.slice().forEach(r=>r.call(this,t))}const _e=[],fe=[];let ye=[];const pt=[],bn=Promise.resolve();let gt=!1;function wn(){gt||(gt=!0,bn.then(vn))}function lr(){return wn(),bn}function ve(e){ye.push(e)}function yn(e){pt.push(e)}const at=new Set;let he=0;function vn(){if(he!==0)return;const e=Oe;do{try{for(;he<_e.length;){const t=_e[he];he++,Le(t),cr(t.$$)}}catch(t){throw _e.length=0,he=0,t}for(Le(null),_e.length=0,he=0;fe.length;)fe.pop()();for(let t=0;t<ye.length;t+=1){const n=ye[t];at.has(n)||(at.add(n),n())}ye.length=0}while(_e.length);for(;pt.length;)pt.pop()();gt=!1,at.clear(),Le(e)}function cr(e){if(e.fragment!==null){e.update(),ae(e.before_update);const t=e.dirty;e.dirty=[-1],e.fragment&&e.fragment.p(e.ctx,t),e.after_update.forEach(ve)}}function fr(e){const t=[],n=[];ye.forEach(r=>e.indexOf(r)===-1?t.push(r):n.push(r)),n.forEach(r=>r()),ye=t}let qe;function yt(){return qe||(qe=Promise.resolve(),qe.then(()=>{qe=null})),qe}function ce(e,t,n){e.dispatchEvent(_n(`${t?"intro":"outro"}${n}`))}const Ue=new Set;let oe;function Qe(){oe={r:0,c:[],p:oe}}function Ke(){oe.r||ae(oe.c),oe=oe.p}function J(e,t){e&&e.i&&(Ue.delete(e),e.i(t))}function K(e,t,n,r){if(e&&e.o){if(Ue.has(e))return;Ue.add(e),oe.c.push(()=>{Ue.delete(e),r&&(n&&e.d(1),r())}),e.o(t)}else r&&r()}const vt={duration:0};function zi(e,t,n){const r={direction:"in"};let i=t(e,n,r),o=!1,s,a,c=0;function l(){s&&Ze(e,s)}function f(){const{delay:g=0,duration:h=300,easing:d=bt,tick:w=V,css:b}=i||vt;b&&(s=Je(e,0,1,h,g,d,b,c++)),w(0,1);const z=Me()+g,N=z+h;a&&a.abort(),o=!0,ve(()=>ce(e,!0,"start")),a=$e(p=>{if(o){if(p>=N)return w(1,0),ce(e,!0,"end"),l(),o=!1;if(p>=z){const x=d((p-z)/h);w(x,1-x)}}return o})}let u=!1;return{start(){u||(u=!0,Ze(e),ue(i)?(i=i(r),yt().then(f)):f())},invalidate(){u=!1},end(){o&&(l(),o=!1)}}}function xi(e,t,n){const r={direction:"out"};let i=t(e,n,r),o=!0,s;const a=oe;a.r+=1;let c;function l(){const{delay:f=0,duration:u=300,easing:g=bt,tick:h=V,css:d}=i||vt;d&&(s=Je(e,1,0,u,f,g,d));const w=Me()+f,b=w+u;ve(()=>ce(e,!1,"start")),"inert"in e&&(c=e.inert,e.inert=!0),$e(z=>{if(o){if(z>=b)return h(0,1),ce(e,!1,"end"),--a.r||ae(a.c),!1;if(z>=w){const N=g((z-w)/u);h(1-N,N)}}return o})}return ue(i)?yt().then(()=>{i=i(r),l()}):l(),{end(f){f&&"inert"in e&&(e.inert=c),f&&i.tick&&i.tick(1,0),o&&(s&&Ze(e,s),o=!1)}}}function Ai(e,t,n,r){let 
o=t(e,n,{direction:"both"}),s=r?0:1,a=null,c=null,l=null,f;function u(){l&&Ze(e,l)}function g(d,w){const b=d.b-s;return w*=Math.abs(b),{a:s,b:d.b,d:b,duration:w,start:d.start,end:d.start+w,group:d.group}}function h(d){const{delay:w=0,duration:b=300,easing:z=bt,tick:N=V,css:p}=o||vt,x={start:Me()+w,b:d};d||(x.group=oe,oe.r+=1),"inert"in e&&(d?f!==void 0&&(e.inert=f):(f=e.inert,e.inert=!0)),a||c?c=x:(p&&(u(),l=Je(e,s,d,b,w,z,p)),d&&N(0,1),a=g(x,b),ve(()=>ce(e,d,"start")),$e(E=>{if(c&&E>c.start&&(a=g(c,b),c=null,ce(e,a.b,"start"),p&&(u(),l=Je(e,s,a.b,a.duration,0,z,o.css))),a){if(E>=a.end)N(s=a.b,1-s),ce(e,a.b,"end"),c||(a.b?u():--a.group.r||ae(a.group.c)),a=null;else if(E>=a.start){const T=E-a.start;s=a.a+a.d*z(T/a.duration),N(s,1-s)}}return!!(a||c)}))}return{run(d){ue(o)?yt().then(()=>{o=o({direction:d?"in":"out"}),h(d)}):h(d)},end(){u(),a=c=null}}}function Xe(e){return e?.length!==void 0?e:Array.from(e)}function Ei(e,t){e.d(1),t.delete(e.key)}function ur(e,t){K(e,1,1,()=>{t.delete(e.key)})}function Si(e,t){e.f(),ur(e,t)}function Ni(e,t,n,r,i,o,s,a,c,l,f,u){let g=e.length,h=o.length,d=g;const w={};for(;d--;)w[e[d].key]=d;const b=[],z=new Map,N=new Map,p=[];for(d=h;d--;){const A=u(i,o,d),L=n(A);let M=s.get(L);M?r&&p.push(()=>M.p(A,t)):(M=l(L,A),M.c()),z.set(L,b[d]=M),L in w&&N.set(L,Math.abs(d-w[L]))}const x=new Set,E=new Set;function T(A){J(A,1),A.m(a,f),s.set(A.key,A),f=A.first,h--}for(;g&&h;){const A=b[h-1],L=e[g-1],M=A.key,U=L.key;A===L?(f=A.first,g--,h--):z.has(U)?!s.has(M)||x.has(M)?T(A):E.has(U)?g--:N.get(M)>N.get(U)?(E.add(M),T(A)):(x.add(U),g--):(c(L,s),g--)}for(;g--;){const A=e[g];z.has(A.key)||c(A,s)}for(;h;)T(b[h-1]);return ae(p),b}function dr(e,t){const n={},r={},i={$$scope:1};let o=e.length;for(;o--;){const s=e[o],a=t[o];if(a){for(const c in s)c in a||(r[c]=1);for(const c in a)i[c]||(n[c]=a[c],i[c]=1);e[o]=a}else for(const c in s)i[c]=1}for(const s in r)s in n||(n[s]=void 0);return n}function pr(e){return typeof e=="object"&&e!==null?e:{}}const gr=["allowfullscreen","allowpaymentrequest","async","autofocus","autoplay","checked","controls","default","defer","disabled","formnovalidate","hidden","inert","ismap","loop","multiple","muted","nomodule","novalidate","open","playsinline","readonly","required","reversed","selected"];[...gr];function kn(e,t,n){const r=e.$$.props[t];r!==void 0&&(e.$$.bound[r]=n,n(e.$$.ctx[r]))}function Be(e){e&&e.c()}function ke(e,t,n){const{fragment:r,after_update:i}=e.$$;r&&r.m(t,n),ve(()=>{const o=e.$$.on_mount.map(sn).filter(ue);e.$$.on_destroy?e.$$.on_destroy.push(...o):ae(o),e.$$.on_mount=[]}),i.forEach(ve)}function ze(e,t){const n=e.$$;n.fragment!==null&&(fr(n.after_update),ae(n.on_destroy),n.fragment&&n.fragment.d(t),n.on_destroy=n.fragment=null,n.ctx=[])}function hr(e,t){e.$$.dirty[0]===-1&&(_e.push(e),wn(),e.$$.dirty.fill(0)),e.$$.dirty[t/31|0]|=1<<t%31}function et(e,t,n,r,i,o,s,a=[-1]){const c=Oe;Le(e);const l=e.$$={fragment:null,ctx:[],props:o,update:V,not_equal:i,bound:Ot(),on_mount:[],on_destroy:[],on_disconnect:[],before_update:[],after_update:[],context:new Map(t.context||(c?c.$$.context:[])),callbacks:Ot(),dirty:a,skip_bound:!1,root:t.target||c.$$.root};s&&s(l.root);let f=!1;if(l.ctx=n?n(e,t.props||{},(u,g,...h)=>{const d=h.length?h[0]:g;return l.ctx&&i(l.ctx[u],l.ctx[u]=d)&&(!l.skip_bound&&l.bound[u]&&l.bound[u](d),f&&hr(e,u)),g}):[],l.update(),f=!0,ae(l.before_update),l.fragment=r?r(l.ctx):!1,t.target){if(t.hydrate){const u=tr(t.target);l.fragment&&l.fragment.l(u),u.forEach(v)}else 
l.fragment&&l.fragment.c();t.intro&&J(e.$$.fragment),ke(e,t.target,t.anchor),vn()}Le(c)}class tt{$$=void 0;$$set=void 0;$destroy(){ze(this,1),this.$destroy=V}$on(t,n){if(!ue(n))return V;const r=this.$$.callbacks[t]||(this.$$.callbacks[t]=[]);return r.push(n),()=>{const i=r.indexOf(n);i!==-1&&r.splice(i,1)}}$set(t){this.$$set&&!Jn(t)&&(this.$$.skip_bound=!0,this.$$set(t),this.$$.skip_bound=!1)}}const mr="4";typeof window<"u"&&(window.__svelte||(window.__svelte={v:new Set})).v.add(mr);const me=[];function _r(e,t){return{subscribe:Fe(e,t).subscribe}}function Fe(e,t=V){let n;const r=new Set;function i(a){if(Te(e,a)&&(e=a,n)){const c=!me.length;for(const l of r)l[1](),me.push(l,e);if(c){for(let l=0;l<me.length;l+=2)me[l][0](me[l+1]);me.length=0}}}function o(a){i(a(e))}function s(a,c=V){const l=[a,c];return r.add(l),r.size===1&&(n=t(i,o)||V),a(e),()=>{r.delete(l),r.size===0&&n&&(n(),n=null)}}return{set:i,update:o,subscribe:s}}function qi(e,t,n){const r=!Array.isArray(e),i=r?[e]:e;if(!i.every(Boolean))throw new Error("derived() expects stores as input, got a falsy value");const o=t.length<2;return _r(n,(s,a)=>{let c=!1;const l=[];let f=0,u=V;const g=()=>{if(f)return;u();const d=t(r?l[0]:l,s,a);o?s(d):u=ue(d)?d:V},h=i.map((d,w)=>an(d,b=>{l[w]=b,f&=~(1<<w),c&&g()},()=>{f|=1<<w}));return c=!0,g(),function(){ae(h),u(),c=!1}})}const br="https://gradio.s3-us-west-2.amazonaws.com/3.37.0/assets/spaces-a79177ad.svg";function Tt(e){let t,n,r,i,o,s,a,c,l,f,u,g,h,d,w;return{c(){t=B("div"),n=B("span"),r=B("a"),i=O(e[4]),s=$(),a=B("span"),a.innerHTML=`built with
- <a class="gradio svelte-1kyws56" href="https://gradio.app">Gradio</a>.`,c=$(),l=B("span"),f=O(`Hosted on
- `),u=B("a"),g=B("span"),h=B("img"),w=O(" Spaces"),_(r,"href",o="https://huggingface.co/spaces/"+e[4]),_(r,"class","title svelte-1kyws56"),_(n,"class","svelte-1kyws56"),_(a,"class","svelte-1kyws56"),Hn(h.src,d=br)||_(h,"src",d),_(h,"class","svelte-1kyws56"),_(g,"class","space-logo svelte-1kyws56"),_(u,"class","hf svelte-1kyws56"),_(u,"href","https://huggingface.co/spaces"),_(l,"class","svelte-1kyws56"),_(t,"class","info svelte-1kyws56")},m(b,z){k(b,t,z),S(t,n),S(n,r),S(r,i),S(t,s),S(t,a),S(t,c),S(t,l),S(l,f),S(l,u),S(u,g),S(g,h),S(u,w)},p(b,z){z&16&&ne(i,b[4]),z&16&&o!==(o="https://huggingface.co/spaces/"+b[4])&&_(r,"href",o)},d(b){b&&v(t)}}}function wr(e){let t,n,r,i,o;const s=e[9].default,a=ln(s,e,e[8],null);let c=e[5]&&e[4]&&e[6]&&Tt(e);return{c(){t=B("div"),n=B("div"),a&&a.c(),r=$(),c&&c.c(),_(n,"class","main svelte-1kyws56"),_(t,"class",i="gradio-container gradio-container-"+e[1]+" svelte-1kyws56"),H(t,"app",!e[5]&&!e[3]),H(t,"embed-container",e[5]),H(t,"with-info",e[6]),Q(t,"min-height",e[7]?"initial":e[2]),Q(t,"flex-grow",e[5]?"auto":"1")},m(l,f){k(l,t,f),S(t,n),a&&a.m(n,null),S(t,r),c&&c.m(t,null),e[10](t),o=!0},p(l,[f]){a&&a.p&&(!o||f&256)&&un(a,s,l,l[8],o?fn(s,l[8],f,null):dn(l[8]),null),l[5]&&l[4]&&l[6]?c?c.p(l,f):(c=Tt(l),c.c(),c.m(t,null)):c&&(c.d(1),c=null),(!o||f&2&&i!==(i="gradio-container gradio-container-"+l[1]+" svelte-1kyws56"))&&_(t,"class",i),(!o||f&42)&&H(t,"app",!l[5]&&!l[3]),(!o||f&34)&&H(t,"embed-container",l[5]),(!o||f&66)&&H(t,"with-info",l[6]),f&132&&Q(t,"min-height",l[7]?"initial":l[2]),f&32&&Q(t,"flex-grow",l[5]?"auto":"1")},i(l){o||(J(a,l),o=!0)},o(l){K(a,l),o=!1},d(l){l&&v(t),a&&a.d(l),c&&c.d(),e[10](null)}}}function yr(e,t,n){let{$$slots:r={},$$scope:i}=t,{wrapper:o}=t,{version:s}=t,{initial_height:a}=t,{is_embed:c}=t,{space:l}=t,{display:f}=t,{info:u}=t,{loaded:g}=t;function h(d){fe[d?"unshift":"push"](()=>{o=d,n(0,o)})}return e.$$set=d=>{"wrapper"in d&&n(0,o=d.wrapper),"version"in d&&n(1,s=d.version),"initial_height"in d&&n(2,a=d.initial_height),"is_embed"in d&&n(3,c=d.is_embed),"space"in d&&n(4,l=d.space),"display"in d&&n(5,f=d.display),"info"in d&&n(6,u=d.info),"loaded"in d&&n(7,g=d.loaded),"$$scope"in d&&n(8,i=d.$$scope)},[o,s,a,c,l,f,u,g,i,r,h]}class vr extends tt{constructor(t){super(),et(this,t,yr,wr,Te,{wrapper:0,version:1,initial_height:2,is_embed:3,space:4,display:5,info:6,loaded:7})}}const be=e=>{let t=["","k","M","G","T","P","E","Z"],n=0;for(;e>1e3&&n<t.length-1;)e/=1e3,n++;let r=t[n];return(Number.isInteger(e)?e:e.toFixed(1))+r};function Ci(){const e=Fe({}),t=[],n=[],r=new Map,i=new Map,o=new Map,s=[];function a({fn_index:l,status:f,queue:u=!0,size:g,position:h=null,eta:d=null,message:w=null,progress:b}){const z=n[l],N=t[l],p=s[l],x=z.map(E=>{let T;const A=r.get(E)||0;if(p==="pending"&&f!=="pending"){let L=A-1;r.set(E,L<0?0:L),T=L>0?"pending":f}else p==="pending"&&f==="pending"?T="pending":p!=="pending"&&f==="pending"?(T="pending",r.set(E,A+1)):T=f;return{id:E,queue_position:h,queue_size:g,eta:d,status:T,message:w,progress:b}});N.map(E=>{const T=i.get(E)||0;if(p==="pending"&&f!=="pending"){let A=T-1;i.set(E,A<0?0:A),o.set(E,f)}else p!=="pending"&&f==="pending"?(i.set(E,T+1),o.set(E,f)):o.delete(E)}),e.update(E=>(x.forEach(({id:T,queue_position:A,queue_size:L,eta:M,status:U,message:ee,progress:C})=>{E[T]={queue:u,queue_size:L,queue_position:A,eta:M,message:ee,progress:C,status:U,fn_index:l}}),E)),s[l]=f}function c(l,f,u){t[l]=f,n[l]=u}return{update:a,register:c,subscribe:e.subscribe,get_status_for_fn(l){return 
s[l]},get_inputs_to_update(){return o}}}const kr=Fe({autoscroll:!1});function Bt(e){return Object.prototype.toString.call(e)==="[object Date]"}function ht(e,t,n,r){if(typeof n=="number"||Bt(n)){const i=r-n,o=(n-t)/(e.dt||1/60),s=e.opts.stiffness*i,a=e.opts.damping*o,c=(s-a)*e.inv_mass,l=(o+c)*e.dt;return Math.abs(l)<e.opts.precision&&Math.abs(i)<e.opts.precision?r:(e.settled=!1,Bt(n)?new Date(n.getTime()+l):n+l)}else{if(Array.isArray(n))return n.map((i,o)=>ht(e,t[o],n[o],r[o]));if(typeof n=="object"){const i={};for(const o in n)i[o]=ht(e,t[o],n[o],r[o]);return i}else throw new Error(`Cannot spring ${typeof n} values`)}}function Ft(e,t={}){const n=Fe(e),{stiffness:r=.15,damping:i=.8,precision:o=.01}=t;let s,a,c,l=e,f=e,u=1,g=0,h=!1;function d(b,z={}){f=b;const N=c={};return e==null||z.hard||w.stiffness>=1&&w.damping>=1?(h=!0,s=Me(),l=b,n.set(e=f),Promise.resolve()):(z.soft&&(g=1/((z.soft===!0?.5:+z.soft)*60),u=0),a||(s=Me(),h=!1,a=$e(p=>{if(h)return h=!1,a=null,!1;u=Math.min(u+g,1);const x={inv_mass:u,opts:w,settled:!0,dt:(p-s)*60/1e3},E=ht(x,l,e,f);return s=p,l=e,n.set(e=E),x.settled&&(a=null),!x.settled})),new Promise(p=>{a.promise.then(()=>{N===c&&p()})}))}const w={set:d,update:(b,z)=>d(b(f,e),z),subscribe:n.subscribe,stiffness:r,damping:i,precision:o};return w}function zr(e){let t,n,r,i,o,s,a,c,l,f,u,g;return{c(){t=B("div"),n=re("svg"),r=re("g"),i=re("path"),o=re("path"),s=re("path"),a=re("path"),c=re("g"),l=re("path"),f=re("path"),u=re("path"),g=re("path"),_(i,"d","M255.926 0.754768L509.702 139.936V221.027L255.926 81.8465V0.754768Z"),_(i,"fill","#FF7C00"),_(i,"fill-opacity","0.4"),_(i,"class","svelte-zyxd38"),_(o,"d","M509.69 139.936L254.981 279.641V361.255L509.69 221.55V139.936Z"),_(o,"fill","#FF7C00"),_(o,"class","svelte-zyxd38"),_(s,"d","M0.250138 139.937L254.981 279.641V361.255L0.250138 221.55V139.937Z"),_(s,"fill","#FF7C00"),_(s,"fill-opacity","0.4"),_(s,"class","svelte-zyxd38"),_(a,"d","M255.923 0.232622L0.236328 139.936V221.55L255.923 81.8469V0.232622Z"),_(a,"fill","#FF7C00"),_(a,"class","svelte-zyxd38"),Q(r,"transform","translate("+e[1][0]+"px, "+e[1][1]+"px)"),_(l,"d","M255.926 141.5L509.702 280.681V361.773L255.926 222.592V141.5Z"),_(l,"fill","#FF7C00"),_(l,"fill-opacity","0.4"),_(l,"class","svelte-zyxd38"),_(f,"d","M509.69 280.679L254.981 420.384V501.998L509.69 362.293V280.679Z"),_(f,"fill","#FF7C00"),_(f,"class","svelte-zyxd38"),_(u,"d","M0.250138 280.681L254.981 420.386V502L0.250138 362.295V280.681Z"),_(u,"fill","#FF7C00"),_(u,"fill-opacity","0.4"),_(u,"class","svelte-zyxd38"),_(g,"d","M255.923 140.977L0.236328 280.68V362.294L255.923 222.591V140.977Z"),_(g,"fill","#FF7C00"),_(g,"class","svelte-zyxd38"),Q(c,"transform","translate("+e[2][0]+"px, "+e[2][1]+"px)"),_(n,"viewBox","-1200 -1200 3000 3000"),_(n,"fill","none"),_(n,"xmlns","http://www.w3.org/2000/svg"),_(n,"class","svelte-zyxd38"),_(t,"class","svelte-zyxd38"),H(t,"margin",e[0])},m(h,d){k(h,t,d),S(t,n),S(n,r),S(r,i),S(r,o),S(r,s),S(r,a),S(n,c),S(c,l),S(c,f),S(c,u),S(c,g)},p(h,[d]){d&2&&Q(r,"transform","translate("+h[1][0]+"px, "+h[1][1]+"px)"),d&4&&Q(c,"transform","translate("+h[2][0]+"px, "+h[2][1]+"px)"),d&1&&H(t,"margin",h[0])},i:V,o:V,d(h){h&&v(t)}}}function xr(e,t,n){let r,i,{margin:o=!0}=t;const s=Ft([0,0]);Ve(e,s,g=>n(1,r=g));const a=Ft([0,0]);Ve(e,a,g=>n(2,i=g));let c;async function l(){await Promise.all([s.set([125,140]),a.set([-125,-140])]),await Promise.all([s.set([-125,140]),a.set([125,-140])]),await Promise.all([s.set([-125,0]),a.set([125,-0])]),await Promise.all([s.set([125,0]),a.set([-125,0])])}async 
function f(){await l(),c||f()}async function u(){await Promise.all([s.set([125,0]),a.set([-125,0])]),f()}return dt(()=>(u(),()=>c=!0)),e.$$set=g=>{"margin"in g&&n(0,o=g.margin)},[o,r,i,s,a]}class Ar extends tt{constructor(t){super(),et(this,t,xr,zr,Te,{margin:0})}}const Er=e=>({}),Rt=e=>({});function Dt(e,t,n){const r=e.slice();return r[36]=t[n],r[38]=n,r}function It(e,t,n){const r=e.slice();return r[36]=t[n],r}function Sr(e){let t,n,r;const i=e[27].error,o=ln(i,e,e[26],Rt);return{c(){t=B("span"),t.textContent="Error",n=$(),o&&o.c(),_(t,"class","error svelte-zlszon")},m(s,a){k(s,t,a),k(s,n,a),o&&o.m(s,a),r=!0},p(s,a){o&&o.p&&(!r||a[0]&67108864)&&un(o,i,s,s[26],r?fn(i,s[26],a,Er):dn(s[26]),Rt)},i(s){r||(J(o,s),r=!0)},o(s){K(o,s),r=!1},d(s){s&&(v(t),v(n)),o&&o.d(s)}}}function Nr(e){let t,n,r,i,o,s,a,c,l,f=e[7]==="default"&&e[16]&&e[5]==="full"&&jt(e);function u(p,x){if(p[6])return Lr;if(p[1]!==null&&p[2]!==void 0&&p[1]>=0)return Cr;if(p[1]===0)return qr}let g=u(e),h=g&&g(e),d=e[4]&&Vt(e);const w=[Tr,Pr],b=[];function z(p,x){return p[13]!=null?0:p[5]==="full"?1:-1}~(o=z(e))&&(s=b[o]=w[o](e));let N=!e[4]&&Xt(e);return{c(){f&&f.c(),t=$(),n=B("div"),h&&h.c(),r=$(),d&&d.c(),i=$(),s&&s.c(),a=$(),N&&N.c(),c=de(),_(n,"class","progress-text svelte-zlszon"),H(n,"meta-text-center",e[7]==="center"),H(n,"meta-text",e[7]==="default")},m(p,x){f&&f.m(p,x),k(p,t,x),k(p,n,x),h&&h.m(n,null),S(n,r),d&&d.m(n,null),k(p,i,x),~o&&b[o].m(p,x),k(p,a,x),N&&N.m(p,x),k(p,c,x),l=!0},p(p,x){p[7]==="default"&&p[16]&&p[5]==="full"?f?f.p(p,x):(f=jt(p),f.c(),f.m(t.parentNode,t)):f&&(f.d(1),f=null),g===(g=u(p))&&h?h.p(p,x):(h&&h.d(1),h=g&&g(p),h&&(h.c(),h.m(n,r))),p[4]?d?d.p(p,x):(d=Vt(p),d.c(),d.m(n,null)):d&&(d.d(1),d=null),(!l||x[0]&128)&&H(n,"meta-text-center",p[7]==="center"),(!l||x[0]&128)&&H(n,"meta-text",p[7]==="default");let E=o;o=z(p),o===E?~o&&b[o].p(p,x):(s&&(Qe(),K(b[E],1,1,()=>{b[E]=null}),Ke()),~o?(s=b[o],s?s.p(p,x):(s=b[o]=w[o](p),s.c()),J(s,1),s.m(a.parentNode,a)):s=null),p[4]?N&&(N.d(1),N=null):N?N.p(p,x):(N=Xt(p),N.c(),N.m(c.parentNode,c))},i(p){l||(J(s),l=!0)},o(p){K(s),l=!1},d(p){p&&(v(t),v(n),v(i),v(a),v(c)),f&&f.d(p),h&&h.d(),d&&d.d(),~o&&b[o].d(p),N&&N.d(p)}}}function jt(e){let t,n=`translateX(${(e[15]||0)*100-100}%)`;return{c(){t=B("div"),_(t,"class","eta-bar svelte-zlszon"),Q(t,"transform",n)},m(r,i){k(r,t,i)},p(r,i){i[0]&32768&&n!==(n=`translateX(${(r[15]||0)*100-100}%)`)&&Q(t,"transform",n)},d(r){r&&v(t)}}}function qr(e){let t;return{c(){t=O("processing |")},m(n,r){k(n,t,r)},p:V,d(n){n&&v(t)}}}function Cr(e){let t,n=e[1]+1+"",r,i,o,s;return{c(){t=O("queue: "),r=O(n),i=O("/"),o=O(e[2]),s=O(" |")},m(a,c){k(a,t,c),k(a,r,c),k(a,i,c),k(a,o,c),k(a,s,c)},p(a,c){c[0]&2&&n!==(n=a[1]+1+"")&&ne(r,n),c[0]&4&&ne(o,a[2])},d(a){a&&(v(t),v(r),v(i),v(o),v(s))}}}function Lr(e){let t,n=Xe(e[6]),r=[];for(let i=0;i<n.length;i+=1)r[i]=Gt(It(e,n,i));return{c(){for(let i=0;i<r.length;i+=1)r[i].c();t=de()},m(i,o){for(let s=0;s<r.length;s+=1)r[s]&&r[s].m(i,o);k(i,t,o)},p(i,o){if(o[0]&64){n=Xe(i[6]);let s;for(s=0;s<n.length;s+=1){const a=It(i,n,s);r[s]?r[s].p(a,o):(r[s]=Gt(a),r[s].c(),r[s].m(t.parentNode,t))}for(;s<r.length;s+=1)r[s].d(1);r.length=n.length}},d(i){i&&v(t),mn(r,i)}}}function Ut(e){let t,n=e[36].unit+"",r,i,o=" ",s;function a(f,u){return f[36].length!=null?Or:Mr}let c=a(e),l=c(e);return{c(){l.c(),t=$(),r=O(n),i=O(" | 
"),s=O(o)},m(f,u){l.m(f,u),k(f,t,u),k(f,r,u),k(f,i,u),k(f,s,u)},p(f,u){c===(c=a(f))&&l?l.p(f,u):(l.d(1),l=c(f),l&&(l.c(),l.m(t.parentNode,t))),u[0]&64&&n!==(n=f[36].unit+"")&&ne(r,n)},d(f){f&&(v(t),v(r),v(i),v(s)),l.d(f)}}}function Mr(e){let t=be(e[36].index||0)+"",n;return{c(){n=O(t)},m(r,i){k(r,n,i)},p(r,i){i[0]&64&&t!==(t=be(r[36].index||0)+"")&&ne(n,t)},d(r){r&&v(n)}}}function Or(e){let t=be(e[36].index||0)+"",n,r,i=be(e[36].length)+"",o;return{c(){n=O(t),r=O("/"),o=O(i)},m(s,a){k(s,n,a),k(s,r,a),k(s,o,a)},p(s,a){a[0]&64&&t!==(t=be(s[36].index||0)+"")&&ne(n,t),a[0]&64&&i!==(i=be(s[36].length)+"")&&ne(o,i)},d(s){s&&(v(n),v(r),v(o))}}}function Gt(e){let t,n=e[36].index!=null&&Ut(e);return{c(){n&&n.c(),t=de()},m(r,i){n&&n.m(r,i),k(r,t,i)},p(r,i){r[36].index!=null?n?n.p(r,i):(n=Ut(r),n.c(),n.m(t.parentNode,t)):n&&(n.d(1),n=null)},d(r){r&&v(t),n&&n.d(r)}}}function Vt(e){let t,n=e[0]?`/${e[17]}`:"",r,i;return{c(){t=O(e[18]),r=O(n),i=O("s")},m(o,s){k(o,t,s),k(o,r,s),k(o,i,s)},p(o,s){s[0]&262144&&ne(t,o[18]),s[0]&131073&&n!==(n=o[0]?`/${o[17]}`:"")&&ne(r,n)},d(o){o&&(v(t),v(r),v(i))}}}function Pr(e){let t,n;return t=new Ar({props:{margin:e[7]==="default"}}),{c(){Be(t.$$.fragment)},m(r,i){ke(t,r,i),n=!0},p(r,i){const o={};i[0]&128&&(o.margin=r[7]==="default"),t.$set(o)},i(r){n||(J(t.$$.fragment,r),n=!0)},o(r){K(t.$$.fragment,r),n=!1},d(r){ze(t,r)}}}function Tr(e){let t,n,r,i,o,s=`${e[13]*100}%`,a=e[6]!=null&&Wt(e);return{c(){t=B("div"),n=B("div"),a&&a.c(),r=$(),i=B("div"),o=B("div"),_(n,"class","progress-level-inner svelte-zlszon"),_(o,"class","progress-bar svelte-zlszon"),Q(o,"width",s),_(i,"class","progress-bar-wrap svelte-zlszon"),_(t,"class","progress-level svelte-zlszon")},m(c,l){k(c,t,l),S(t,n),a&&a.m(n,null),S(t,r),S(t,i),S(i,o),e[28](o)},p(c,l){c[6]!=null?a?a.p(c,l):(a=Wt(c),a.c(),a.m(n,null)):a&&(a.d(1),a=null),l[0]&8192&&s!==(s=`${c[13]*100}%`)&&Q(o,"width",s)},i:V,o:V,d(c){c&&v(t),a&&a.d(),e[28](null)}}}function Wt(e){let t,n=Xe(e[6]),r=[];for(let i=0;i<n.length;i+=1)r[i]=Kt(Dt(e,n,i));return{c(){for(let i=0;i<r.length;i+=1)r[i].c();t=de()},m(i,o){for(let s=0;s<r.length;s+=1)r[s]&&r[s].m(i,o);k(i,t,o)},p(i,o){if(o[0]&4160){n=Xe(i[6]);let s;for(s=0;s<n.length;s+=1){const a=Dt(i,n,s);r[s]?r[s].p(a,o):(r[s]=Kt(a),r[s].c(),r[s].m(t.parentNode,t))}for(;s<r.length;s+=1)r[s].d(1);r.length=n.length}},d(i){i&&v(t),mn(r,i)}}}function Ht(e){let t,n,r,i,o=e[38]!==0&&Br(),s=e[36].desc!=null&&Jt(e),a=e[36].desc!=null&&e[12]&&e[12][e[38]]!=null&&Zt(),c=e[12]!=null&&Qt(e);return{c(){o&&o.c(),t=$(),s&&s.c(),n=$(),a&&a.c(),r=$(),c&&c.c(),i=de()},m(l,f){o&&o.m(l,f),k(l,t,f),s&&s.m(l,f),k(l,n,f),a&&a.m(l,f),k(l,r,f),c&&c.m(l,f),k(l,i,f)},p(l,f){l[36].desc!=null?s?s.p(l,f):(s=Jt(l),s.c(),s.m(n.parentNode,n)):s&&(s.d(1),s=null),l[36].desc!=null&&l[12]&&l[12][l[38]]!=null?a||(a=Zt(),a.c(),a.m(r.parentNode,r)):a&&(a.d(1),a=null),l[12]!=null?c?c.p(l,f):(c=Qt(l),c.c(),c.m(i.parentNode,i)):c&&(c.d(1),c=null)},d(l){l&&(v(t),v(n),v(r),v(i)),o&&o.d(l),s&&s.d(l),a&&a.d(l),c&&c.d(l)}}}function Br(e){let t;return{c(){t=O(" /")},m(n,r){k(n,t,r)},d(n){n&&v(t)}}}function Jt(e){let t=e[36].desc+"",n;return{c(){n=O(t)},m(r,i){k(r,n,i)},p(r,i){i[0]&64&&t!==(t=r[36].desc+"")&&ne(n,t)},d(r){r&&v(n)}}}function Zt(e){let t;return{c(){t=O("-")},m(n,r){k(n,t,r)},d(n){n&&v(t)}}}function Qt(e){let t=(100*(e[12][e[38]]||0)).toFixed(1)+"",n,r;return{c(){n=O(t),r=O("%")},m(i,o){k(i,n,o),k(i,r,o)},p(i,o){o[0]&4096&&t!==(t=(100*(i[12][i[38]]||0)).toFixed(1)+"")&&ne(n,t)},d(i){i&&(v(n),v(r))}}}function Kt(e){let 
t,n=(e[36].desc!=null||e[12]&&e[12][e[38]]!=null)&&Ht(e);return{c(){n&&n.c(),t=de()},m(r,i){n&&n.m(r,i),k(r,t,i)},p(r,i){r[36].desc!=null||r[12]&&r[12][r[38]]!=null?n?n.p(r,i):(n=Ht(r),n.c(),n.m(t.parentNode,t)):n&&(n.d(1),n=null)},d(r){r&&v(t),n&&n.d(r)}}}function Xt(e){let t,n;return{c(){t=B("p"),n=O(e[8]),_(t,"class","loading svelte-zlszon")},m(r,i){k(r,t,i),S(t,n)},p(r,i){i[0]&256&&ne(n,r[8])},d(r){r&&v(t)}}}function Fr(e){let t,n,r,i,o;const s=[Nr,Sr],a=[];function c(l,f){return l[3]==="pending"?0:l[3]==="error"?1:-1}return~(n=c(e))&&(r=a[n]=s[n](e)),{c(){t=B("div"),r&&r.c(),_(t,"class",i="wrap "+e[7]+" "+e[5]+" svelte-zlszon"),H(t,"hide",!e[3]||e[3]==="complete"||e[5]==="hidden"),H(t,"translucent",e[7]==="center"&&(e[3]==="pending"||e[3]==="error")||e[10]||e[5]==="minimal"),H(t,"generating",e[3]==="generating"),Q(t,"position",e[9]?"absolute":"static"),Q(t,"padding",e[9]?"0":"var(--size-8) 0")},m(l,f){k(l,t,f),~n&&a[n].m(t,null),e[29](t),o=!0},p(l,f){let u=n;n=c(l),n===u?~n&&a[n].p(l,f):(r&&(Qe(),K(a[u],1,1,()=>{a[u]=null}),Ke()),~n?(r=a[n],r?r.p(l,f):(r=a[n]=s[n](l),r.c()),J(r,1),r.m(t,null)):r=null),(!o||f[0]&160&&i!==(i="wrap "+l[7]+" "+l[5]+" svelte-zlszon"))&&_(t,"class",i),(!o||f[0]&168)&&H(t,"hide",!l[3]||l[3]==="complete"||l[5]==="hidden"),(!o||f[0]&1192)&&H(t,"translucent",l[7]==="center"&&(l[3]==="pending"||l[3]==="error")||l[10]||l[5]==="minimal"),(!o||f[0]&168)&&H(t,"generating",l[3]==="generating"),f[0]&512&&Q(t,"position",l[9]?"absolute":"static"),f[0]&512&&Q(t,"padding",l[9]?"0":"var(--size-8) 0")},i(l){o||(J(r),o=!0)},o(l){K(r),o=!1},d(l){l&&v(t),~n&&a[n].d(),e[29](null)}}}let je=[],lt=!1;async function Rr(e,t=!0){if(!(window.__gradio_mode__==="website"||window.__gradio_mode__!=="app"&&t!==!0)){if(je.push(e),!lt)lt=!0;else return;await lr(),requestAnimationFrame(()=>{let n=[0,0];for(let r=0;r<je.length;r++){const o=je[r].getBoundingClientRect();(r===0||o.top+window.scrollY<=n[0])&&(n[0]=o.top+window.scrollY,n[1]=r)}window.scrollTo({top:n[0]-20,behavior:"smooth"}),lt=!1,je=[]})}}function Dr(e,t,n){let r,i;Ve(e,kr,y=>n(25,i=y));let{$$slots:o={},$$scope:s}=t,{eta:a=null}=t,{queue:c=!1}=t,{queue_position:l}=t,{queue_size:f}=t,{status:u}=t,{scroll_to_output:g=!1}=t,{timer:h=!0}=t,{show_progress:d="full"}=t,{message:w=null}=t,{progress:b=null}=t,{variant:z="default"}=t,{loading_text:N="Loading..."}=t,{absolute:p=!0}=t,{translucent:x=!1}=t,E,T=!1,A=0,L=0,M=null,U=0,ee=null,C,D=null,te=!0;const q=()=>{n(22,A=performance.now()),n(23,L=0),T=!0,W()};function W(){requestAnimationFrame(()=>{n(23,L=(performance.now()-A)/1e3),T&&W()})}const j=()=>{n(23,L=0),T&&(T=!1)};sr(()=>{T&&j()});let I=null;function ie(y){fe[y?"unshift":"push"](()=>{D=y,n(14,D),n(6,b),n(12,ee),n(13,C)})}function Z(y){fe[y?"unshift":"push"](()=>{E=y,n(11,E)})}return e.$$set=y=>{"eta"in y&&n(0,a=y.eta),"queue"in y&&n(19,c=y.queue),"queue_position"in y&&n(1,l=y.queue_position),"queue_size"in y&&n(2,f=y.queue_size),"status"in y&&n(3,u=y.status),"scroll_to_output"in y&&n(20,g=y.scroll_to_output),"timer"in y&&n(4,h=y.timer),"show_progress"in y&&n(5,d=y.show_progress),"message"in y&&n(21,w=y.message),"progress"in y&&n(6,b=y.progress),"variant"in y&&n(7,z=y.variant),"loading_text"in y&&n(8,N=y.loading_text),"absolute"in y&&n(9,p=y.absolute),"translucent"in y&&n(10,x=y.translucent),"$$scope"in 
y&&n(26,s=y.$$scope)},e.$$.update=()=>{e.$$.dirty[0]&21495809&&(a===null?n(0,a=M):c&&n(0,a=(performance.now()-A)/1e3+a),a!=null&&(n(17,I=a.toFixed(1)),n(24,M=a))),e.$$.dirty[0]&8388609&&n(15,U=a===null||a<=0||!L?null:Math.min(L/a,1)),e.$$.dirty[0]&64&&b!=null&&n(16,te=!1),e.$$.dirty[0]&28736&&(b!=null?n(12,ee=b.map(y=>y.index!=null&&y.length!=null?y.index/y.length:y.progress!=null?y.progress:void 0)):n(12,ee=null),ee?(n(13,C=ee[ee.length-1]),D&&(C===0?n(14,D.style.transition="0",D):n(14,D.style.transition="150ms",D))):n(13,C=void 0)),e.$$.dirty[0]&8&&(u==="pending"?q():j()),e.$$.dirty[0]&34605064&&E&&g&&(u==="pending"||u==="complete")&&Rr(E,i.autoscroll),e.$$.dirty[0]&2097160,e.$$.dirty[0]&8388608&&n(18,r=L.toFixed(1))},[a,l,f,u,h,d,b,z,N,p,x,E,ee,C,D,U,te,I,r,c,g,w,A,L,M,i,s,o,ie,Z]}class Ir extends tt{constructor(t){super(),et(this,t,Dr,Fr,Te,{eta:0,queue:19,queue_position:1,queue_size:2,status:3,scroll_to_output:20,timer:4,show_progress:5,message:21,progress:6,variant:7,loading_text:8,absolute:9,translucent:10},null,[-1,-1])}}function Yt(e){let t,n;return t=new Ir({props:{absolute:!e[4],status:e[12],timer:!1,queue_position:null,queue_size:null,translucent:!0,loading_text:Kr,$$slots:{error:[Gr]},$$scope:{ctx:e}}}),{c(){Be(t.$$.fragment)},m(r,i){ke(t,r,i),n=!0},p(r,i){const o={};i[0]&16&&(o.absolute=!r[4]),i[0]&4096&&(o.status=r[12]),i[0]&2304|i[1]&1024&&(o.$$scope={dirty:i,ctx:r}),t.$set(o)},i(r){n||(J(t.$$.fragment,r),n=!0)},o(r){K(t.$$.fragment,r),n=!1},d(r){ze(t,r)}}}function jr(e){let t;return{c(){t=B("p"),t.textContent="Please contact the author of the page to let them know.",_(t,"class","svelte-y6l4b")},m(n,r){k(n,t,r)},p:V,d(n){n&&v(t)}}}function Ur(e){let t,n,r,i,o,s;return{c(){t=B("p"),n=O("Please "),r=B("a"),i=O("contact the author of the space"),s=O(" to let them know."),_(r,"href",o="https://huggingface.co/spaces/"+e[8]+"/discussions/new?title="+e[19].title(e[11]?.detail)+"&description="+e[19].description(e[11]?.detail,location.origin)),_(r,"class","svelte-y6l4b"),_(t,"class","svelte-y6l4b")},m(a,c){k(a,t,c),S(t,n),S(t,r),S(r,i),S(t,s)},p(a,c){c[0]&2304&&o!==(o="https://huggingface.co/spaces/"+a[8]+"/discussions/new?title="+a[19].title(a[11]?.detail)+"&description="+a[19].description(a[11]?.detail,location.origin))&&_(r,"href",o)},d(a){a&&v(t)}}}function Gr(e){let t,n,r,i=(e[11]?.message||"")+"",o,s;function a(f,u){return(f[11].status==="space_error"||f[11].status==="paused")&&f[11].discussions_enabled?Ur:jr}let c=a(e),l=c(e);return{c(){t=B("div"),n=B("p"),r=B("strong"),o=O(i),s=$(),l.c(),_(n,"class","svelte-y6l4b"),_(t,"class","error svelte-y6l4b"),_(t,"slot","error")},m(f,u){k(f,t,u),S(t,n),S(n,r),S(r,o),S(t,s),l.m(t,null)},p(f,u){u[0]&2048&&i!==(i=(f[11]?.message||"")+"")&&ne(o,i),c===(c=a(f))&&l?l.p(f,u):(l.d(1),l=c(f),l&&(l.c(),l.m(t,null)))},d(f){f&&v(t),l.d()}}}function Vr(e){let t,n,r;const i=[{app:e[15]},e[10],{theme_mode:e[14]},{control_page_title:e[5]},{target:e[13]},{autoscroll:e[0]},{show_footer:!e[4]},{app_mode:e[3]}];function o(a){e[28](a)}let s={};for(let a=0;a<i.length;a+=1)s=on(s,i[a]);return e[9]!==void 0&&(s.ready=e[9]),t=new e[17]({props:s}),fe.push(()=>kn(t,"ready",o)),{c(){Be(t.$$.fragment)},m(a,c){ke(t,a,c),r=!0},p(a,c){const 
l=c[0]&58425?dr(i,[c[0]&32768&&{app:a[15]},c[0]&1024&&pr(a[10]),c[0]&16384&&{theme_mode:a[14]},c[0]&32&&{control_page_title:a[5]},c[0]&8192&&{target:a[13]},c[0]&1&&{autoscroll:a[0]},c[0]&16&&{show_footer:!a[4]},c[0]&8&&{app_mode:a[3]}]):{};!n&&c[0]&512&&(n=!0,l.ready=a[9],yn(()=>n=!1)),t.$set(l)},i(a){r||(J(t.$$.fragment,a),r=!0)},o(a){K(t.$$.fragment,a),r=!1},d(a){ze(t,a)}}}function Wr(e){let t,n;return t=new e[18]({props:{auth_message:e[10].auth_message,root:e[10].root,space_id:e[8],app_mode:e[3]}}),{c(){Be(t.$$.fragment)},m(r,i){ke(t,r,i),n=!0},p(r,i){const o={};i[0]&1024&&(o.auth_message=r[10].auth_message),i[0]&1024&&(o.root=r[10].root),i[0]&256&&(o.space_id=r[8]),i[0]&8&&(o.app_mode=r[3]),t.$set(o)},i(r){n||(J(t.$$.fragment,r),n=!0)},o(r){K(t.$$.fragment,r),n=!1},d(r){ze(t,r)}}}function Hr(e){let t,n,r,i,o,s=(e[12]==="pending"||e[12]==="error")&&!(e[10]&&e[10]?.auth_required)&&Yt(e);const a=[Wr,Vr],c=[];function l(f,u){return f[10]?.auth_required&&f[18]?0:f[10]&&f[17]&&f[16]?1:-1}return~(n=l(e))&&(r=c[n]=a[n](e)),{c(){s&&s.c(),t=$(),r&&r.c(),i=de()},m(f,u){s&&s.m(f,u),k(f,t,u),~n&&c[n].m(f,u),k(f,i,u),o=!0},p(f,u){(f[12]==="pending"||f[12]==="error")&&!(f[10]&&f[10]?.auth_required)?s?(s.p(f,u),u[0]&5120&&J(s,1)):(s=Yt(f),s.c(),J(s,1),s.m(t.parentNode,t)):s&&(Qe(),K(s,1,1,()=>{s=null}),Ke());let g=n;n=l(f),n===g?~n&&c[n].p(f,u):(r&&(Qe(),K(c[g],1,1,()=>{c[g]=null}),Ke()),~n?(r=c[n],r?r.p(f,u):(r=c[n]=a[n](f),r.c()),J(r,1),r.m(i.parentNode,i)):r=null)},i(f){o||(J(s),J(r),o=!0)},o(f){K(s),K(r),o=!1},d(f){f&&(v(t),v(i)),s&&s.d(f),~n&&c[n].d(f)}}}function Jr(e){let t,n,r;function i(s){e[29](s)}let o={display:e[6]&&e[4],is_embed:e[4],info:!!e[8]&&e[7],version:e[1],initial_height:e[2],space:e[8],loaded:e[12]==="complete",$$slots:{default:[Hr]},$$scope:{ctx:e}};return e[13]!==void 0&&(o.wrapper=e[13]),t=new vr({props:o}),fe.push(()=>kn(t,"wrapper",i)),{c(){Be(t.$$.fragment)},m(s,a){ke(t,s,a),r=!0},p(s,a){const c={};a[0]&80&&(c.display=s[6]&&s[4]),a[0]&16&&(c.is_embed=s[4]),a[0]&384&&(c.info=!!s[8]&&s[7]),a[0]&2&&(c.version=s[1]),a[0]&4&&(c.initial_height=s[2]),a[0]&256&&(c.space=s[8]),a[0]&4096&&(c.loaded=s[12]==="complete"),a[0]&524089|a[1]&1024&&(c.$$scope={dirty:a,ctx:s}),!n&&a[0]&8192&&(n=!0,c.wrapper=s[13],yn(()=>n=!1)),t.$set(c)},i(s){r||(J(t.$$.fragment,s),r=!0)},o(s){K(t.$$.fragment,s),r=!1},d(s){ze(t,s)}}}let Zr=-1;function Qr(){const e=Fe({}),t=new Map,n=new IntersectionObserver(i=>{i.forEach(o=>{if(o.isIntersecting){let s=t.get(o.target);s!==void 0&&e.update(a=>({...a,[s]:!0}))}})});function r(i,o){t.set(o,i),n.observe(o)}return{register:r,subscribe:e.subscribe}}const $t=Qr();let Kr="Loading...";function Xr(e,t,n){let r;Ve(e,$t,m=>n(27,r=m));let{autoscroll:i}=t,{version:o}=t,{initial_height:s}=t,{app_mode:a}=t,{is_embed:c}=t,{theme_mode:l="system"}=t,{control_page_title:f}=t,{container:u}=t,{info:g}=t,{eager:h}=t,{mount_css:d=ut}=t,{client:w}=t,{upload_files:b}=t,{space:z}=t,{host:N}=t,{src:p}=t,x=Zr++,E="pending",T=null,A,L=!1,M,U;async function ee(m,F){if(F){let P=document.createElement("style");P.innerHTML=F,m.appendChild(P)}await d(M.root+"/theme.css",document.head),M.stylesheets&&await Promise.all(M.stylesheets.map(P=>{let Y=P.startsWith("http:")||P.startsWith("https:");return d(Y?P:M.root+"/"+P,document.head)}))}async function C(m){const F=await(await fetch(m+"/app_id")).text();T===null?T=F:T!=F&&location.reload(),setTimeout(()=>C(m),250)}function D(m){let P=new URL(window.location.toString()).searchParams.get("__theme");return 
n(14,U=l||P||"system"),U==="dark"||U==="light"?q(m,U):n(14,U=te(m)),U}function te(m){const F=P();window?.matchMedia("(prefers-color-scheme: dark)")?.addEventListener("change",P);function P(){let Y=window?.matchMedia?.("(prefers-color-scheme: dark)").matches?"dark":"light";return q(m,Y),Y}return F}function q(m,F){const P=c?m.parentElement:document.body,Y=c?m:m.parentElement;Y.style.background="var(--body-background-fill)",F==="dark"?P.classList.add("dark"):P.classList.remove("dark")}let W={message:"",load_status:"pending",status:"sleeping",detail:"SLEEPING"},j,I=!1;function ie(m){n(11,W=m)}dt(async()=>{window.__gradio_mode__!=="website"&&n(14,U=D(A));const m=N||z||p||location.origin;n(15,j=await w(m,{status_callback:ie,normalise_files:!1})),n(10,M=j.config),window.__gradio_space__=M.space_id,n(11,W={message:"",load_status:"complete",status:"running",detail:"RUNNING"}),await ee(A,M.css),n(16,I=!0),window.__is_colab__=M.is_colab,M.dev_mode&&C(M.root)}),ar("upload_files",b);let Z,y;async function X(){n(17,Z=(await Ge(()=>import("./Blocks-c9e1499d.js").then(m=>m.B),["assets/Blocks-c9e1499d.js","assets/Button-f155035a.js","assets/Button-9b719f62.css","assets/Blocks-f08d137e.css"])).default)}async function xe(){n(18,y=(await Ge(()=>import("./Login-2b7e7f3a.js"),["assets/Login-2b7e7f3a.js","assets/Form-cd229de0.js","assets/Form-3812b7f1.css","assets/Textbox-1f11d244.js","assets/Button-f155035a.js","assets/Button-9b719f62.css","assets/BlockTitle-dee077e8.js","assets/Info-7c6961ef.js","assets/Copy-9f1657c4.js","assets/ColorPicker-5063dbc4.css","assets/Column-6c43afc7.js","assets/Column-2853eb31.css","assets/Login-9c3cc0eb.css"])).default)}function ge(){M.auth_required?xe():X()}const Ae={readable_error:{NO_APP_FILE:"there is no app file",CONFIG_ERROR:"there is a config error",BUILD_ERROR:"there is a build error",RUNTIME_ERROR:"there is a runtime error",PAUSED:"the space is paused"},title(m){return encodeURIComponent(`Space isn't working because ${this.readable_error[m]||"an error"}`)},description(m,F){return encodeURIComponent(`Hello,
-
- Firstly, thanks for creating this space!
-
- I noticed that the space isn't working correctly because there is ${this.readable_error[m]||"an error"}.
-
- It would be great if you could take a look at this because this space is being embedded on ${F}.
-
- Thanks!`)}};dt(async()=>{$t.register(x,A)});function Ee(m){L=m,n(9,L)}function G(m){A=m,n(13,A)}return e.$$set=m=>{"autoscroll"in m&&n(0,i=m.autoscroll),"version"in m&&n(1,o=m.version),"initial_height"in m&&n(2,s=m.initial_height),"app_mode"in m&&n(3,a=m.app_mode),"is_embed"in m&&n(4,c=m.is_embed),"theme_mode"in m&&n(20,l=m.theme_mode),"control_page_title"in m&&n(5,f=m.control_page_title),"container"in m&&n(6,u=m.container),"info"in m&&n(7,g=m.info),"eager"in m&&n(21,h=m.eager),"mount_css"in m&&n(22,d=m.mount_css),"client"in m&&n(23,w=m.client),"upload_files"in m&&n(24,b=m.upload_files),"space"in m&&n(8,z=m.space),"host"in m&&n(25,N=m.host),"src"in m&&n(26,p=m.src)},e.$$.update=()=>{e.$$.dirty[0]&2560&&n(12,E=!L&&W.load_status!=="error"?"pending":!L&&W.load_status==="error"?"error":W.load_status),e.$$.dirty[0]&136315904&&M&&(h||r[x])&&ge()},[i,o,s,a,c,f,u,g,z,L,M,W,E,A,U,j,I,Z,y,Ae,l,h,d,w,b,N,p,r,Ee,G]}class en extends tt{constructor(t){super(),et(this,t,Xr,Jr,Te,{autoscroll:0,version:1,initial_height:2,app_mode:3,is_embed:4,theme_mode:20,control_page_title:5,container:6,info:7,eager:21,mount_css:22,client:23,upload_files:24,space:8,host:25,src:26},null,[-1,-1])}}const Yr="https://gradio.s3-us-west-2.amazonaws.com/3.37.0/assets/index-f2292b12.css";let mt;mt=[];function $r(){class e extends HTMLElement{constructor(){super(),this.host=this.getAttribute("host"),this.space=this.getAttribute("space"),this.src=this.getAttribute("src"),this.control_page_title=this.getAttribute("control_page_title"),this.initial_height=this.getAttribute("initial_height")??"300px",this.is_embed=this.getAttribute("embed")??"true",this.container=this.getAttribute("container")??"true",this.info=this.getAttribute("info")??!0,this.autoscroll=this.getAttribute("autoscroll"),this.eager=this.getAttribute("eager"),this.theme_mode=this.getAttribute("theme_mode"),this.updating=!1,this.loading=!1}async connectedCallback(){this.loading=!0,this.app&&this.app.$destroy(),typeof mt!="string"&&mt.forEach(i=>ut(i,document.head)),await ut(Yr,document.head);const n=new CustomEvent("domchange",{bubbles:!0,cancelable:!1,composed:!0});new MutationObserver(i=>{this.dispatchEvent(n)}).observe(this,{childList:!0}),this.app=new en({target:this,props:{space:this.space?this.space.trim():this.space,src:this.src?this.src.trim():this.src,host:this.host?this.host.trim():this.host,info:this.info!=="false",container:this.container!=="false",is_embed:this.is_embed!=="false",initial_height:this.initial_height,eager:this.eager==="true",version:"3-37-0",theme_mode:this.theme_mode,autoscroll:this.autoscroll==="true",control_page_title:this.control_page_title==="true",client:Nt,upload_files:St,app_mode:window.__gradio_mode__==="app"}}),this.updating&&this.setAttribute(this.updating.name,this.updating.value),this.loading=!1}static get observedAttributes(){return["src","space","host"]}attributeChangedCallback(n,r,i){if((n==="host"||n==="space"||n==="src")&&i!==r){if(this.updating={name:n,value:i},this.loading)return;this.app&&this.app.$destroy(),this.space=null,this.host=null,this.src=null,n==="host"?this.host=i:n==="space"?this.space=i:n==="src"&&(this.src=i),this.app=new 
en({target:this,props:{space:this.space?this.space.trim():this.space,src:this.src?this.src.trim():this.src,host:this.host?this.host.trim():this.host,info:this.info!=="false",container:this.container!=="false",is_embed:this.is_embed!=="false",initial_height:this.initial_height,eager:this.eager==="true",version:"3-37-0",theme_mode:this.theme_mode,autoscroll:this.autoscroll==="true",control_page_title:this.control_page_title==="true",client:Nt,upload_files:St,app_mode:window.__gradio_mode__==="app"}}),this.updating=!1}}}customElements.get("gradio-app")||customElements.define("gradio-app",e)}$r();export{zi as $,v as A,yi as B,dt as C,ar as D,on as E,ki as F,Xe as G,Ni as H,ur as I,re as J,_ as K,Q as L,S as M,B as N,$ as O,O as P,Pt as Q,ne as R,tt as S,Hn as T,H as U,mn as V,Ar as W,ue as X,fi as Y,ve as Z,Ge as _,Me as a,xi as a0,ae as a1,Si as a2,Ci as a3,Ve as a4,kr as a5,Zn as a6,lr as a7,li as a8,ln as a9,si as aA,wt as aB,ui as aa,un as ab,dn as ac,fn as ad,Ir as ae,ci as af,Ft as ag,an as ah,sr as ai,ai as aj,vn as ak,ei as al,wi as am,bi as an,Ai as ao,gi as ap,mi as aq,hi as ar,oi as as,vi as at,St as au,ni as av,pi as aw,ti as ax,Ei as ay,di as az,qi as b,Je as c,Ze as d,et as e,_i as f,zn as g,fe as h,bt as i,kn as j,Be as k,$e as l,de as m,V as n,ke as o,k as p,dr as q,pr as r,Te as s,yn as t,Qe as u,K as v,Fe as w,ze as x,Ke as y,J as z};
- //# sourceMappingURL=index-1d65707a.js.map