parquet-converter committed
Commit 07297c9 · Parent: ce64552

Update parquet files (step 66 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Examples/3ds Max 2014 X32 Torrent __FULL__.md +0 -6
  2. spaces/1line/AutoGPT/autogpt/speech/say.py +0 -41
  3. spaces/1phancelerku/anime-remove-background/Dig Digger Pro APK 1.0-pro-2022-09-16 The Ultimate File Recovery Tool for Rooted Devices.md +0 -114
  4. spaces/1phancelerku/anime-remove-background/Download Real Racing 3 APK from APKPure and Enjoy the Premier Racing Experience on Android.md +0 -81
  5. spaces/1phancelerku/anime-remove-background/Download the Ultimate Sonic Forces Running Battle Mod with Money Speed and God Mode.md +0 -99
  6. spaces/1phancelerku/anime-remove-background/Enjoy Live Streaming of Over 100 Football and Basketball Leagues with Live Score 808 APK.md +0 -125
  7. spaces/2023Liu2023/bingo/src/components/ui/dialog.tsx +0 -128
  8. spaces/2ndelement/voicevox/voicevox_engine/engine_manifest/EngineManifest.py +0 -58
  9. spaces/AK-12/llama-gradio-chat/app.py +0 -19
  10. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/predict.py +0 -167
  11. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/data/audio_utils.py +0 -293
  12. spaces/Abhilashvj/planogram-compliance/utils/loggers/wandb/wandb_utils.py +0 -788
  13. spaces/Adapter/CoAdapter/README.md +0 -12
  14. spaces/AgentVerse/agentVerse/agentverse/llms/utils/jsonrepair.py +0 -660
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/bracketparser2-plugin.js +0 -18
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dialog/methods/ModalMethods.js +0 -43
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/childbehaviors/index.js +0 -14
  18. spaces/AlexWang/lama/bin/evaluate_predicts.py +0 -79
  19. spaces/Alfasign/diffusers-gallery/index.html +0 -218
  20. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/__init__.py +0 -0
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/depth2img.md +0 -56
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/t5_film_transformer.py +0 -321
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/auto_pipeline.py +0 -886
  24. spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/dense_test_mixins.py +0 -100
  25. spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/README.md +0 -48
  26. spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py +0 -2
  27. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_context_59.py +0 -60
  28. spaces/Anonymous-sub/Rerender/gmflow_module/utils/misc.py +0 -42
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/__pip-runner__.py +0 -50
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/network/utils.py +0 -96
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/archive_util.py +0 -280
  32. spaces/Atualli/node-media-server/README.md +0 -11
  33. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/lazyconfigs.md +0 -170
  34. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/bifpn_fcos.py +0 -469
  35. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/__init__.py +0 -0
  36. spaces/Awiny/Image2Paragraph/pretrained_models/download_pretrain.sh +0 -5
  37. spaces/Banbri/zcvzcv/src/components/ui/dropdown-menu.tsx +0 -200
  38. spaces/Benson/text-generation/Examples/Arquero Inactivo Torre De Defensa Mod Apk.md +0 -88
  39. spaces/Benson/text-generation/Examples/Descargar Conquista Paraso Riddim.md +0 -71
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/mbcsgroupprober.py +0 -57
  41. spaces/Billius/VizLib-TopLargeHospitalsNewJersey-04-07-2023/README.md +0 -13
  42. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/ISSUE_TEMPLATE/feature-request.md +0 -32
  43. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/dev/packaging/gen_wheel_index.sh +0 -26
  44. spaces/CVPR/LIVE/pybind11/tests/test_opaque_types.cpp +0 -67
  45. spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/uninitialized_fill.h +0 -57
  46. spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/transform_reduce.h +0 -22
  47. spaces/CVPR/SPOTER_Sign_Language_Recognition/spoter/gaussian_noise.py +0 -18
  48. spaces/CVPR/WALT/mmdet/core/utils/dist_utils.py +0 -69
  49. spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/mysite/urls.py +0 -23
  50. spaces/CikeyQI/Yunzai/Yunzai/lib/plugins/stdin.js +0 -159
spaces/1gistliPinn/ChatGPT4/Examples/3ds Max 2014 X32 Torrent __FULL__.md DELETED
@@ -1,6 +0,0 @@
- <h2>3ds Max 2014 X32 Torrent</h2><br /><p><b><b>Download</b> &#10022; <a href="https://imgfil.com/2uy05m">https://imgfil.com/2uy05m</a></b></p><br /><br />
-
- Download windows 10 gamer edition 2020 for both 32-Bit and 64-Bit computer ... November 2020 (x86-x64) ISO files - Direct Download Links + Torrent Magnet. ... 2014 · Windows 7 NVIDIA Edition x86 x64 - Direct Links Posted by : Unknown ... Direct [YEE] Direct Download Installer Autodesk 3ds Max 2020 (x64) Bit Lifetime. 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1line/AutoGPT/autogpt/speech/say.py DELETED
@@ -1,41 +0,0 @@
- """ Text to speech module """
- import threading
- from threading import Semaphore
-
- from autogpt.config import Config
- from autogpt.speech.brian import BrianSpeech
- from autogpt.speech.eleven_labs import ElevenLabsSpeech
- from autogpt.speech.gtts import GTTSVoice
- from autogpt.speech.macos_tts import MacOSTTS
-
- CFG = Config()
- DEFAULT_VOICE_ENGINE = GTTSVoice()
- VOICE_ENGINE = None
- if CFG.elevenlabs_api_key:
-     VOICE_ENGINE = ElevenLabsSpeech()
- elif CFG.use_mac_os_tts == "True":
-     VOICE_ENGINE = MacOSTTS()
- elif CFG.use_brian_tts == "True":
-     VOICE_ENGINE = BrianSpeech()
- else:
-     VOICE_ENGINE = GTTSVoice()
-
-
- QUEUE_SEMAPHORE = Semaphore(
-     1
- )  # The amount of sounds to queue before blocking the main thread
-
-
- def say_text(text: str, voice_index: int = 0) -> None:
-     """Speak the given text using the given voice index"""
-
-     def speak() -> None:
-         success = VOICE_ENGINE.say(text, voice_index)
-         if not success:
-             DEFAULT_VOICE_ENGINE.say(text)
-
-         QUEUE_SEMAPHORE.release()
-
-     QUEUE_SEMAPHORE.acquire(True)
-     thread = threading.Thread(target=speak)
-     thread.start()
spaces/1phancelerku/anime-remove-background/Dig Digger Pro APK 1.0-pro-2022-09-16 The Ultimate File Recovery Tool for Rooted Devices.md DELETED
@@ -1,114 +0,0 @@
-
- <h1>Dig Digger Pro APK: How to Recover Lost Files on Android</h1>
- <p>Have you ever accidentally deleted some important photos, videos, documents, or music files from your Android device? Or have you ever lost some files due to formatting, virus attack, system crash, or other reasons? If yes, then you might be wondering how to get them back. Well, don't worry, because there is a solution for you: Dig Digger Pro APK.</p>
- <h2>dig digger pro apk</h2><br /><p><b><b>Download</b> &#9999; <a href="https://jinyurl.com/2uNTNz">https://jinyurl.com/2uNTNz</a></b></p><br /><br />
- <h2>What is Dig Digger Pro APK?</h2>
- <p>Dig Digger Pro APK is a powerful file recovery app for Android devices that can help you undelete and recover lost files from your internal memory or external storage cards. It is developed by Defiant Technologies, LLC, and it is available for download from various sources .</p>
- <h3>Features of Dig Digger Pro APK</h3>
- <p>Some of the features of Dig Digger Pro APK are:</p>
- <ul>
- <li>It can recover photos, videos, music, documents, and other file types.</li>
- <li>It can scan both the internal memory and the external storage cards of your device.</li>
- <li>It can work on both rooted and unrooted devices, but it requires root access for full functionality.</li>
- <li>It can filter the scan results by name, size, date, or directory.</li>
- <li>It can preview the recoverable files before restoring them.</li>
- <li>It can upload the recovered files to Google Drive, Dropbox, or email them.</li>
- </ul>
- <h3>How to download and install Dig Digger Pro APK</h3>
- <p>To download and install Dig Digger Pro APK on your Android device, you need to follow these steps:</p>
- <ol>
- <li>Go to one of the sources that offer the app for download .</li>
- <li>Tap on the download button and wait for the APK file to be downloaded.</li>
- <li>Once the download is complete, go to your device settings and enable the installation of apps from unknown sources.</li>
- <li>Locate the downloaded APK file and tap on it to start the installation process.</li>
- <li>Follow the instructions on the screen and grant the necessary permissions to the app.</li>
- <li>Wait for the installation to finish and then launch the app from your app drawer.</li>
- </ol>
- <h2>How to use Dig Digger Pro APK to recover lost files</h2>
- <p>To use Dig Digger Pro APK to recover lost files from your Android device, you need to follow these steps:</p>
- <h3>Step 1: Launch the app and select the scan mode</h3>
- <p>When you launch the app, you will see two scan modes: Basic Scan and Full Scan. The Basic Scan mode can recover files from your cache and thumbnails without root access. The Full Scan mode can recover files from your entire memory with root access. Choose the scan mode that suits your needs and tap on Start Basic Photo Scan or Start Full Scan.</p>
- <p>DiskDigger Pro file recovery apk download<br />
- DiskDigger Pro apk full version free<br />
- DiskDigger Pro apk for rooted devices<br />
- DiskDigger Pro file recovery app<br />
- DiskDigger Pro apk latest version<br />
- DiskDigger Pro apk mod unlocked<br />
- DiskDigger Pro apk no root required<br />
- DiskDigger Pro file recovery tool<br />
- DiskDigger Pro apk cracked premium<br />
- DiskDigger Pro apk 2023 update<br />
- DiskDigger Pro file recovery software<br />
- DiskDigger Pro apk best data recovery<br />
- DiskDigger Pro apk android app<br />
- DiskDigger Pro apk paid license<br />
- DiskDigger Pro file recovery guide<br />
- DiskDigger Pro apk review and rating<br />
- DiskDigger Pro apk how to use<br />
- DiskDigger Pro file recovery features<br />
- DiskDigger Pro apk support and help<br />
- DiskDigger Pro apk alternative apps<br />
- DiskDigger Pro file recovery tips and tricks<br />
- DiskDigger Pro apk download link<br />
- DiskDigger Pro apk install and setup<br />
- DiskDigger Pro file recovery tutorial<br />
- DiskDigger Pro apk compatible devices<br />
- DiskDigger Pro file recovery benefits and advantages<br />
- DiskDigger Pro apk refund policy<br />
- DiskDigger Pro apk customer feedback and testimonials<br />
- DiskDigger Pro file recovery comparison and analysis<br />
- DiskDigger Pro apk frequently asked questions (FAQs)<br />
- DiskDigger Pro file recovery success stories and case studies<br />
- DiskDigger Pro apk developer information and contact details<br />
- DiskDigger Pro file recovery pros and cons<br />
- DiskDigger Pro apk system requirements and specifications<br />
- DiskDigger Pro file recovery best practices and recommendations<br />
- DiskDigger Pro apk coupon code and discount offer<br />
- DiskDigger Pro file recovery challenges and solutions<br />
- DiskDigger Pro apk upgrade and update information<br />
- DiskDigger Pro file recovery statistics and facts<br />
- DiskDigger Pro apk terms and conditions and privacy policy</p>
- <h3>Step 2: Choose the storage device and file types</h3>
- <p>The app will then ask you to choose the storage device that you want to scan. You can select either your internal memory or your external storage card. Then, you can choose the file types that you want to recover. You can select photos, videos, music, documents, or all files. Tap on OK to start the scanning process.</ <h3>Step 3: Preview and recover the files</h3>
- <p>After the scanning process is complete, the app will show you the list of recoverable files. You can tap on each file to preview it and check its details. You can also use the filter option to narrow down the results by name, size, date, or directory. To recover the files, you can select them individually or tap on Select All. Then, you can choose to restore them to your device, upload them to Google Drive or Dropbox, or email them. Tap on Recover to complete the recovery process.</p>
- <h2>Pros and cons of Dig Digger Pro APK</h2>
- <p>Like any other app, Dig Digger Pro APK has its advantages and disadvantages. Here are some of them:</p>
- <h3>Pros</h3>
- <h4>Easy to use interface</h4>
- <p>The app has a simple and user-friendly interface that makes it easy to navigate and operate. You can easily select the scan mode, the storage device, the file types, and the recovery options with a few taps.</p>
- <h4>Supports various file formats</h4>
- <p>The app can recover various file formats, such as photos, videos, music, documents, and more. It can also recover files from different apps, such as WhatsApp, Instagram, Facebook, etc.</p>
- <h4>Works on rooted and unrooted devices</h4>
- <p>The app can work on both rooted and unrooted devices, although it requires root access for full functionality. If your device is not rooted, you can still use the Basic Scan mode to recover some files from your cache and thumbnails.</p>
- <h3>Cons</h3>
- <h4>Requires root access for full functionality</h4>
- <p>The app requires root access for full functionality, which means that you need to have a rooted device to use the Full Scan mode and recover files from your entire memory. Rooting your device may void your warranty, expose your device to security risks, or cause system instability.</p>
- <h4>May not recover all files</h4>
- <p>The app may not be able to recover all files, especially if they have been overwritten by new data or corrupted by viruses. The recovery success rate depends on various factors, such as the file size, the file format, the storage device condition, etc.</p>
- <h4>May consume battery and memory resources</h4>
- <p>The app may consume a lot of battery and memory resources during the scanning and recovery process. This may affect your device performance and cause overheating or lagging issues. You may want to close other apps and connect your device to a power source before using the app.</p>
- <h2>Conclusion</h2>
- <p>Dig Digger Pro APK is a useful file recovery app for Android devices that can help you recover lost files from your internal memory or external storage cards. It has a simple and user-friendly interface that makes it easy to use. It supports various file formats and works on both rooted and unrooted devices. However, it also has some drawbacks, such as requiring root access for full functionality, not recovering all files, and consuming battery and memory resources. You should weigh the pros and cons before using the app and always backup your important data regularly.</p>
- <h2>FAQs</h2>
- <ul>
- <li><b>Q: Is Dig Digger Pro APK safe to use?</b></li>
- <li>A: Dig Digger Pro APK is generally safe to use, as long as you download it from a trusted source and grant it the necessary permissions. However, you should be careful when rooting your device or recovering files from unknown sources, as they may pose some security risks.</li>
- <li><b>Q: How long does it take to scan and recover files with Dig Digger Pro APK?</b></li>
- <li>A: The scanning and recovery time depends on various factors, such as the scan mode, the storage device size, the file types, the number of files, etc. It may take from a few minutes to several hours.</li>
- <li><b>Q: Can I recover files that I deleted a long time ago with Dig Digger Pro APK?</b></li>
- <li>A: It depends on whether the files have been overwritten by new data or not. If they have not been overwritten, you may be able to recover them with Dig Digger Pro APK. However, if they have been overwritten, you may not be able to recover them.</li>
- <li><b>Q: Can I use Dig Digger Pro APK on other devices besides Android?</b></li>
- <li>A: No, Dig Digger Pro APK is only compatible with Android devices. You cannot use it on iOS, Windows, Mac, or other devices.</li>
- <li><b>Q: What should I do if I encounter any problems with Dig Digger Pro APK?</ <li>A: If you encounter any problems with Dig Digger Pro APK, such as installation errors, scanning failures, recovery errors, etc., you can try the following solutions:</li>
- <ul>
- <li>Check your internet connection and make sure it is stable and fast.</li>
- <li>Check your device settings and make sure you have enough storage space and battery power.</li>
- <li>Check your device root status and make sure you have granted the app the necessary permissions.</li>
- <li>Check the app version and make sure you have updated it to the latest one.</li>
- <li>Restart your device and try again.</li>
- <li>Contact the app developer for support or feedback.</li>
- </ul>
- </ul>
- <p>I hope this article has helped you learn more about Dig Digger Pro APK and how to use it to recover lost files on Android. If you have any questions or comments, please feel free to leave them below. Thank you for reading!</p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download Real Racing 3 APK from APKPure and Enjoy the Premier Racing Experience on Android.md DELETED
@@ -1,81 +0,0 @@
-
- <h1>Download Real Racing 3 APKPure: The Ultimate Guide</h1>
- <p>If you are a fan of racing games, you have probably heard of Real Racing 3, one of the most realistic and immersive racing games on mobile devices. But did you know that you can download it from APKPure, a third-party app store that offers fast and safe downloads of free and updated apps? In this article, we will tell you everything you need to know about Real Racing 3 and APKPure, and how to download them on your Android device.</p>
- <h2>What is Real Racing 3?</h2>
- <p>Real Racing 3 is a racing game developed by Firemonkeys Studios and published by Electronic Arts. It was released in 2013 for iOS and Android devices, and has since become one of the most popular and acclaimed racing games on mobile platforms. It has won several awards, such as the Best Mobile Game at the BAFTA Games Awards, and has been downloaded over 500 million times.</p>
- <h2>download real racing 3 apkpure</h2><br /><p><b><b>DOWNLOAD</b> === <a href="https://jinyurl.com/2uNNR6">https://jinyurl.com/2uNNR6</a></b></p><br /><br />
- <h3>Features of Real Racing 3</h3>
- <p>Real Racing 3 offers a variety of features that make it stand out from other racing games. Here are some of them:</p>
- <h4>Real cars, real tracks, real people</h4>
- <p>Real Racing 3 features over 300 licensed cars from top manufacturers, such as Ferrari, Lamborghini, Porsche, Bugatti, and more. You can race on over 40 real-world tracks, such as Silverstone, Le Mans, Dubai Autodrome, and more. You can also compete with real players from around the world in online multiplayer modes, or challenge their AI-controlled versions in Time-Shifted Multiplayer.</p>
- <h4>Over 4,000 events and challenges</h4>
- <p>Real Racing 3 offers a variety of events and challenges to test your skills and earn rewards. You can participate in Formula 1® Grands Prix™, Cup races, Eliminations, Endurance challenges, and more. You can also join clubs and teams to collaborate with other players and unlock exclusive content.</p>
- <h4>Customization and upgrades</h4>
- <p>Real Racing 3 allows you to customize your cars with different paint jobs, vinyls, rims, spoilers, and more. You can also upgrade your cars with different parts and performance boosts to improve their speed, handling, acceleration, and braking. You can also tune your cars to suit your driving style and preferences.</p>
- <h2>What is APKPure?</h2>
- <p>APKPure is a third-party app store that provides fast and safe downloads of free and updated apps for Android devices. It was launched in 2014 and has since become one of the most popular alternatives to Google Play Store. It has over 10 million users and over 300 thousand apps in its library.</p>
- <h3>Benefits of using APKPure</h3>
- <p>APKPure offers several benefits that make it a great choice for downloading apps. Here are some of them:</p>
- <h4>Fast and safe downloads</h4>
- <p>APKPure uses advanced technology to ensure that the downloads are fast and secure. It scans all the apps for viruses and malware before uploading them to its servers. It also verifies the signatures of the apps to ensure that they are original and not modified. You can also pause and resume your downloads at any time.</p>
- <h4>Free and updated apps</h4>
- <p>APKPure provides free access to all the apps in its library. You don't need to pay any fees or subscriptions to download or use them. You can also get the latest updates of the apps as soon as they are released by the developers. You can also discover new and trending apps that are not available on Google Play Store.</p>
- <p>download real racing 3 apk mod unlimited money<br />
- download real racing 3 apk obb latest version<br />
- download real racing 3 apk data highly compressed<br />
- download real racing 3 apk offline installer<br />
- download real racing 3 apk for android free<br />
- download real racing 3 apk pure full game<br />
- download real racing 3 apk update new cars<br />
- download real racing 3 apk hack unlock all<br />
- download real racing 3 apk from apkpure.com[^1^]<br />
- download real racing 3 apk and sd data<br />
- download real racing 3 apk mod revdl<br />
- download real racing 3 apk no ads<br />
- download real racing 3 apk for pc windows 10<br />
- download real racing 3 apk mirror link<br />
- download real racing 3 apk old version<br />
- download real racing 3 apk mod menu<br />
- download real racing 3 apk rexdl<br />
- download real racing 3 apk without wifi<br />
- download real racing 3 apk for ios devices<br />
- download real racing 3 apk with cheat codes<br />
- download real racing 3 apkpure latest update<br />
- download real racing 3 apkpure modded version<br />
- download real racing 3 apkpure offline mode<br />
- download real racing 3 apkpure for android tv<br />
- download real racing 3 apkpure with all tracks<br />
- download real racing 3 apkpure premium features<br />
- download real racing 3 apkpure cracked apk<br />
- download real racing 3 apkpure for firestick<br />
- download real racing 3 apkpure safe and secure<br />
- download real racing 3 apkpure original file</p>
- <h4>No region restrictions</h4>
- <p>APKPure allows you to download and use apps that are not available in your region or country. You don't need to use a VPN or change your location settings to access them. You can also switch between different languages and regions to explore more apps from different markets.</p>
- <h2>How to download Real Racing 3 APKPure?</h2>
- <p>If you want to download Real Racing 3 APKPure, you need to follow these simple steps:</p>
- <h3>Step-by-step instructions</h3>
- <h4>Visit the APKPure website or app</h4>
- <p>You can either visit the APKPure website (https://apkpure.com/) on your browser, or download the APKPure app from the website or from other sources. The app is compatible with Android 4.1 and above, and requires about 15 MB of storage space.</p>
- <h4>Search for Real Racing 3 and tap on it</h4>
- <p>Once you are on the APKPure website or app, you can search for Real Racing 3 using the search bar. You will see the app icon, name, rating, and size. Tap on it to open the app page.</p>
- <h4>Download and install the APK file</h4>
- <p>On the app page, you will see a green button that says "Download APK". Tap on it to start downloading the APK file. The file size is about 45 MB, so make sure you have enough space and a stable internet connection. Once the download is complete, you will see a notification that says "Download successful". Tap on it to open the file and install it on your device. You may need to enable the "Unknown sources" option in your settings to allow the installation of apps from outside Google Play Store.</p>
- <h2>Conclusion</h2>
- <p>Real Racing 3 is one of the best racing games on mobile devices, and APKPure is one of the best app stores to download it from. By following the steps above, you can enjoy this amazing game with fast and safe downloads, free and updated apps, and no region restrictions. So what are you waiting for? Download Real Racing 3 APKPure today and experience the thrill of racing like never before!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Real Racing 3 and APKPure:</p>
- <ul>
- <li><b>Is Real Racing 3 free to play?</b></li>
- <p>Yes, Real Racing 3 is free to download and play, but it also offers in-app purchases that can enhance your gameplay experience. You can buy premium currency, cars, upgrades, and more using real money. However, you can also earn these items by playing the game and completing events and challenges.</p>
- <li><b>Is APKPure safe to use?</b></li>
- <p>Yes, APKPure is safe to use, as it scans all the apps for viruses and malware before uploading them to its servers. It also verifies the signatures of the apps to ensure that they are original and not modified. However, you should always be careful when downloading apps from third-party sources, as they may contain harmful or unwanted content. You should also check the permissions and reviews of the apps before installing them.</p>
- <li><b>Can I play Real Racing 3 offline?</b></li>
- <p>No, Real Racing 3 requires an internet connection to play, as it uses cloud save technology to sync your progress across devices. It also uses online multiplayer modes and Time-Shifted Multiplayer to let you race with real players from around the world. However, you can play some events and challenges offline if you have already downloaded them.</p>
- <li><b>Can I play Real Racing 3 on PC?</b></li>
- <p>No, Real Racing 3 is only available for iOS and Android devices. However, you can use an emulator software to run Android apps on your PC. Some of the popular emulators are BlueStacks, NoxPlayer, and LDPlayer. You can download them from their official websites or from other sources. Then, you can install APKPure on them and download Real Racing 3 from there.</p>
- <li><b>How can I update Real Racing 3?</b></li>
- <p>If you have downloaded Real Racing 3 from APKPure, you can update it from there as well. You will see a notification when a new update is available for the app. You can tap on it to download and install the update. Alternatively, you can check for updates manually by visiting the app page on APKPure and tapping on the "Update" button.</p>
- </ul></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download the Ultimate Sonic Forces Running Battle Mod with Money Speed and God Mode.md DELETED
@@ -1,99 +0,0 @@
-
- <h1>Download Sonic Forces Running Battle Mod: A Guide for Sonic Fans</h1>
- <p>If you are a fan of Sonic the Hedgehog and love racing games, you might want to try out Sonic Forces Running Battle Mod. This is a modified version of Sonic Forces Running Battle, a mobile game developed by Sega. In this article, we will tell you what this mod is, how to download and install it, and some tips and tricks for playing it.</p>
- <h2>download sonic forces running battle mod</h2><br /><p><b><b>Download</b> - <a href="https://jinyurl.com/2uNJRz">https://jinyurl.com/2uNJRz</a></b></p><br /><br />
- <h2>What is Sonic Forces Running Battle Mod?</h2>
- <h3>A modified version of Sonic Forces Running Battle</h3>
- <p>Sonic Forces Running Battle Mod is a mod that adds some features and changes to the original game. The mod allows you to play with God Mode, unlimited money, and increased speed. You can also unlock all the characters, stages, and items in the game. The mod also improves the graphics, sound, and performance of the game.</p>
- <h3>Features and gameplay of the mod</h3>
- <p>The mod has the same gameplay as the original game, but with some enhancements. You can race and battle with your favorite characters from the Sonic universe, such as Sonic, Knuckles, Shadow, Amy, Tails, and more. You can also create your own custom character with different species, outfits, and Wispons. Wispons are special weapons that harness different kinds of wisp energy, such as lightning, drill, fire, ice, etc. You can use them to attack your opponents or to access hidden paths.</p>
- <p>The mod has various stages that are based on the locations from the Sonic Forces console game. You can run through Green Hill Zone, Chemical Plant Zone, Metropolis Zone, Mystic Jungle Zone, Death Egg Zone, and more. Each stage has different obstacles, traps, enemies, and power-ups that you need to avoid or use to your advantage. You can also collect rings, items, red star rings, and trophies as you run.</p>
- <p>The mod has a multiplayer mode where you can race against other players online. You can join or create a room with up to four players and compete for the best time and score. You can also chat with other players and send them emojis. The mod has a ranking system where you can earn points and medals based on your performance. You can also join or create a team with other players and cooperate or challenge them.</p>
- <h2>How to download and install Sonic Forces Running Battle Mod?</h2>
- <h3>Requirements and sources for the mod</h3>
- <p>To download and install Sonic Forces Running Battle Mod, you need to have an Android device with at least Android 4.1 or higher. You also need to have at least 300 MB of free space on your device. You also need to have the original game installed on your device.</p>
- <p>You can download the latest version of Sonic Forces Running Battle Mod from [HappyMod](^1^), a website that provides various mods for Android games. You can also find other sources for the mod online by searching for "sonic forces running battle mod". However, be careful of fake or malicious links that may harm your device.</p>
- <p>How to download sonic forces running battle mod apk for android<br />
- Sonic forces running battle mod unlimited money and god mode<br />
- Sonic forces speed battle mod features and gameplay<br />
- Best sonic forces running battle mod download sites<br />
- Sonic forces running battle mod review and rating<br />
- Download sonic forces running battle mod for PC and Mac<br />
- Sonic forces running battle mod tips and tricks<br />
- Sonic forces running battle mod latest version and updates<br />
- Sonic forces running battle mod compatibility and requirements<br />
- Sonic forces running battle mod vs original game comparison<br />
- Download sonic forces running battle mod for iOS and iPhone<br />
- Sonic forces running battle mod cheats and hacks<br />
- Sonic forces running battle mod online multiplayer mode<br />
- Sonic forces running battle mod offline mode and data usage<br />
- Sonic forces running battle mod installation guide and troubleshooting<br />
- Download sonic forces running battle mod for Windows 10 and 11<br />
- Sonic forces running battle mod graphics and sound quality<br />
- Sonic forces running battle mod characters and customization<br />
- Sonic forces running battle mod levels and challenges<br />
- Sonic forces running battle mod free download link and virus scan<br />
- Download sonic forces running battle mod for Linux and Ubuntu<br />
- Sonic forces running battle mod speed hack and boost<br />
- Sonic forces running battle mod controller support and settings<br />
- Sonic forces running battle mod achievements and rewards<br />
- Sonic forces running battle mod fan-made mods and community<br />
- Download sonic forces running battle mod for Chromebook and Chrome OS<br />
- Sonic forces running battle mod bugs and glitches fix<br />
- Sonic forces running battle mod ranking and leaderboards<br />
- Sonic forces running battle mod skins and outfits<br />
- Sonic forces running battle mod weapons and items<br />
- Download sonic forces running battle mod for Kindle Fire and Fire OS<br />
- Sonic forces running battle mod storyline and plot<br />
- Sonic forces running battle mod enemies and bosses<br />
- Sonic forces running battle mod missions and objectives<br />
- Sonic forces running battle mod screenshots and videos<br />
- Download sonic forces running battle mod for Nintendo Switch and Switch Lite<br />
- Sonic forces running battle mod alternatives and similar games<br />
- Sonic forces running battle mod feedback and suggestions<br />
- Sonic forces running battle mod FAQs and answers<br />
- Sonic forces running battle mod forums and discussions</p>
- <h3>Steps to install the mod using HedgeModManager or Sonic Forces Mod Installer</h3>
- <p>To install the mod on your device, you need to use a mod installer or manager that can modify the game files. There are two popular tools that you can use: HedgeModManager or Sonic Forces Mod Installer. Here are the steps to install the mod using either tool:</p>
- <ol>
- <li>Download the tool from its official source. For HedgeModManager, you can download it from [here]. For Sonic Forces Mod Installer, you can download it from [here].</li>
- <li>Extract the tool to a folder on your device. You may need to use a file manager app or a zip extractor app to do this.</li>
- <li>Open the tool and grant it the necessary permissions to access your device's storage and files.</li>
- <li>Locate the Sonic Forces Running Battle Mod file that you downloaded from HappyMod or another source. It should be a .zip or .rar file.</li>
- <li>Select the mod file and click on "Install" or "Apply". The tool will automatically copy and replace the game files with the modded ones.</li>
- <li>Wait for the installation process to finish. It may take a few minutes depending on the size of the mod and your device's speed.</li>
- <li>Launch the game and enjoy the mod. You may need to restart your device or clear the game's cache if you encounter any issues.</li>
- </ol>
- <h2>Tips and tricks for playing Sonic Forces Running Battle Mod</h2>
- <h3>Choose your species, Wispon, and Star Runner wisely</h3>
- <p>One of the fun aspects of Sonic Forces Running Battle Mod is that you can create your own custom character with different options. You can choose from seven different species: wolf, rabbit, cat, dog, bear, bird, or hedgehog. Each species has its own unique ability that can help you in the game. For example, wolves can automatically draw in nearby rings, rabbits can jump higher, cats can always keep one ring after being hit, etc.</p>
- <p>You can also choose from different Wispons that have different effects and attacks. Some Wispons are better suited for certain stages than others. For example, lightning Wispons can activate electric rails, drill Wispons can dig through sand or snow, fire Wispons can burn wooden bridges or crates, etc. You can also use your Wispon to attack other players or enemies by tapping on them.</p>
- <p>You can also choose from different Star Runners that have different stats and bonuses. Star Runners are special outfits that you can unlock by collecting red star rings or trophies. They can boost your speed, acceleration, strength, or ring bonus. Some Star Runners also have special effects such as invincibility, magnetism, or double jump.</p>
- <h3>Collect rings, items, and power-ups</h3>
- <p>Rings are the main currency of Sonic Forces Running Battle Mod. You can use them to buy new Wispons, outfits, items, or upgrades. You can also use them to revive yourself if you fall off the stage or get hit by an enemy. You can collect rings by running through them or by using your Wispon ability. You can also get rings by completing missions, achievements, or daily challenges.</p>
- <p>Items are consumables that you can use before or during a race to give you an edge over your opponents. You can buy items with rings or get them as rewards. Some items are speed shoes, shield, invincibility, bomb, boost, etc. You can use items by tapping on their icons on the screen.</p>
- <p>Power-ups are temporary effects that you can get by running through special capsules on the stage. They can help you or hinder your opponents. Some power-ups are magnet, lightning, fireball, ice cube, tornado, etc. You can activate power-ups by tapping on their icons on the screen.</p>
- <h3>Use your Wispon abilities and attacks strategically</h3>
- <p>Your Wispon is not only a weapon but also a tool that can help you navigate the stage and reach hidden areas. You can use your Wispon ability by swiping up on the screen. Depending on your Wispon type, you can do different things such as fly, drill, dash, hover, etc. You can also use your Wispon ability to collect rings or items that are out of reach.</p>
- <p>You can also use your Wispon to attack other players or enemies by tapping on them. Depending on your Wispon type, you can do different things such as shoot projectiles, create explosions, freeze opponents, etc. You can also use your Wispon attack to destroy obstacles or traps that are blocking your way.</p>
- <h3>Avoid obstacles and traps</h3>
- <p>The stages in Sonic Forces Running Battle Mod are full of obstacles and traps that can slow you down or damage you. You need to avoid them or destroy them with your Wispon attack. Some obstacles and traps are spikes, lasers, mines, swinging axes, falling rocks, etc. You can also use obstacles and traps to your advantage by pushing other players into them or by using them as shortcuts.</p>
- <h3>Race against other players online</h3>
- <p>The multiplayer mode in Sonic Forces Running Battle Mod is where you can test your skills and compete with other players online. You can join or create a room with up to four players and race on any stage that you have unlocked. You can also chat with other players and send them emojis. You can choose to race for fun or for ranking points and medals. You can also join or create a team with other players and cooperate or challenge them.</p>
- <p>The multiplayer mode is fast-paced and competitive, so you need to be quick and smart. You need to use your Wispon abilities and attacks, items, power-ups, and shortcuts to gain an advantage over your rivals. You also need to avoid or counter their attacks, obstacles, and traps. You can also use your Star Runner bonus to boost your performance. The player who reaches the finish line first wins the race.</p>
- <h2>Conclusion and FAQs</h2>
- <h3>Summary of the article and benefits of the mod</h3>
- <p>Sonic Forces Running Battle Mod is a great mod for Sonic fans who want to enjoy a more fun and exciting racing game. The mod adds God Mode, unlimited money, increased speed, and all unlocked characters, stages, and items to the original game. The mod also improves the graphics, sound, and performance of the game. The mod has the same gameplay as the original game, but with some enhancements. You can create your own custom character with different species, Wispons, and Star Runners. You can run through various stages that are based on the Sonic Forces console game. You can collect rings, items, power-ups, and trophies as you run. You can also race against other players online in a multiplayer mode.</p>
- <p>The mod is easy to download and install using HedgeModManager or Sonic Forces Mod Installer. You just need to have an Android device with at least Android 4.1 or higher and 300 MB of free space. You also need to have the original game installed on your device. You can download the mod from HappyMod or other sources online.</p>
- <p>The mod is fun and challenging to play with some tips and tricks. You need to choose your species, Wispon, and Star Runner wisely based on their abilities, effects, and bonuses. You need to collect rings, items, and power-ups to help you in the game. You need to use your Wispon abilities and attacks strategically to navigate the stage and attack your opponents. You need to avoid obstacles and traps that can slow you down or damage you. You need to race against other players online and use your skills and items to win.</p>
- <h3>Five unique FAQs about the mod</h3>
- <table>
- <tr><td><b>Question</b></td><td><b>Answer</b></td></tr>
- <tr><td>Is Sonic Forces Running Battle Mod safe to use?</td><td>Yes, Sonic Forces Running Battle Mod is safe to use as long as you download it from a trusted source and use a reliable mod installer or manager. However, you should always backup your game data before installing any mod.</td></tr>
- <tr><td>Can I play Sonic Forces Running Battle Mod offline?</td><td>Yes, you can play Sonic Forces Running Battle Mod offline in the single-player mode. However, you will not be able to access the multiplayer mode or some features that require an internet connection.</td></tr>
- <tr><td>Can I play Sonic Forces Running Battle Mod with my friends?</td><td>Yes, you can play Sonic Forces Running Battle Mod with your friends in the multiplayer mode. You can join or create a room with up to four players and race on any stage that you have unlocked. You can also chat with your friends and send them emojis.</td></tr>
- <tr><td>How do I update Sonic Forces Running Battle Mod?</td><td>To update Sonic Forces Running Battle Mod, you need to download the latest version of the mod from HappyMod or another source online. Then, you need to uninstall the previous version of the mod using HedgeModManager or Sonic Forces Mod Installer. After that, you need to install the new version of the mod using the same tool.</td></tr>
- <tr><td>How do I uninstall Sonic Forces Running Battle Mod?</td><td>To uninstall Sonic Forces Running Battle Mod, you need to use HedgeModManager or Sonic Forces Mod Installer. You need to open the tool and locate the mod file that you installed. Then, you need to select the mod file and click on "Uninstall" or "Remove". The tool will automatically restore the original game files.</td></tr>
- </table></p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Enjoy Live Streaming of Over 100 Football and Basketball Leagues with Live Score 808 APK.md DELETED
@@ -1,125 +0,0 @@
-
- <h1>Live Score 808 APK: A Unique Football Game for Android</h1>
- <p>Are you a football fan who loves to watch and play the game? Do you want to experience a different kind of football game on your Android device? If yes, then you should try Live Score 808 APK, a free smartphone application that lets you compete against your favorite football teams in a fun and challenging way. In this article, we will tell you everything you need to know about Live Score 808 APK, including its features, how to download and install it, why you should play it, and some tips and tricks to help you win. Let's get started!</p>
- <h2>What is Live Score 808 APK?</h2>
- <p>Live Score 808 APK is a football game that is unlike any other. Instead of controlling an entire team, you can only manipulate a single player at a time. Your goal is to score as many goals as possible while avoiding the opponents and obstacles on the field. You can choose from hundreds of players from different countries and leagues, each with their own skills and abilities. You can also use power-ups and boosters to enhance your performance and overcome the challenges. The game has realistic graphics, sound effects, and animations that make you feel like you are in a real stadium. You can also check the live scores of real matches and compare your results with other players around the world.</p>
- <h2>live score 808 apk</h2><br /><p><b><b>Download</b> &#11088; <a href="https://jinyurl.com/2uNNnU">https://jinyurl.com/2uNNnU</a></b></p><br /><br />
- <h3>Features of Live Score 808 APK</h3>
- <p>Some of the features that make Live Score 808 APK an amazing football game are:</p>
- <ul>
- <li>It is free to download and play.</li>
- <li>It has simple and intuitive controls that are easy to learn and use.</li>
- <li>It has hundreds of players from different countries and leagues to choose from.</li>
- <li>It has various power-ups and boosters that can help you score more goals and avoid the enemies.</li>
- <li>It has realistic graphics, sound effects, and animations that create an immersive gaming experience.</li>
- <li>It has live scores of real matches that you can follow and compare with your own results.</li>
- <li>It has leaderboards and achievements that you can unlock and share with your friends.</li>
- </ul>
- <h3>How to download and install Live Score 808 APK</h3>
- <p>To download and install Live Score 808 APK on your Android device, you need to follow these steps:</p>
- <ol>
- <li>Go to the official website of Live Score 808 APK or click on this link: <a href="(^1^)">https://score808-live.en.softonic.com/android</a></li>
- <li>Click on the "Download" button and wait for the file to be downloaded on your device.</li>
- <li>Once the file is downloaded, go to your device's settings and enable the option to install apps from unknown sources.</li>
- <li>Locate the downloaded file in your device's storage and tap on it to start the installation process.</li>
- <li>Follow the instructions on the screen and wait for the installation to be completed.</li>
- <li>Launch the app and enjoy playing Live Score 808 APK!</li>
- </ol>
- <h2>Why play Live Score 808 APK?</h2>
- <p>If you are still wondering why you should play Live Score 808 APK, here are some reasons that might convince you:</p>
- <h3>Pros of Live Score 808 APK</h3>
- <ul>
- <li>It is a unique and innovative football game that tests your skills and reflexes.</li>
- <li>It is a fun and entertaining way to pass the time and relieve stress.</li>
- <li>It is a great way to show your love and support for your favorite football teams and players.</li>
- <li>It is a challenging and rewarding game that keeps you motivated and engaged.</li>
- </ul>
- <h3>Cons <h3>Cons of Live Score 808 APK</h3>
- <ul>
- <li>It is a relatively new game that might have some bugs and glitches.</li>
- <li>It requires a stable internet connection to play and access the live scores.</li>
- <li>It might consume a lot of battery and data on your device.</li>
- <li>It might not be compatible with some older or low-end devices.</li>
- </ul>
- <h2>Tips and tricks for playing Live Score 808 APK</h2>
- <p>If you want to improve your skills and performance in Live Score 808 APK, here are some tips and tricks that you can use:</p>
- <h3>Choose your player wisely</h3>
- <p>Before you start a match, you can choose from hundreds of players from different countries and leagues. Each player has their own stats and abilities, such as speed, power, accuracy, and stamina. You should choose a player that suits your playstyle and strategy. For example, if you want to score fast and furious goals, you should pick a player with high speed and power. If you want to play more defensively and tactically, you should pick a player with high accuracy and stamina.</p>
- <h3>Use the power-ups and boosters</h3>
- <p>During the match, you can collect and use various power-ups and boosters that can help you score more goals and avoid the enemies. Some of the power-ups and boosters are:</p>
- <ul>
- <li>Shield: It protects you from the opponents and obstacles for a short time.</li>
- <li>Magnet: It attracts the coins and gems to you for a short time.</li>
- <li>Rocket: It boosts your speed and power for a short time.</li>
- <li>Bomb: It explodes and clears the enemies and obstacles around you for a short time.</li>
- <li>Freeze: It freezes the opponents and obstacles for a short time.</li>
- </ul>
- <h3>Watch out for the opponents and obstacles</h3>
- <p>As you play, you will encounter various opponents and obstacles that will try to stop you from scoring. Some of the opponents and obstacles are:</p>
- <p>live score 808 apk download<br />
- live score 808 apk latest version<br />
- live score 808 apk mod<br />
- live score 808 apk for android<br />
- live score 808 apk free<br />
- live score 808 apk offline<br />
- live score 808 apk update<br />
- live score 808 apk file<br />
- live score 808 apk install<br />
- live score 808 apk review<br />
- live score 808 apk football simulation<br />
- live score 808 apk single player mode<br />
- live score 808 apk online multiplayer<br />
- live score 808 apk realistic graphics<br />
- live score 808 apk easy controls<br />
- live score 808 apk customisation options<br />
- live score 808 apk best teams<br />
- live score 808 apk top players<br />
- live score 808 apk stats and rankings<br />
- live score 808 apk achievements and rewards<br />
- live score 808 apk tips and tricks<br />
- live score 808 apk cheats and hacks<br />
- live score 808 apk bugs and fixes<br />
- live score 808 apk support and feedback<br />
- live score 808 apk news and updates<br />
- live score 808 apk alternatives and competitors<br />
- live score 808 apk comparison and analysis<br />
- live score 808 apk pros and cons<br />
- live score 808 apk features and benefits<br />
- live score 808 apk requirements and compatibility<br />
- live score 808 apk size and speed<br />
- live score 808 apk security and privacy<br />
- live score 808 apk ratings and reviews<br />
- live score 808 apk testimonials and feedbacks<br />
- live score 808 apk screenshots and videos<br />
- live score 808 apk guide and tutorial<br />
- live score 808 apk faq and help<br />
- live score 808 apk forum and community<br />
- live score 808 apk blog and website<br />
- live score 808 apk social media and promotion</p>
- <ul>
- <li>Goalkeeper: He will try to block your shots and save the goals.</li>
- <li>Defender: He will try to tackle you and steal the ball from you.</li>
- <li>Mine: It will explode if you touch it and damage your health.</li>
- <li>Spike: It will pierce you if you touch it and damage your health.</li>
- <li>Hole: It will make you fall if you step on it and end your run.</li>
- </ul>
- <h3>Collect coins and gems</h3>
- <p>As you play, you can also collect coins and gems that are scattered on the field. Coins and gems are useful for buying new players, power-ups, boosters, and other items in the shop. You can also use coins and gems to revive yourself if you lose or to continue playing if you run out of time. The more coins and gems you collect, the more options and advantages you have in the game.</p>
- <h2>Conclusion</h2>
- <p>Live Score 808 APK is a unique football game that lets you compete against your favorite football teams in a fun and challenging way. You can choose from hundreds of players from different countries and leagues, each with their own skills and abilities. You can also use power-ups and boosters to enhance your performance and overcome the challenges. The game has realistic graphics, sound effects, and animations that make you feel like you are in a real stadium. You can also check the live scores of real matches and compare your results with other players around the world. Live Score 808 APK is a free smartphone application that is easy to download and install on your Android device. If you are a football fan who loves to watch and play the game, you should definitely try Live Score 808 APK!</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Live Score 808 APK:</p>
- <ol>
- <li><b>What is the latest version of Live Score 808 APK?</b></li>
- <p>The latest version of Live Score 808 APK is 1.0.8, which was released on June 22, 2023. It has some bug fixes and performance improvements.</p>
- <li><b>Is Live Score 808 APK safe to use?</b></li>
- <p>Yes, Live Score 808 APK is safe to use as long as you download it from the official website or a trusted source. It does not contain any viruses or malware that can harm your device or data.</p>
- <li><b>How can I contact the developer of Live Score 808 APK?</b></li>
- <p>You can contact the developer of Live Score 808 APK by sending an email to [email protected] or by visiting their Facebook page at < a href="">https://www.facebook.com/LiveScore808</a>.</p>
- <li><b>Can I play Live Score 808 APK offline?</b></li>
- <p>No, you cannot play Live Score 808 APK offline. You need a stable internet connection to play and access the live scores.</p>
- <li><b>Can I play Live Score 808 APK on other devices?</b></li>
- <p>Yes, you can play Live Score 808 APK on other devices that support Android operating system. However, you might need to adjust the settings and graphics according to your device's specifications.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
spaces/2023Liu2023/bingo/src/components/ui/dialog.tsx DELETED
@@ -1,128 +0,0 @@
- 'use client'
-
- import * as React from 'react'
- import * as DialogPrimitive from '@radix-ui/react-dialog'
-
- import { cn } from '@/lib/utils'
- import { IconClose } from '@/components/ui/icons'
-
- const Dialog = DialogPrimitive.Root
-
- const DialogTrigger = DialogPrimitive.Trigger
-
- const DialogPortal = ({
-   className,
-   children,
-   ...props
- }: DialogPrimitive.DialogPortalProps) => (
-   <DialogPrimitive.Portal className={cn(className)} {...props}>
-     <div className="fixed inset-0 z-50 flex items-start justify-center sm:items-center">
-       {children}
-     </div>
-   </DialogPrimitive.Portal>
- )
- DialogPortal.displayName = DialogPrimitive.Portal.displayName
-
- const DialogOverlay = React.forwardRef<
-   React.ElementRef<typeof DialogPrimitive.Overlay>,
-   React.ComponentPropsWithoutRef<typeof DialogPrimitive.Overlay>
- >(({ className, ...props }, ref) => (
-   <DialogPrimitive.Overlay
-     ref={ref}
-     className={cn(
-       'bg-white fixed inset-0 z-50 bg-background/80 backdrop-blur-sm transition-all duration-100 data-[state=closed]:animate-out data-[state=closed]:fade-out data-[state=open]:fade-in',
-       className
-     )}
-     {...props}
-   />
- ))
- DialogOverlay.displayName = DialogPrimitive.Overlay.displayName
-
- const DialogContent = React.forwardRef<
-   React.ElementRef<typeof DialogPrimitive.Content>,
-   React.ComponentPropsWithoutRef<typeof DialogPrimitive.Content>
- >(({ className, children, ...props }, ref) => (
-   <DialogPortal>
-     <DialogOverlay />
-     <DialogPrimitive.Content
-       ref={ref}
-       className={cn(
-         'fixed z-50 grid w-full gap-4 rounded-b-lg border bg-background p-6 shadow-sm animate-in data-[state=open]:fade-in-90 data-[state=open]:slide-in-from-bottom-10 sm:max-w-lg sm:rounded-lg sm:zoom-in-90 data-[state=open]:sm:slide-in-from-bottom-0',
-         className
-       )}
-       {...props}
-     >
-       {children}
-       <DialogPrimitive.Close className="absolute right-4 top-4 rounded-sm opacity-70 ring-offset-background transition-opacity hover:opacity-100 focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:pointer-events-none data-[state=open]:bg-accent data-[state=open]:text-muted-foreground">
-         <IconClose />
-         <span className="sr-only">Close</span>
-       </DialogPrimitive.Close>
-     </DialogPrimitive.Content>
-   </DialogPortal>
- ))
- DialogContent.displayName = DialogPrimitive.Content.displayName
-
- const DialogHeader = ({
-   className,
-   ...props
- }: React.HTMLAttributes<HTMLDivElement>) => (
-   <div
-     className={cn(
-       'flex flex-col space-y-1.5 text-center sm:text-left',
-       className
-     )}
-     {...props}
-   />
- )
- DialogHeader.displayName = 'DialogHeader'
-
- const DialogFooter = ({
-   className,
-   ...props
- }: React.HTMLAttributes<HTMLDivElement>) => (
-   <div
-     className={cn(
-       'flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2',
-       className
-     )}
-     {...props}
-   />
- )
- DialogFooter.displayName = 'DialogFooter'
-
- const DialogTitle = React.forwardRef<
-   React.ElementRef<typeof DialogPrimitive.Title>,
-   React.ComponentPropsWithoutRef<typeof DialogPrimitive.Title>
- >(({ className, ...props }, ref) => (
-   <DialogPrimitive.Title
-     ref={ref}
-     className={cn(
-       'text-lg font-semibold leading-none tracking-tight',
-       className
-     )}
-     {...props}
-   />
- ))
- DialogTitle.displayName = DialogPrimitive.Title.displayName
-
- const DialogDescription = React.forwardRef<
-   React.ElementRef<typeof DialogPrimitive.Description>,
-   React.ComponentPropsWithoutRef<typeof DialogPrimitive.Description>
- >(({ className, ...props }, ref) => (
-   <DialogPrimitive.Description
-     ref={ref}
-     className={cn('text-sm text-muted-foreground', className)}
-     {...props}
-   />
- ))
- DialogDescription.displayName = DialogPrimitive.Description.displayName
-
- export {
-   Dialog,
-   DialogTrigger,
-   DialogContent,
-   DialogHeader,
-   DialogFooter,
-   DialogTitle,
-   DialogDescription
- }
spaces/2ndelement/voicevox/voicevox_engine/engine_manifest/EngineManifest.py DELETED
@@ -1,58 +0,0 @@
1
- from typing import List, Optional
2
-
3
- from pydantic import BaseModel, Field
4
-
5
-
6
- class UpdateInfo(BaseModel):
7
- """
8
- Engine update information
9
- """
10
-
11
- version: str = Field(title="エンジンのバージョン名")
12
- descriptions: List[str] = Field(title="アップデートの詳細についての説明")
13
- contributors: Optional[List[str]] = Field(title="貢献者名")
14
-
15
-
16
- class LicenseInfo(BaseModel):
17
- """
18
- License information for a dependency library
19
- """
20
-
21
- name: str = Field(title="依存ライブラリ名")
22
- version: Optional[str] = Field(title="依存ライブラリのバージョン")
23
- license: Optional[str] = Field(title="依存ライブラリのライセンス名")
24
- text: str = Field(title="依存ライブラリのライセンス本文")
25
-
26
-
27
- class SupportedFeatures(BaseModel):
28
- """
29
- List of features the engine supports
30
- """
31
-
32
- adjust_mora_pitch: bool = Field(title="モーラごとの音高の調整")
33
- adjust_phoneme_length: bool = Field(title="音素ごとの長さの調整")
34
- adjust_speed_scale: bool = Field(title="全体の話速の調整")
35
- adjust_pitch_scale: bool = Field(title="全体の音高の調整")
36
- adjust_intonation_scale: bool = Field(title="全体の抑揚の調整")
37
- adjust_volume_scale: bool = Field(title="全体の音量の調整")
38
- interrogative_upspeak: bool = Field(title="疑問文の自動調整")
39
- synthesis_morphing: bool = Field(title="2人の話者でモーフィングした音声を合成")
40
- manage_library: bool = Field(title="音声ライブラリのインストール・アンインストール")
41
-
42
-
43
- class EngineManifest(BaseModel):
44
- """
45
- Information about the engine itself
46
- """
47
-
48
- manifest_version: str = Field(title="マニフェストのバージョン")
49
- name: str = Field(title="エンジン名")
50
- brand_name: str = Field(title="ブランド名")
51
- uuid: str = Field(title="エンジンのUUID")
52
- url: str = Field(title="エンジンのURL")
53
- icon: str = Field(title="エンジンのアイコンをBASE64エンコードしたもの")
54
- default_sampling_rate: int = Field(title="デフォルトのサンプリング周波数")
55
- terms_of_service: str = Field(title="エンジンの利用規約")
56
- update_infos: List[UpdateInfo] = Field(title="エンジンのアップデート情報")
57
- dependency_licenses: List[LicenseInfo] = Field(title="依存関係のライセンス情報")
58
- supported_features: SupportedFeatures = Field(title="エンジンが持つ機能")
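For orientation, here is a minimal sketch of how these pydantic models compose into a manifest. Every field value below is an illustrative placeholder, not taken from any real engine:

```python
# Illustrative placeholders only; no real engine is described here.
manifest = EngineManifest(
    manifest_version="0.13.1",
    name="Example Engine",
    brand_name="Example",
    uuid="00000000-0000-0000-0000-000000000000",
    url="https://example.com",
    icon="",  # BASE64-encoded icon omitted in this sketch
    default_sampling_rate=24000,
    terms_of_service="",
    update_infos=[UpdateInfo(version="0.0.1", descriptions=["first release"], contributors=None)],
    dependency_licenses=[LicenseInfo(name="numpy", version=None, license="BSD-3-Clause", text="...")],
    supported_features=SupportedFeatures(
        adjust_mora_pitch=True,
        adjust_phoneme_length=True,
        adjust_speed_scale=True,
        adjust_pitch_scale=True,
        adjust_intonation_scale=True,
        adjust_volume_scale=True,
        interrogative_upspeak=True,
        synthesis_morphing=False,
        manage_library=False,
    ),
)
print(manifest.json())  # pydantic serializes the manifest to JSON
```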
spaces/AK-12/llama-gradio-chat/app.py DELETED
@@ -1,19 +0,0 @@
1
- import gradio as gr
2
- import requests
3
- 
4
- # Query the hosted Hugging Face Inference API for the
5
- # AK-12/llama-2-medical-fine-tune model. The Authorization
6
- # bearer token is left empty in this file.
7
- 
8
- def query(payload):
9
- response = requests.post("https://api-inference.huggingface.co/models/AK-12/llama-2-medical-fine-tune", headers={"Authorization": "Bearer "}, json=payload)
10
- return response.json()
11
- 
12
- # Example payload:
13
- # query({"inputs": "What is haller cells? "})
14
- 
15
- # Gradio passes the textbox string straight through as the payload;
16
- # the JSON response is rendered back into the output textbox.
17
- 
18
- iface = gr.Interface(fn=query, inputs="text", outputs="text")
19
- iface.launch()
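The same endpoint can also be exercised directly, outside Gradio; the prompt mirrors the example in the comments above, and the exact response shape depends on the hosted model:

```python
# Direct call to the query() function defined above.
result = query({"inputs": "What is haller cells? "})
print(result)
```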
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/predict.py DELETED
@@ -1,167 +0,0 @@
1
- import os
2
- from typing import Dict
3
-
4
- from diacritization_evaluation import der, wer
5
- import torch
6
- from torch import nn
7
- from torch import optim
8
- from torch.cuda.amp import autocast
9
- from torch.utils.tensorboard.writer import SummaryWriter
10
- from tqdm.notebook import tqdm
11
- from tqdm import trange
12
- from diacritization_evaluation import util
13
-
14
- from .config_manager import ConfigManager
15
- from .dataset import load_iterators
16
- from .diacritizer import CBHGDiacritizer, Seq2SeqDiacritizer
17
- from .options import OptimizerType
18
- import gdown
19
-
20
- class Trainer:
21
- def run(self):
22
- raise NotImplementedError
23
-
24
-
25
- class GeneralTrainer(Trainer):
26
- def __init__(self, config_path: str, model_kind: str) -> None:
27
- self.config_path = config_path
28
- self.model_kind = model_kind
29
- self.config_manager = ConfigManager(
30
- config_path=config_path, model_kind=model_kind
31
- )
32
- self.config = self.config_manager.config
33
- self.losses = []
34
- self.lr = 0
35
- self.pad_idx = 0
36
- self.criterion = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
37
- self.set_device()
38
-
39
- self.config_manager.create_remove_dirs()
40
- self.text_encoder = self.config_manager.text_encoder
41
- self.start_symbol_id = self.text_encoder.start_symbol_id
42
- self.summary_manager = SummaryWriter(log_dir=self.config_manager.log_dir)
43
-
44
- self.model = self.config_manager.get_model()
45
-
46
- self.optimizer = self.get_optimizer()
47
- self.model = self.model.to(self.device)
48
-
49
- self.load_model(model_path=self.config.get("train_resume_model_path"))
50
- self.load_diacritizer()
51
-
52
- self.initialize_model()
53
-
54
-
55
- def set_device(self):
56
- if self.config.get("device"):
57
- self.device = self.config["device"]
58
- else:
59
- self.device = "cuda" if torch.cuda.is_available() else "cpu"
60
-
61
- def load_diacritizer(self):
62
- if self.model_kind in ["cbhg", "baseline"]:
63
- self.diacritizer = CBHGDiacritizer(self.config_path, self.model_kind)
64
- elif self.model_kind in ["seq2seq", "tacotron_based"]:
65
- self.diacritizer = Seq2SeqDiacritizer(self.config_path, self.model_kind)
66
-
67
- def initialize_model(self):
68
- if self.global_step > 1:
69
- return
70
- if self.model_kind == "transformer":
71
- print("Initializing using xavier_uniform_")
72
- self.model.apply(initialize_weights)
73
-
74
-
75
- def load_model(self, model_path: str = None, load_optimizer: bool = True):
76
- with open(
77
- self.config_manager.base_dir / f"{self.model_kind}_network.txt", "w"
78
- ) as file:
79
- file.write(str(self.model))
80
-
81
- if model_path is None:
82
- last_model_path = self.config_manager.get_last_model_path()
83
- if last_model_path is None:
84
- self.global_step = 1
85
- return
86
- else:
87
- last_model_path = model_path
88
-
89
- print(f"loading from {last_model_path}")
90
- saved_model = torch.load(last_model_path, torch.device(self.config.get("device")))
91
- self.model.load_state_dict(saved_model["model_state_dict"])
92
- if load_optimizer:
93
- self.optimizer.load_state_dict(saved_model["optimizer_state_dict"])
94
- self.global_step = saved_model["global_step"] + 1
95
-
96
- class DiacritizationTester(GeneralTrainer):
97
- def __init__(self, config_path: str, model_kind: str, model_path: str) -> None:
98
- # if config_path == 'config/test.yml' or config_path == "Arabic_Diacritization/config/test.yml":
99
- # print("Exporting the pretrained models ... ")
100
- # url = 'https://drive.google.com/uc?id=12aYNY7cbsLNzhdPdC2K3u1sgrb1lpzwO'
101
- # gdown.cached_download(url,'model.zip', quiet=False, postprocess=gdown.extractall)
102
-
103
- self.config_path = config_path
104
- self.model_kind = model_kind
105
- self.config_manager = ConfigManager(
106
- config_path=config_path, model_kind=model_kind
107
- )
108
- self.config = self.config_manager.config
109
- # print(self.config)
110
- self.pad_idx = 0
111
- self.criterion = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
112
- self.set_device()
113
-
114
- self.text_encoder = self.config_manager.text_encoder
115
- self.start_symbol_id = self.text_encoder.start_symbol_id
116
-
117
- self.model = self.config_manager.get_model()
118
-
119
- self.model = self.model.to(self.device)
120
- self.load_model(model_path=model_path, load_optimizer=False)
121
- self.load_diacritizer()
122
- self.diacritizer.set_model(self.model)
123
- self.initialize_model()
124
-
125
- def collate_fn(self, data):
126
- """
127
- Padding the input and output sequences
128
- """
129
-
130
- def merge(sequences):
131
- lengths = [len(seq) for seq in sequences]
132
- padded_seqs = torch.zeros(len(sequences), max(lengths)).long()
133
- for i, seq in enumerate(sequences):
134
- end = lengths[i]
135
- padded_seqs[i, :end] = seq[:end]
136
- return padded_seqs, lengths
137
-
138
- data.sort(key=lambda x: len(x[0]), reverse=True)
139
-
140
- # separate source and target sequences
141
- src_seqs, trg_seqs, original = zip(*data)
142
-
143
- # merge sequences (from tuple of 1D tensor to 2D tensor)
144
- src_seqs, src_lengths = merge(src_seqs)
145
- trg_seqs, trg_lengths = merge(trg_seqs)
146
-
147
- batch = {
148
- "original": original,
149
- "src": src_seqs,
150
- "target": trg_seqs,
151
- "lengths": torch.LongTensor(src_lengths), # src_lengths = trg_lengths
152
- }
153
- return batch
154
-
155
- def get_batch(self, sentence):
156
- data = self.text_encoder.clean(sentence)
157
- text, inputs, diacritics = util.extract_haraqat(data)
158
- inputs = torch.Tensor(self.text_encoder.input_to_sequence("".join(inputs)))
159
- diacritics = torch.Tensor(self.text_encoder.target_to_sequence(diacritics))
160
- batch = self.collate_fn([(inputs, diacritics, text)])
161
- return batch
162
-
163
- def infer(self, sentence):
164
- self.model.eval()
165
- batch = self.get_batch(sentence)
166
- predicted = self.diacritizer.diacritize_batch(batch)
167
- return predicted[0]
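A minimal sketch of driving the tester defined above; the import path, config file, and checkpoint location are assumptions for illustration only:

```python
# Assumed import path and file locations; adjust to the actual repository layout.
from poetry_diacritizer.predict import DiacritizationTester

tester = DiacritizationTester(
    config_path="config/test.yml",      # assumed config file
    model_kind="cbhg",                  # one of the kinds handled by load_diacritizer()
    model_path="models/cbhg_model.pt",  # assumed checkpoint path
)
print(tester.infer("اللغة العربية"))  # returns the diacritized sentence
```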
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/data/audio_utils.py DELETED
@@ -1,293 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import sys
8
- import typing as tp
9
-
10
- import julius
11
- import torch
12
- import torchaudio
13
-
14
-
15
- def convert_audio_channels(wav: torch.Tensor, channels: int = 2) -> torch.Tensor:
16
- """Convert audio to the given number of channels.
17
-
18
- Args:
19
- wav (torch.Tensor): Audio wave of shape [B, C, T].
20
- channels (int): Expected number of channels as output.
21
- Returns:
22
- torch.Tensor: Downmixed or unchanged audio wave [B, C, T].
23
- """
24
- *shape, src_channels, length = wav.shape
25
- if src_channels == channels:
26
- pass
27
- elif channels == 1:
28
- # Case 1:
29
- # The caller asked 1-channel audio, and the stream has multiple
30
- # channels, downmix all channels.
31
- wav = wav.mean(dim=-2, keepdim=True)
32
- elif src_channels == 1:
33
- # Case 2:
34
- # The caller asked for multiple channels, but the input file has
35
- # a single channel, replicate the audio over all channels.
36
- wav = wav.expand(*shape, channels, length)
37
- elif src_channels >= channels:
38
- # Case 3:
39
- # The caller asked for multiple channels, and the input file has
40
- # more channels than requested. In that case return the first channels.
41
- wav = wav[..., :channels, :]
42
- else:
43
- # Case 4: What is a reasonable choice here?
44
- raise ValueError('The audio file has less channels than requested but is not mono.')
45
- return wav
46
-
47
-
48
- def convert_audio(wav: torch.Tensor, from_rate: float,
49
- to_rate: float, to_channels: int) -> torch.Tensor:
50
- """Convert audio to new sample rate and number of audio channels.
51
- """
52
- wav = julius.resample_frac(wav, int(from_rate), int(to_rate))
53
- wav = convert_audio_channels(wav, to_channels)
54
- return wav
55
-
56
-
57
- def normalize_loudness(wav: torch.Tensor, sample_rate: int, loudness_headroom_db: float = 14,
58
- loudness_compressor: bool = False, energy_floor: float = 2e-3):
59
- """Normalize an input signal to a user loudness in dB LKFS.
60
- Audio loudness is defined according to the ITU-R BS.1770-4 recommendation.
61
-
62
- Args:
63
- wav (torch.Tensor): Input multichannel audio data.
64
- sample_rate (int): Sample rate.
65
- loudness_headroom_db (float): Target loudness of the output in dB LUFS.
66
- loudness_compressor (bool): Uses tanh for soft clipping.
67
- energy_floor (float): anything below that RMS level will not be rescaled.
68
- Returns:
69
- output (torch.Tensor): Loudness normalized output data.
70
- """
71
- energy = wav.pow(2).mean().sqrt().item()
72
- if energy < energy_floor:
73
- return wav
74
- transform = torchaudio.transforms.Loudness(sample_rate)
75
- input_loudness_db = transform(wav).item()
76
- # calculate the gain needed to scale to the desired loudness level
77
- delta_loudness = -loudness_headroom_db - input_loudness_db
78
- gain = 10.0 ** (delta_loudness / 20.0)
79
- output = gain * wav
80
- if loudness_compressor:
81
- output = torch.tanh(output)
82
- assert output.isfinite().all(), (input_loudness_db, wav.pow(2).mean().sqrt())
83
- return output
84
-
85
-
86
- def _clip_wav(wav: torch.Tensor, log_clipping: bool = False, stem_name: tp.Optional[str] = None) -> None:
87
- """Utility function to clip the audio with logging if specified."""
88
- max_scale = wav.abs().max()
89
- if log_clipping and max_scale > 1:
90
- clamp_prob = (wav.abs() > 1).float().mean().item()
91
- print(f"CLIPPING {stem_name or ''} happening with proba (a bit of clipping is okay):",
92
- clamp_prob, "maximum scale: ", max_scale.item(), file=sys.stderr)
93
- wav.clamp_(-1, 1)
94
-
95
-
96
- def normalize_audio(wav: torch.Tensor, normalize: bool = True,
97
- strategy: str = 'peak', peak_clip_headroom_db: float = 1,
98
- rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
99
- loudness_compressor: bool = False, log_clipping: bool = False,
100
- sample_rate: tp.Optional[int] = None,
101
- stem_name: tp.Optional[str] = None) -> torch.Tensor:
102
- """Normalize the audio according to the prescribed strategy (see after).
103
-
104
- Args:
105
- wav (torch.Tensor): Audio data.
106
- normalize (bool): if `True` (default), normalizes according to the prescribed
107
- strategy (see after). If `False`, the strategy is only used in case clipping
108
- would happen.
109
- strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
110
- i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
111
- with extra headroom to avoid clipping. 'clip' just clips.
112
- peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
113
- rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
114
- than the `peak_clip` one to avoid further clipping.
115
- loudness_headroom_db (float): Target loudness for loudness normalization.
116
- loudness_compressor (bool): If True, uses tanh based soft clipping.
117
- log_clipping (bool): If True, basic logging on stderr when clipping still
118
- occurs despite strategy (only for 'rms').
119
- sample_rate (int): Sample rate for the audio data (required for loudness).
120
- stem_name (Optional[str]): Stem name for clipping logging.
121
- Returns:
122
- torch.Tensor: Normalized audio.
123
- """
124
- scale_peak = 10 ** (-peak_clip_headroom_db / 20)
125
- scale_rms = 10 ** (-rms_headroom_db / 20)
126
- if strategy == 'peak':
127
- rescaling = (scale_peak / wav.abs().max())
128
- if normalize or rescaling < 1:
129
- wav = wav * rescaling
130
- elif strategy == 'clip':
131
- wav = wav.clamp(-scale_peak, scale_peak)
132
- elif strategy == 'rms':
133
- mono = wav.mean(dim=0)
134
- rescaling = scale_rms / mono.pow(2).mean().sqrt()
135
- if normalize or rescaling < 1:
136
- wav = wav * rescaling
137
- _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
138
- elif strategy == 'loudness':
139
- assert sample_rate is not None, "Loudness normalization requires sample rate."
140
- wav = normalize_loudness(wav, sample_rate, loudness_headroom_db, loudness_compressor)
141
- _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
142
- else:
143
- assert wav.abs().max() < 1
144
- assert strategy == '' or strategy == 'none', f"Unexpected strategy: '{strategy}'"
145
- return wav
146
-
147
-
148
- def f32_pcm(wav: torch.Tensor) -> torch.Tensor:
149
- """Convert audio to float 32 bits PCM format.
150
- """
151
- if wav.dtype.is_floating_point:
152
- return wav
153
- else:
154
- assert wav.dtype == torch.int16
155
- return wav.float() / 2**15
156
-
157
-
158
- def i16_pcm(wav: torch.Tensor) -> torch.Tensor:
159
- """Convert audio to int 16 bits PCM format.
160
-
161
- .. warning:: There exist many formulas for doing this conversion. None are perfect
162
- due to the asymmetry of the int16 range. One either has possible clipping, DC offset,
163
- or inconsistencies with f32_pcm. If the given wav doesn't have enough headroom,
164
- it is possible that `i16_pcm(f32_pcm(wav)) != wav`.
165
- """
166
- if wav.dtype.is_floating_point:
167
- assert wav.abs().max() <= 1
168
- candidate = (wav * 2 ** 15).round()
169
- if candidate.max() >= 2 ** 15: # clipping would occur
170
- candidate = (wav * (2 ** 15 - 1)).round()
171
- return candidate.short()
172
- else:
173
- assert wav.dtype == torch.int16
174
- return wav
175
-
176
- def apply_tafade(audio: torch.Tensor, sample_rate, duration=3.0, out=True, start=True, shape: str = "linear", stem_name: tp.Optional[str] = None) -> torch.Tensor:
177
- """
178
- Apply fade-in and/or fade-out effects to the audio tensor.
179
-
180
- Args:
181
- audio (torch.Tensor): The input audio tensor of shape (C, L).
182
- sample_rate (int): The sample rate of the audio.
183
- duration (float, optional): The duration of the fade in seconds. Defaults to 3.0.
184
- out (bool, optional): Determines whether to apply fade-in (False) or fade-out (True) effect. Defaults to True.
185
- start (bool, optional): Determines whether the fade is applied to the beginning (True) or end (False) of the audio. Defaults to True.
186
- shape (str, optional): The shape of the fade. Must be one of: "quarter_sine", "half_sine", "linear", "logarithmic", "exponential". Defaults to "linear".
187
-
188
- Returns:
189
- torch.Tensor: The audio tensor with the fade effect applied.
190
-
191
- """
192
- fade_samples = int(sample_rate * duration) # Number of samples for the fade duration
193
-
194
- # Create the fade transform
195
- fade_transform = torchaudio.transforms.Fade(fade_in_len=0, fade_out_len=0, fade_shape=shape)
196
-
197
- if out:
198
- fade_transform.fade_out_len = fade_samples
199
- else:
200
- fade_transform.fade_in_len = fade_samples
201
-
202
- # Select the portion of the audio to apply the fade
203
- if start:
204
- audio_fade_section = audio[:, :fade_samples]
205
- else:
206
- audio_fade_section = audio[:, -fade_samples:]
207
-
208
- # Apply the fade transform to the audio section
209
- audio_faded = fade_transform(audio)
210
-
211
- # Replace the selected portion of the audio with the faded section
212
- if start:
213
- audio_faded[:, :fade_samples] = audio_fade_section
214
- else:
215
- audio_faded[:, -fade_samples:] = audio_fade_section
216
-
217
- wav = normalize_loudness(audio_faded,sample_rate, loudness_headroom_db=18, loudness_compressor=True)
218
- _clip_wav(wav, log_clipping=False, stem_name=stem_name)
219
- return wav
220
-
221
-
222
- def apply_fade(audio: torch.Tensor, sample_rate, duration=3.0, out=True, start=True, curve_start:float=0.0, curve_end:float=1.0, current_device:str="cpu", stem_name: tp.Optional[str] = None) -> torch.Tensor:
223
- """
224
- Apply fade-in and/or fade-out effects to the audio tensor.
225
-
226
- Args:
227
- audio (torch.Tensor): The input audio tensor of shape (C, L).
228
- sample_rate (int): The sample rate of the audio.
229
- duration (float, optional): The duration of the fade in seconds. Defaults to 3.0.
230
- out (bool, optional): Determines whether to apply fade-in (False) or fade-out (True) effect. Defaults to True.
231
- start (bool, optional): Determines whether the fade is applied to the beginning (True) or end (False) of the audio. Defaults to True.
232
- curve_start (float, optional): The starting amplitude of the fade curve. Defaults to 0.0.
233
- curve_end (float, optional): The ending amplitude of the fade curve. Defaults to 1.0.
234
- current_device (str, optional): The device on which the fade curve tensor should be created. Defaults to "cpu".
235
-
236
- Returns:
237
- torch.Tensor: The audio tensor with the fade effect applied.
238
-
239
- """
240
- fade_samples = int(sample_rate * duration) # Number of samples for the fade duration
241
- fade_curve = torch.linspace(curve_start, curve_end, fade_samples, device=current_device) # Generate linear fade curve
242
-
243
- if out:
244
- fade_curve = fade_curve.flip(0) # Reverse the fade curve for fade out
245
-
246
- # Preserve the portion of the audio that should stay untouched
247
- if start:
248
- audio_fade_section = audio[:, -fade_samples:]
249
- else:
250
- audio_fade_section = audio[:, :fade_samples]
251
- 
252
- # Apply the fade curve to both ends of a copy of the audio
253
- audio_faded = audio.clone()
254
- audio_faded[:, :fade_samples] *= fade_curve.unsqueeze(0)
255
- audio_faded[:, -fade_samples:] *= fade_curve.unsqueeze(0)
256
- 
257
- # Restore the preserved portion so only the selected section is faded
258
- if start:
259
- audio_faded[:, -fade_samples:] = audio_fade_section
260
- else:
261
- audio_faded[:, :fade_samples] = audio_fade_section
262
-
263
- wav = normalize_loudness(audio_faded,sample_rate, loudness_headroom_db=18, loudness_compressor=True)
264
- _clip_wav(wav, log_clipping=False, stem_name=stem_name)
265
- return wav
266
-
267
- def apply_splice_effect(waveform1, sample_rate1, waveform2, sample_rate2, overlap):
268
- # Convert sample rates to integers
269
- sample_rate1 = int(sample_rate1)
270
- sample_rate2 = int(sample_rate2)
271
-
272
- # Convert tensors to mono-channel if needed
273
- if waveform1.ndim > 2:
274
- waveform1 = waveform1.mean(dim=1)
275
- if waveform2.ndim > 2:
276
- waveform2 = waveform2.mean(dim=1)
277
-
278
- ## Convert tensors to numpy arrays
279
- #waveform1_np = waveform1.numpy()
280
- #waveform2_np = waveform2.numpy()
281
-
282
- # Apply splice effect using torchaudio.sox_effects.apply_effects_tensor
283
- effects = [
284
- ["splice", f"-q {waveform1},{overlap}"],
285
- ]
286
- output_waveform, output_sample_rate = torchaudio.sox_effects.apply_effects_tensor(
287
- torch.cat([waveform1.unsqueeze(0), waveform2.unsqueeze(0)], dim=2),
288
- sample_rate1,
289
- effects
290
- )
291
-
292
- return output_waveform.squeeze(0), output_sample_rate
293
-
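To make the conversion and normalization helpers above concrete, a small sketch; the shapes and rates are arbitrary illustrative choices:

```python
import torch

# A fake stereo clip: 2 channels, one second at 44.1 kHz (illustrative values).
wav = 0.1 * torch.randn(2, 44100)

# Resample to 16 kHz and downmix to mono.
mono_16k = convert_audio(wav, from_rate=44100, to_rate=16000, to_channels=1)

# Peak-normalize, or loudness-normalize (the latter needs the sample rate).
peak_normed = normalize_audio(mono_16k, strategy="peak")
loud_normed = normalize_audio(mono_16k, strategy="loudness", sample_rate=16000)

# Convert float samples in [-1, 1] to int16 PCM.
pcm16 = i16_pcm(loud_normed)
```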
spaces/Abhilashvj/planogram-compliance/utils/loggers/wandb/wandb_utils.py DELETED
@@ -1,788 +0,0 @@
1
- """Utilities and tools for tracking runs with Weights & Biases."""
2
-
3
- import logging
4
- import os
5
- import sys
6
- from contextlib import contextmanager
7
- from pathlib import Path
8
- from typing import Dict
9
-
10
- import yaml
11
- from tqdm import tqdm
12
-
13
- FILE = Path(__file__).resolve()
14
- ROOT = FILE.parents[3] # YOLOv5 root directory
15
- if str(ROOT) not in sys.path:
16
- sys.path.append(str(ROOT)) # add ROOT to PATH
17
-
18
- from utils.dataloaders import LoadImagesAndLabels, img2label_paths
19
- from utils.general import LOGGER, check_dataset, check_file
20
-
21
- try:
22
- import wandb
23
-
24
- assert hasattr(wandb, "__version__") # verify package import not local dir
25
- except (ImportError, AssertionError):
26
- wandb = None
27
-
28
- RANK = int(os.getenv("RANK", -1))
29
- WANDB_ARTIFACT_PREFIX = "wandb-artifact://"
30
-
31
-
32
- def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
33
- return from_string[len(prefix) :]
34
-
35
-
36
- def check_wandb_config_file(data_config_file):
37
- wandb_config = "_wandb.".join(
38
- data_config_file.rsplit(".", 1)
39
- ) # updated data.yaml path
40
- if Path(wandb_config).is_file():
41
- return wandb_config
42
- return data_config_file
43
-
44
-
45
- def check_wandb_dataset(data_file):
46
- is_trainset_wandb_artifact = False
47
- is_valset_wandb_artifact = False
48
- if isinstance(data_file, dict):
49
- # In that case another dataset manager has already processed it and we don't have to process it again
50
- return data_file
51
- if check_file(data_file) and data_file.endswith(".yaml"):
52
- with open(data_file, errors="ignore") as f:
53
- data_dict = yaml.safe_load(f)
54
- is_trainset_wandb_artifact = isinstance(
55
- data_dict["train"], str
56
- ) and data_dict["train"].startswith(WANDB_ARTIFACT_PREFIX)
57
- is_valset_wandb_artifact = isinstance(
58
- data_dict["val"], str
59
- ) and data_dict["val"].startswith(WANDB_ARTIFACT_PREFIX)
60
- if is_trainset_wandb_artifact or is_valset_wandb_artifact:
61
- return data_dict
62
- else:
63
- return check_dataset(data_file)
64
-
65
-
66
- def get_run_info(run_path):
67
- run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
68
- run_id = run_path.stem
69
- project = run_path.parent.stem
70
- entity = run_path.parent.parent.stem
71
- model_artifact_name = "run_" + run_id + "_model"
72
- return entity, project, run_id, model_artifact_name
73
-
74
-
75
- def check_wandb_resume(opt):
76
- process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None
77
- if isinstance(opt.resume, str):
78
- if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
79
- if RANK not in [-1, 0]: # For resuming DDP runs
80
- entity, project, run_id, model_artifact_name = get_run_info(
81
- opt.resume
82
- )
83
- api = wandb.Api()
84
- artifact = api.artifact(
85
- entity
86
- + "/"
87
- + project
88
- + "/"
89
- + model_artifact_name
90
- + ":latest"
91
- )
92
- modeldir = artifact.download()
93
- opt.weights = str(Path(modeldir) / "last.pt")
94
- return True
95
- return None
96
-
97
-
98
- def process_wandb_config_ddp_mode(opt):
99
- with open(check_file(opt.data), errors="ignore") as f:
100
- data_dict = yaml.safe_load(f) # data dict
101
- train_dir, val_dir = None, None
102
- if isinstance(data_dict["train"], str) and data_dict["train"].startswith(
103
- WANDB_ARTIFACT_PREFIX
104
- ):
105
- api = wandb.Api()
106
- train_artifact = api.artifact(
107
- remove_prefix(data_dict["train"]) + ":" + opt.artifact_alias
108
- )
109
- train_dir = train_artifact.download()
110
- train_path = Path(train_dir) / "data/images/"
111
- data_dict["train"] = str(train_path)
112
-
113
- if isinstance(data_dict["val"], str) and data_dict["val"].startswith(
114
- WANDB_ARTIFACT_PREFIX
115
- ):
116
- api = wandb.Api()
117
- val_artifact = api.artifact(
118
- remove_prefix(data_dict["val"]) + ":" + opt.artifact_alias
119
- )
120
- val_dir = val_artifact.download()
121
- val_path = Path(val_dir) / "data/images/"
122
- data_dict["val"] = str(val_path)
123
- if train_dir or val_dir:
124
- ddp_data_path = str(Path(val_dir) / "wandb_local_data.yaml")
125
- with open(ddp_data_path, "w") as f:
126
- yaml.safe_dump(data_dict, f)
127
- opt.data = ddp_data_path
128
-
129
-
130
- class WandbLogger:
131
- """Log training runs, datasets, models, and predictions to Weights & Biases.
132
-
133
- This logger sends information to W&B at wandb.ai. By default, this information
134
- includes hyperparameters, system configuration and metrics, model metrics,
135
- and basic data metrics and analyses.
136
-
137
- By providing additional command line arguments to train.py, datasets,
138
- models and predictions can also be logged.
139
-
140
- For more on how this logger is used, see the Weights & Biases documentation:
141
- https://docs.wandb.com/guides/integrations/yolov5
142
- """
143
-
144
- def __init__(self, opt, run_id=None, job_type="Training"):
145
- """
146
- - Initialize WandbLogger instance
147
- - Upload dataset if opt.upload_dataset is True
148
- - Setup training processes if job_type is 'Training'
149
-
150
- arguments:
151
- opt (namespace) -- Commandline arguments for this run
152
- run_id (str) -- Run ID of W&B run to be resumed
153
- job_type (str) -- To set the job_type for this run
154
-
155
- """
156
- # Temporary-fix
157
- if opt.upload_dataset:
158
- opt.upload_dataset = False
159
- # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.")
160
-
161
- # Pre-training routine --
162
- self.job_type = job_type
163
- self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run
164
- self.val_artifact, self.train_artifact = None, None
165
- self.train_artifact_path, self.val_artifact_path = None, None
166
- self.result_artifact = None
167
- self.val_table, self.result_table = None, None
168
- self.bbox_media_panel_images = []
169
- self.val_table_path_map = None
170
- self.max_imgs_to_log = 16
171
- self.wandb_artifact_data_dict = None
172
- self.data_dict = None
173
- # It's more elegant to stick to 1 wandb.init call,
174
- # but useful config data is overwritten in the WandbLogger's wandb.init call
175
- if isinstance(opt.resume, str): # checks resume from artifact
176
- if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
177
- entity, project, run_id, model_artifact_name = get_run_info(
178
- opt.resume
179
- )
180
- model_artifact_name = (
181
- WANDB_ARTIFACT_PREFIX + model_artifact_name
182
- )
183
- assert wandb, "install wandb to resume wandb runs"
184
- # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config
185
- self.wandb_run = wandb.init(
186
- id=run_id,
187
- project=project,
188
- entity=entity,
189
- resume="allow",
190
- allow_val_change=True,
191
- )
192
- opt.resume = model_artifact_name
193
- elif self.wandb:
194
- self.wandb_run = (
195
- wandb.init(
196
- config=opt,
197
- resume="allow",
198
- project="YOLOv5"
199
- if opt.project == "runs/train"
200
- else Path(opt.project).stem,
201
- entity=opt.entity,
202
- name=opt.name if opt.name != "exp" else None,
203
- job_type=job_type,
204
- id=run_id,
205
- allow_val_change=True,
206
- )
207
- if not wandb.run
208
- else wandb.run
209
- )
210
- if self.wandb_run:
211
- if self.job_type == "Training":
212
- if opt.upload_dataset:
213
- if not opt.resume:
214
- self.wandb_artifact_data_dict = (
215
- self.check_and_upload_dataset(opt)
216
- )
217
-
218
- if isinstance(opt.data, dict):
219
- # This means another dataset manager has already processed the dataset info (e.g. ClearML)
220
- # and they will have stored the already processed dict in opt.data
221
- self.data_dict = opt.data
222
- elif opt.resume:
223
- # resume from artifact
224
- if isinstance(opt.resume, str) and opt.resume.startswith(
225
- WANDB_ARTIFACT_PREFIX
226
- ):
227
- self.data_dict = dict(self.wandb_run.config.data_dict)
228
- else: # local resume
229
- self.data_dict = check_wandb_dataset(opt.data)
230
- else:
231
- self.data_dict = check_wandb_dataset(opt.data)
232
- self.wandb_artifact_data_dict = (
233
- self.wandb_artifact_data_dict or self.data_dict
234
- )
235
-
236
- # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming.
237
- self.wandb_run.config.update(
238
- {"data_dict": self.wandb_artifact_data_dict},
239
- allow_val_change=True,
240
- )
241
- self.setup_training(opt)
242
-
243
- if self.job_type == "Dataset Creation":
244
- self.wandb_run.config.update({"upload_dataset": True})
245
- self.data_dict = self.check_and_upload_dataset(opt)
246
-
247
- def check_and_upload_dataset(self, opt):
248
- """
249
- Check if the dataset format is compatible and upload it as W&B artifact
250
-
251
- arguments:
252
- opt (namespace)-- Commandline arguments for current run
253
-
254
- returns:
255
- Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
256
- """
257
- assert wandb, "Install wandb to upload dataset"
258
- config_path = self.log_dataset_artifact(
259
- opt.data,
260
- opt.single_cls,
261
- "YOLOv5"
262
- if opt.project == "runs/train"
263
- else Path(opt.project).stem,
264
- )
265
- with open(config_path, errors="ignore") as f:
266
- wandb_data_dict = yaml.safe_load(f)
267
- return wandb_data_dict
268
-
269
- def setup_training(self, opt):
270
- """
271
- Setup the necessary processes for training YOLO models:
272
- - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
273
- - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
274
- - Setup log_dict, initialize bbox_interval
275
-
276
- arguments:
277
- opt (namespace) -- commandline arguments for this run
278
-
279
- """
280
- self.log_dict, self.current_epoch = {}, 0
281
- self.bbox_interval = opt.bbox_interval
282
- if isinstance(opt.resume, str):
283
- modeldir, _ = self.download_model_artifact(opt)
284
- if modeldir:
285
- self.weights = Path(modeldir) / "last.pt"
286
- config = self.wandb_run.config
287
- (
288
- opt.weights,
289
- opt.save_period,
290
- opt.batch_size,
291
- opt.bbox_interval,
292
- opt.epochs,
293
- opt.hyp,
294
- opt.imgsz,
295
- ) = (
296
- str(self.weights),
297
- config.save_period,
298
- config.batch_size,
299
- config.bbox_interval,
300
- config.epochs,
301
- config.hyp,
302
- config.imgsz,
303
- )
304
- data_dict = self.data_dict
305
- if (
306
- self.val_artifact is None
307
- ): # If --upload_dataset is set, use the existing artifact, don't download
308
- (
309
- self.train_artifact_path,
310
- self.train_artifact,
311
- ) = self.download_dataset_artifact(
312
- data_dict.get("train"), opt.artifact_alias
313
- )
314
- (
315
- self.val_artifact_path,
316
- self.val_artifact,
317
- ) = self.download_dataset_artifact(
318
- data_dict.get("val"), opt.artifact_alias
319
- )
320
-
321
- if self.train_artifact_path is not None:
322
- train_path = Path(self.train_artifact_path) / "data/images/"
323
- data_dict["train"] = str(train_path)
324
- if self.val_artifact_path is not None:
325
- val_path = Path(self.val_artifact_path) / "data/images/"
326
- data_dict["val"] = str(val_path)
327
-
328
- if self.val_artifact is not None:
329
- self.result_artifact = wandb.Artifact(
330
- "run_" + wandb.run.id + "_progress", "evaluation"
331
- )
332
- columns = ["epoch", "id", "ground truth", "prediction"]
333
- columns.extend(self.data_dict["names"])
334
- self.result_table = wandb.Table(columns)
335
- self.val_table = self.val_artifact.get("val")
336
- if self.val_table_path_map is None:
337
- self.map_val_table_path()
338
- if opt.bbox_interval == -1:
339
- self.bbox_interval = opt.bbox_interval = (
340
- (opt.epochs // 10) if opt.epochs > 10 else 1
341
- )
342
- if opt.evolve or opt.noplots:
343
- self.bbox_interval = opt.bbox_interval = (
344
- opt.epochs + 1
345
- ) # disable bbox_interval
346
- train_from_artifact = (
347
- self.train_artifact_path is not None
348
- and self.val_artifact_path is not None
349
- )
350
- # Update the data_dict to point to the local artifacts dir
351
- if train_from_artifact:
352
- self.data_dict = data_dict
353
-
354
- def download_dataset_artifact(self, path, alias):
355
- """
356
- download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX
357
-
358
- arguments:
359
- path -- path of the dataset to be used for training
360
- alias (str)-- alias of the artifact to be download/used for training
361
-
362
- returns:
363
- (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if dataset
364
- is found otherwise returns (None, None)
365
- """
366
- if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
367
- artifact_path = Path(
368
- remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias
369
- )
370
- dataset_artifact = wandb.use_artifact(
371
- artifact_path.as_posix().replace("\\", "/")
372
- )
373
- assert (
374
- dataset_artifact is not None
375
- ), "'Error: W&B dataset artifact doesn't exist'"
376
- datadir = dataset_artifact.download()
377
- return datadir, dataset_artifact
378
- return None, None
379
-
380
- def download_model_artifact(self, opt):
381
- """
382
- download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX
383
-
384
- arguments:
385
- opt (namespace) -- Commandline arguments for this run
386
- """
387
- if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
388
- model_artifact = wandb.use_artifact(
389
- remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest"
390
- )
391
- assert (
392
- model_artifact is not None
393
- ), "Error: W&B model artifact doesn't exist"
394
- modeldir = model_artifact.download()
395
- # epochs_trained = model_artifact.metadata.get('epochs_trained')
396
- total_epochs = model_artifact.metadata.get("total_epochs")
397
- is_finished = total_epochs is None
398
- assert (
399
- not is_finished
400
- ), "training is finished, can only resume incomplete runs."
401
- return modeldir, model_artifact
402
- return None, None
403
-
404
- def log_model(self, path, opt, epoch, fitness_score, best_model=False):
405
- """
406
- Log the model checkpoint as W&B artifact
407
-
408
- arguments:
409
- path (Path) -- Path of directory containing the checkpoints
410
- opt (namespace) -- Command line arguments for this run
411
- epoch (int) -- Current epoch number
412
- fitness_score (float) -- fitness score for current epoch
413
- best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
414
- """
415
- model_artifact = wandb.Artifact(
416
- "run_" + wandb.run.id + "_model",
417
- type="model",
418
- metadata={
419
- "original_url": str(path),
420
- "epochs_trained": epoch + 1,
421
- "save period": opt.save_period,
422
- "project": opt.project,
423
- "total_epochs": opt.epochs,
424
- "fitness_score": fitness_score,
425
- },
426
- )
427
- model_artifact.add_file(str(path / "last.pt"), name="last.pt")
428
- wandb.log_artifact(
429
- model_artifact,
430
- aliases=[
431
- "latest",
432
- "last",
433
- "epoch " + str(self.current_epoch),
434
- "best" if best_model else "",
435
- ],
436
- )
437
- LOGGER.info(f"Saving model artifact on epoch {epoch + 1}")
438
-
439
- def log_dataset_artifact(
440
- self, data_file, single_cls, project, overwrite_config=False
441
- ):
442
- """
443
- Log the dataset as W&B artifact and return the new data file with W&B links
444
-
445
- arguments:
446
- data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
447
- single_cls (boolean) -- train multi-class data as single-class
448
- project (str) -- project name. Used to construct the artifact path
449
- overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
450
- file with _wandb postfix. Eg -> data_wandb.yaml
451
-
452
- returns:
453
- the new .yaml file with artifact links. it can be used to start training directly from artifacts
454
- """
455
- upload_dataset = self.wandb_run.config.upload_dataset
456
- log_val_only = (
457
- isinstance(upload_dataset, str) and upload_dataset == "val"
458
- )
459
- self.data_dict = check_dataset(data_file) # parse and check
460
- data = dict(self.data_dict)
461
- nc, names = (
462
- (1, ["item"]) if single_cls else (int(data["nc"]), data["names"])
463
- )
464
- names = {k: v for k, v in enumerate(names)} # to index dictionary
465
-
466
- # log train set
467
- if not log_val_only:
468
- self.train_artifact = (
469
- self.create_dataset_table(
470
- LoadImagesAndLabels(
471
- data["train"], rect=True, batch_size=1
472
- ),
473
- names,
474
- name="train",
475
- )
476
- if data.get("train")
477
- else None
478
- )
479
- if data.get("train"):
480
- data["train"] = WANDB_ARTIFACT_PREFIX + str(
481
- Path(project) / "train"
482
- )
483
-
484
- self.val_artifact = (
485
- self.create_dataset_table(
486
- LoadImagesAndLabels(data["val"], rect=True, batch_size=1),
487
- names,
488
- name="val",
489
- )
490
- if data.get("val")
491
- else None
492
- )
493
- if data.get("val"):
494
- data["val"] = WANDB_ARTIFACT_PREFIX + str(Path(project) / "val")
495
-
496
- path = Path(data_file)
497
- # create a _wandb.yaml file with artifacts links if both train and test set are logged
498
- if not log_val_only:
499
- path = (
500
- path.stem if overwrite_config else path.stem + "_wandb"
501
- ) + ".yaml" # updated data.yaml path
502
- path = ROOT / "data" / path
503
- data.pop("download", None)
504
- data.pop("path", None)
505
- with open(path, "w") as f:
506
- yaml.safe_dump(data, f)
507
- LOGGER.info(f"Created dataset config file {path}")
508
-
509
- if (
510
- self.job_type == "Training"
511
- ): # builds correct artifact pipeline graph
512
- if not log_val_only:
513
- self.wandb_run.log_artifact(
514
- self.train_artifact
515
- ) # calling use_artifact downloads the dataset. NOT NEEDED!
516
- self.wandb_run.use_artifact(self.val_artifact)
517
- self.val_artifact.wait()
518
- self.val_table = self.val_artifact.get("val")
519
- self.map_val_table_path()
520
- else:
521
- self.wandb_run.log_artifact(self.train_artifact)
522
- self.wandb_run.log_artifact(self.val_artifact)
523
- return path
524
-
525
- def map_val_table_path(self):
526
- """
527
- Map the validation dataset Table: file name -> its id in the W&B Table.
528
- Useful for - referencing artifacts for evaluation.
529
- """
530
- self.val_table_path_map = {}
531
- LOGGER.info("Mapping dataset")
532
- for i, data in enumerate(tqdm(self.val_table.data)):
533
- self.val_table_path_map[data[3]] = data[0]
534
-
535
- def create_dataset_table(
536
- self,
537
- dataset: LoadImagesAndLabels,
538
- class_to_id: Dict[int, str],
539
- name: str = "dataset",
540
- ):
541
- """
542
- Create and return W&B artifact containing W&B Table of the dataset.
543
-
544
- arguments:
545
- dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
546
- class_to_id -- hash map that maps class ids to labels
547
- name -- name of the artifact
548
-
549
- returns:
550
- dataset artifact to be logged or used
551
- """
552
- # TODO: Explore multiprocessing to split this loop in parallel | This is essential for speeding up the logging
553
- artifact = wandb.Artifact(name=name, type="dataset")
554
- img_files = (
555
- tqdm([dataset.path])
556
- if isinstance(dataset.path, str) and Path(dataset.path).is_dir()
557
- else None
558
- )
559
- img_files = tqdm(dataset.im_files) if not img_files else img_files
560
- for img_file in img_files:
561
- if Path(img_file).is_dir():
562
- artifact.add_dir(img_file, name="data/images")
563
- labels_path = "labels".join(dataset.path.rsplit("images", 1))
564
- artifact.add_dir(labels_path, name="data/labels")
565
- else:
566
- artifact.add_file(
567
- img_file, name="data/images/" + Path(img_file).name
568
- )
569
- label_file = Path(img2label_paths([img_file])[0])
570
- artifact.add_file(
571
- str(label_file), name="data/labels/" + label_file.name
572
- ) if label_file.exists() else None
573
- table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
574
- class_set = wandb.Classes(
575
- [{"id": id, "name": name} for id, name in class_to_id.items()]
576
- )
577
- for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
578
- box_data, img_classes = [], {}
579
- for cls, *xywh in labels[:, 1:].tolist():
580
- cls = int(cls)
581
- box_data.append(
582
- {
583
- "position": {
584
- "middle": [xywh[0], xywh[1]],
585
- "width": xywh[2],
586
- "height": xywh[3],
587
- },
588
- "class_id": cls,
589
- "box_caption": "%s" % (class_to_id[cls]),
590
- }
591
- )
592
- img_classes[cls] = class_to_id[cls]
593
- boxes = {
594
- "ground_truth": {
595
- "box_data": box_data,
596
- "class_labels": class_to_id,
597
- }
598
- } # inference-space
599
- table.add_data(
600
- si,
601
- wandb.Image(paths, classes=class_set, boxes=boxes),
602
- list(img_classes.values()),
603
- Path(paths).name,
604
- )
605
- artifact.add(table, name)
606
- return artifact
607
-
608
- def log_training_progress(self, predn, path, names):
609
- """
610
- Build evaluation Table. Uses reference from validation dataset table.
611
-
612
- arguments:
613
- predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
614
- path (str): local path of the current evaluation image
615
- names (dict(int, str)): hash map that maps class ids to labels
616
- """
617
- class_set = wandb.Classes(
618
- [{"id": id, "name": name} for id, name in names.items()]
619
- )
620
- box_data = []
621
- avg_conf_per_class = [0] * len(self.data_dict["names"])
622
- pred_class_count = {}
623
- for *xyxy, conf, cls in predn.tolist():
624
- if conf >= 0.25:
625
- cls = int(cls)
626
- box_data.append(
627
- {
628
- "position": {
629
- "minX": xyxy[0],
630
- "minY": xyxy[1],
631
- "maxX": xyxy[2],
632
- "maxY": xyxy[3],
633
- },
634
- "class_id": cls,
635
- "box_caption": f"{names[cls]} {conf:.3f}",
636
- "scores": {"class_score": conf},
637
- "domain": "pixel",
638
- }
639
- )
640
- avg_conf_per_class[cls] += conf
641
-
642
- if cls in pred_class_count:
643
- pred_class_count[cls] += 1
644
- else:
645
- pred_class_count[cls] = 1
646
-
647
- for pred_class in pred_class_count.keys():
648
- avg_conf_per_class[pred_class] = (
649
- avg_conf_per_class[pred_class] / pred_class_count[pred_class]
650
- )
651
-
652
- boxes = {
653
- "predictions": {"box_data": box_data, "class_labels": names}
654
- } # inference-space
655
- id = self.val_table_path_map[Path(path).name]
656
- self.result_table.add_data(
657
- self.current_epoch,
658
- id,
659
- self.val_table.data[id][1],
660
- wandb.Image(
661
- self.val_table.data[id][1], boxes=boxes, classes=class_set
662
- ),
663
- *avg_conf_per_class,
664
- )
665
-
666
- def val_one_image(self, pred, predn, path, names, im):
667
- """
668
- Log validation data for one image. Updates the result Table if the validation dataset is uploaded and logs the bbox media panel
669
-
670
- arguments:
671
- pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
672
- predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
673
- path (str): local path of the current evaluation image
674
- """
675
- if (
676
- self.val_table and self.result_table
677
- ): # Log Table if Val dataset is uploaded as artifact
678
- self.log_training_progress(predn, path, names)
679
-
680
- if (
681
- len(self.bbox_media_panel_images) < self.max_imgs_to_log
682
- and self.current_epoch > 0
683
- ):
684
- if self.current_epoch % self.bbox_interval == 0:
685
- box_data = [
686
- {
687
- "position": {
688
- "minX": xyxy[0],
689
- "minY": xyxy[1],
690
- "maxX": xyxy[2],
691
- "maxY": xyxy[3],
692
- },
693
- "class_id": int(cls),
694
- "box_caption": f"{names[int(cls)]} {conf:.3f}",
695
- "scores": {"class_score": conf},
696
- "domain": "pixel",
697
- }
698
- for *xyxy, conf, cls in pred.tolist()
699
- ]
700
- boxes = {
701
- "predictions": {
702
- "box_data": box_data,
703
- "class_labels": names,
704
- }
705
- } # inference-space
706
- self.bbox_media_panel_images.append(
707
- wandb.Image(im, boxes=boxes, caption=path.name)
708
- )
709
-
710
- def log(self, log_dict):
711
- """
712
- save the metrics to the logging dictionary
713
-
714
- arguments:
715
- log_dict (Dict) -- metrics/media to be logged in current step
716
- """
717
- if self.wandb_run:
718
- for key, value in log_dict.items():
719
- self.log_dict[key] = value
720
-
721
- def end_epoch(self, best_result=False):
722
- """
723
- commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
724
-
725
- arguments:
726
- best_result (boolean): Boolean representing if the result of this evaluation is best or not
727
- """
728
- if self.wandb_run:
729
- with all_logging_disabled():
730
- if self.bbox_media_panel_images:
731
- self.log_dict[
732
- "BoundingBoxDebugger"
733
- ] = self.bbox_media_panel_images
734
- try:
735
- wandb.log(self.log_dict)
736
- except BaseException as e:
737
- LOGGER.info(
738
- f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}"
739
- )
740
- self.wandb_run.finish()
741
- self.wandb_run = None
742
-
743
- self.log_dict = {}
744
- self.bbox_media_panel_images = []
745
- if self.result_artifact:
746
- self.result_artifact.add(self.result_table, "result")
747
- wandb.log_artifact(
748
- self.result_artifact,
749
- aliases=[
750
- "latest",
751
- "last",
752
- "epoch " + str(self.current_epoch),
753
- ("best" if best_result else ""),
754
- ],
755
- )
756
-
757
- wandb.log({"evaluation": self.result_table})
758
- columns = ["epoch", "id", "ground truth", "prediction"]
759
- columns.extend(self.data_dict["names"])
760
- self.result_table = wandb.Table(columns)
761
- self.result_artifact = wandb.Artifact(
762
- "run_" + wandb.run.id + "_progress", "evaluation"
763
- )
764
-
765
- def finish_run(self):
766
- """
767
- Log metrics if any and finish the current W&B run
768
- """
769
- if self.wandb_run:
770
- if self.log_dict:
771
- with all_logging_disabled():
772
- wandb.log(self.log_dict)
773
- wandb.run.finish()
774
-
775
-
776
- @contextmanager
777
- def all_logging_disabled(highest_level=logging.CRITICAL):
778
- """source - https://gist.github.com/simon-weber/7853144
779
- A context manager that will prevent any logging messages triggered during the body from being processed.
780
- :param highest_level: the maximum logging level in use.
781
- This would only need to be changed if a custom level greater than CRITICAL is defined.
782
- """
783
- previous_level = logging.root.manager.disable
784
- logging.disable(highest_level)
785
- try:
786
- yield
787
- finally:
788
- logging.disable(previous_level)
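The all_logging_disabled context manager is self-contained; a quick sketch of its effect, independent of W&B:

```python
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo")

log.info("visible")          # emitted normally
with all_logging_disabled():
    log.error("suppressed")  # swallowed: all levels up to CRITICAL are disabled
log.info("visible again")    # the previous disable level is restored on exit
```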
spaces/Adapter/CoAdapter/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- license: openrail
3
- title: CoAdapter
4
- sdk: gradio
5
- sdk_version: 3.19.1
6
- emoji: 😻
7
- colorFrom: pink
8
- colorTo: blue
9
- pinned: false
10
- python_version: 3.8.16
11
- app_file: app.py
12
- ---
 
spaces/AgentVerse/agentVerse/agentverse/llms/utils/jsonrepair.py DELETED
@@ -1,660 +0,0 @@
- # jsonrepair.py - Repair invalid JSON documents in Python
- #
- # Just https://github.com/josdejong/jsonrepair ported from TypeScript to Python.
- #
- # This port won't get updates, because the goal should be to generate this library instead.
- #
- # See: https://github.com/josdejong/jsonrepair/issues/84
- #
-
- import json
- import re
- from typing import Optional
-
- CONTROL_CHARACTERS = {"\b": "\\b", "\f": "\\f", "\n": "\\n", "\r": "\\r", "\t": "\\t"}
-
- ESCAPE_CHARACTERS = {
-     '"': '"',
-     "\\": "\\",
-     "/": "/",
-     "b": "\b",
-     "f": "\f",
-     "n": "\n",
-     "r": "\r",
-     "t": "\t"
-     # note that \u is handled separately in parseString()
- }
-
-
- def remove_at_index(text: str, start: int, count: int) -> str:
-     return text[0:start] + text[start + count :]
-
-
- def is_control_character(char: str) -> bool:
-     return char in CONTROL_CHARACTERS
-
-
- def is_valid_string_character(char: str) -> bool:
-     return 0x20 <= ord(char) <= 0x10FFFF
-
-
- def is_quote(char: str) -> bool:
-     return is_single_quote(char) or is_double_quote(char)
-
-
- def is_single_quote(char: str) -> bool:
-     """Test whether the given character is a single quote character.
-     Also tests for special variants of single quotes.
-     """
-     return char in (
-         "'",  # U+0027
-         "‘",  # U+2018
-         "’",  # U+2019
-         "`",  # U+0060
-         "´",  # U+00B4
-     )
-
-
- def is_double_quote(char: str) -> bool:
-     return (
-         is_ascii_double_quote(char)
-         or is_double_quote_left(char)
-         or is_double_quote_right(char)
-     )
-
-
- def is_ascii_double_quote(char: str) -> bool:
-     return char == '"'  # U+0022
-
-
- def is_double_quote_left(char: str) -> bool:
-     return char == "“"  # U+201C
-
-
- def is_double_quote_right(char: str) -> bool:
-     return char == "”"  # U+201D
-
-
- def is_start_of_value(char: str) -> bool:
-     regex_start_of_value = (
-         r"^[[{\w-]$"  # alpha, number, minus, or opening bracket or brace
-     )
-     return bool(re.search(regex_start_of_value, char)) or is_quote(char)
-
-
- def ends_with_comma_or_newline(text: str) -> bool:
-     return bool(re.search(r"[,\n][ \t\r]*$", text))
-
-
- def is_whitespace(char: str) -> bool:
-     return char.isspace()
-
-
- def is_special_whitespace(char: str) -> bool:
-     """Check if the given character is a special whitespace character, some unicode variant"""
-     return (
-         char == "\u00A0"  # non-breaking space
-         or ord("\u2000") <= ord(char) <= ord("\u200A")
-         or char == "\u202F"
-         or char == "\u205F"
-         or char == "\u3000"
-     )
-
-
- def insert_before_last_whitespace(text: str, text_to_insert: str) -> str:
-     index = len(text)
-
-     if not is_whitespace(text[index - 1]):
-         # no trailing whitespaces
-         return text + text_to_insert
-
-     while is_whitespace(text[index - 1]):
-         index -= 1
-
-     return text[:index] + text_to_insert + text[index:]
-
-
- def strip_last_occurrence(
-     text: str, text_to_strip: str, strip_remaining: bool = False
- ) -> str:
-     try:
-         index = text.rindex(text_to_strip)
-         return text[:index] + ("" if strip_remaining else text[index + 1 :])
-     except ValueError:
-         return text
-
-
- def is_hex(char: str) -> bool:
-     try:
-         int(char, 16)
-         return True
-     except ValueError:
-         return False
-
-
- def is_delimiter(char: str) -> bool:
-     return char in ",:[]{}()\n'" or is_quote(char)
-
-
- def at_end_of_block_comment(text: str, i: int) -> bool:
-     return text[i] == "*" and text[i + 1] == "/"
-
-
- class JsonRepairError(Exception):
-     def __init__(self, message: str, position: int):
-         super(JsonRepairError, self).__init__(message + f" at position {position}")
-         self.position = position
-
-
- class JsonRepair:
-     """Repairs invalid JSON, i.e. change JavaScript notation into JSON notation.
-
-     Example:
-
-         try:
-             json = "{name: 'John'}"
-             repaired = JsonRepair(json).repair()
-             print(repaired)
-             # '{"name": "John"}'
-         except JsonRepairFailed as err:
-             print(err)
-
-     """
-
-     def __init__(self, text: str):
-         self.text = text
-         self.i = 0  # current index in text
-         self.output = ""  # generated output
-
-     def char(self, pos: int = 0) -> str:
-         return self.text[self.i + pos]
-
-     def inc(self, by: int = 1) -> None:
-         self.i += by
-
-     def dec(self, by: int = 1) -> None:
-         self.i -= by
-
-     def is_start_of_document(self, pos: int = 0) -> bool:
-         return self.i + pos == 0
-
-     def is_end_of_document(self, pos: int = 0) -> bool:
-         return self.i + pos >= len(self.text)
-
-     def repair(self) -> str:
-         processed = self.parse_value()
-         if not processed:
-             raise self.unexpected_end()
-
-         processed_comma = self.parse_character(",")
-         if processed_comma:
-             self.parse_whitespace_and_skip_comments()
-
-         if (
-             not self.is_end_of_document()
-             and is_start_of_value(self.char())
-             and ends_with_comma_or_newline(self.output)
-         ):
-             # start of a new value after end of the root level object: looks like
-             # newline delimited JSON -> turn into a root level array
-             if not processed_comma:
-                 # repair missing comma
-                 self.output = insert_before_last_whitespace(self.output, ",")
-
-             self.parse_newline_delimited_json()
-         elif processed_comma:
-             # repair: remove trailing comma
-             self.output = strip_last_occurrence(self.output, ",")
-
-         if self.is_end_of_document():
-             # reached the end of the document properly
-             return self.output
-
-         raise self.unexpected_character()
-
-     def parse_value(self) -> bool:
-         self.parse_whitespace_and_skip_comments()
-         processed = (
-             self.parse_object()
-             or self.parse_array()
-             or self.parse_string()
-             or self.parse_number()
-             or self.parse_keywords()
-             or self.parse_unquoted_string()
-         )
-         self.parse_whitespace_and_skip_comments()
-         return processed
-
-     def parse_whitespace_and_skip_comments(self) -> bool:
-         start = self.i
-
-         changed = self.parse_whitespace()
-         while True:
-             changed = self.parse_comment()
-             if changed:
-                 changed = self.parse_whitespace()
-             if not changed:
-                 break
-
-         return self.i > start
-
-     def parse_whitespace(self) -> bool:
-         whitespace = ""
-
-         while not self.is_end_of_document():
-             char = self.char()
-
-             normal = is_whitespace(char)
-             special = is_special_whitespace(char)
-
-             if not normal and not special:
-                 break
-
-             if special:
-                 whitespace += " "  # repair special whitespace
-             else:
-                 whitespace += char
-
-             self.inc()
-
-         if whitespace:
-             self.output += whitespace
-             return True
-         return False
-
-     def parse_comment(self) -> bool:
-         # find a block comment '/* ... */'
-         if not self.is_end_of_document() and not self.is_end_of_document(pos=+1):
-             if self.char() == "/" and self.char(pos=+1) == "*":
-                 # repair block comment by skipping it
-                 while not self.is_end_of_document() and not at_end_of_block_comment(
-                     self.text, self.i
-                 ):
-                     self.inc()
-                 self.inc(by=2)
-                 return True
-
-             # find a line comment '// ...'
-             if self.char() == "/" and self.char(pos=+1) == "/":
-                 # repair line comment by skipping it
-                 while not self.is_end_of_document() and self.char() != "\n":
-                     self.inc()
-                 return True
-
-         return False
-
-     def parse_character(self, char: str) -> bool:
-         if not self.is_end_of_document():
-             if self.char() == char:
-                 self.output += char
-                 self.inc()
-                 return True
-         return False
-
-     def skip_character(self, char: str) -> bool:
-         if not self.is_end_of_document() and self.char() == char:
-             self.inc()
-             return True
-         return False
-
-     def skip_escape_character(self) -> bool:
-         return self.skip_character("\\")
-
-     def parse_object(self) -> bool:
-         """Parse an object like '{"key": "value"}'"""
-         if not self.is_end_of_document() and self.char() == "{":
-             self.output += "{"
-             self.inc()
-             self.parse_whitespace_and_skip_comments()
-
-             initial = True
-             while not self.is_end_of_document() and self.char() != "}":
-                 if not initial:
-                     processed_comma = self.parse_character(",")
-                     if not processed_comma:
-                         # repair missing comma
-                         self.output = insert_before_last_whitespace(self.output, ",")
-                     self.parse_whitespace_and_skip_comments()
-                 else:
-                     processed_comma = True
-                     initial = False
-
-                 processed_key = self.parse_string() or self.parse_unquoted_string()
-                 if not processed_key:
-                     if self.is_end_of_document() or self.char() in "{}[]":
-                         # repair trailing comma
-                         self.output = strip_last_occurrence(self.output, ",")
-                         break
-                     raise self.object_key_expected()
-
-                 self.parse_whitespace_and_skip_comments()
-                 processed_colon = self.parse_character(":")
-                 if not processed_colon:
-                     if is_start_of_value(self.char()):
-                         # repair missing colon
-                         self.output = insert_before_last_whitespace(self.output, ":")
-                     else:
-                         raise self.colon_expected()
-                 processed_value = self.parse_value()
-                 if not processed_value:
-                     if processed_colon:
-                         raise self.object_value_expected()
-                     raise self.colon_expected()
-
-             if not self.is_end_of_document() and self.char() == "}":
-                 self.output += "}"
-                 self.inc()
-             else:
-                 # repair missing end bracket
-                 self.output = insert_before_last_whitespace(self.output, "}")
-
-             return True
-
-         return False
-
-     def parse_array(self) -> bool:
-         """Parse an array like '["item1", "item2", ...]'"""
-         if not self.is_end_of_document() and self.char() == "[":
-             self.output += "["
-             self.inc()
-             self.parse_whitespace_and_skip_comments()
-
-             initial = True
-             while not self.is_end_of_document() and self.char() != "]":
-                 if not initial:
-                     processed_comma = self.parse_character(",")
-                     if not processed_comma:
-                         # repair missing comma
-                         self.output = insert_before_last_whitespace(self.output, ",")
-                 else:
-                     initial = False
-
-                 processed_value = self.parse_value()
-                 if not processed_value:
-                     # repair trailing comma
-                     self.output = strip_last_occurrence(self.output, ",")
-                     break
-
-             if not self.is_end_of_document() and self.char() == "]":
-                 self.output += "]"
-                 self.inc()
-             else:
-                 # repair missing closing array bracket
-                 self.output = insert_before_last_whitespace(self.output, "]")
-
-             return True
-
-         return False
-
-     def parse_newline_delimited_json(self):
-         """Parse and repair Newline Delimited JSON (NDJSON):
-         multiple JSON objects separated by a newline character
-         """
-         # repair NDJSON
-         initial = True
-         processed_value = True
-         while processed_value:
-             if not initial:
-                 # parse optional comma, insert when missing
-                 processed_comma = self.parse_character(",")
-                 if not processed_comma:
-                     # repair: add missing comma
-                     self.output = insert_before_last_whitespace(self.output, ",")
-             else:
-                 initial = False
-
-             processed_value = self.parse_value()
-
-         if not processed_value:
-             # repair: remove trailing comma
-             self.output = strip_last_occurrence(self.output, ",")
-
-         # repair: wrap the output inside array brackets
-         self.output = f"[\n{self.output}\n]"
-
-     def parse_string(self) -> bool:
-         """Parse a string enclosed by double quotes "...". Can contain escaped quotes
-         Repair strings enclosed in single quotes or special quotes
-         Repair an escaped string
-         """
-         if not self.is_end_of_document():
-             skip_escape_chars = self.char() == "\\"
-             if skip_escape_chars:
-                 # repair: remove the first escape character
-                 self.inc()
-                 skip_escape_chars = True
-
-             if not self.is_end_of_document() and is_quote(self.char()):
-                 is_end_quote = (
-                     is_single_quote if is_single_quote(self.char()) else is_double_quote
-                 )
-
-                 if self.char() != '"':
-                     pass  # TODO?: repair non-normalized quote
-                 self.output += '"'
-                 self.inc()
-
-                 while not self.is_end_of_document() and not is_end_quote(self.char()):
-                     if self.char() == "\\":
-                         char = self.char(pos=+1)
-                         escape_char = ESCAPE_CHARACTERS.get(char)
-                         if escape_char:
-                             self.output += self.text[self.i : self.i + 2]
-                             self.inc(by=2)
-                         elif char == "u":
-                             if (
-                                 not self.is_end_of_document(pos=+5)
-                                 and is_hex(self.char(pos=+2))
-                                 and is_hex(self.char(pos=+3))
-                                 and is_hex(self.char(pos=+4))
-                                 and is_hex(self.char(pos=+5))
-                             ):
-                                 self.output += self.text[self.i : self.i + 6]
-                                 self.inc(by=6)
-                             else:
-                                 raise self.invalid_unicode_character(self.i)
-                         else:
-                             # repair invalid escape character: remove it
-                             self.output += char
-                             self.inc(by=2)
-                     else:
-                         char = self.char()
-
-                         if char == '"' and self.char(pos=-1) != "\\":
-                             # repair unescaped double quote
-                             self.output += "\\" + char
-                             self.inc()
-                         elif is_control_character(char):
-                             # unescaped control character
-                             self.output += CONTROL_CHARACTERS[char]
-                             self.inc()
-                         else:
-                             if not is_valid_string_character(char):
-                                 raise self.invalid_character(char)
-                             self.output += char
-                             self.inc()
-
-                 if skip_escape_chars:
-                     processed = self.skip_escape_character()
-                     if processed:
-                         pass  # repair: skipped escape character (nothing to do)
-
-                 if not self.is_end_of_document() and is_quote(self.char()):
-                     if self.char() != '"':
-                         pass  # TODO:? repair non-normalized quote
-
-                     self.output += '"'
-                     self.inc()
-                 else:
-                     # repair missing end quote
-                     self.output += '"'
-
-                 self.parse_concatenated_string()
-
-                 return True
-
-         return False
-
-     def parse_concatenated_string(self) -> bool:
-         """Repair concatenated strings like \"hello\" + \"world\", change this into \"helloworld\" """
-         processed = False
-
-         self.parse_whitespace_and_skip_comments()
-         while not self.is_end_of_document() and self.char() == "+":
-             processed = True
-             self.inc()
-             self.parse_whitespace_and_skip_comments()
-
-             # repair: remove the end quote of the first string
-             self.output = strip_last_occurrence(self.output, '"', True)
-             start = len(self.output)
-             self.parse_string()
-
-             # repair: remove the start quote of the second string
-             self.output = remove_at_index(self.output, start, 1)
-
-         return processed
-
-     def parse_number(self) -> bool:
-         """Parse a number like 2.4 or 2.4e6"""
-         if not self.is_end_of_document():
-             start = self.i
-             if self.char() == "-":
-                 self.inc()
-                 err = self.expect_digit(start)
-                 if err:
-                     raise err
-
-             if not self.is_end_of_document() and self.char() == "0":
-                 self.inc()
-             elif not self.is_end_of_document() and self.char() in "123456789":
-                 self.inc()
-                 while not self.is_end_of_document() and self.char().isdigit():
-                     self.inc()
-
-             if not self.is_end_of_document() and self.char() == ".":
-                 self.inc()
-                 err = self.expect_digit(start)
-                 if err:
-                     raise err
-                 while not self.is_end_of_document() and self.char().isdigit():
-                     self.inc()
-
-             if not self.is_end_of_document() and self.char() in "eE":
-                 self.inc()
-                 if not self.is_end_of_document() and self.char() in "+-":
-                     self.inc()
-                 err = self.expect_digit(start)
-                 if err:
-                     raise err
-                 while not self.is_end_of_document() and self.char().isdigit():
-                     self.inc()
-
-             if self.i > start:
-                 self.output += self.text[start : self.i]
-                 return True
-
-         return False
-
-     def parse_keywords(self) -> bool:
-         """Parse keywords true, false, null
-         Repair Python keywords True, False, None
-         """
-         return (
-             self.parse_keyword("true", "true")
-             or self.parse_keyword("false", "false")
-             or self.parse_keyword("null", "null")
-             # repair Python keywords True, False, None
-             or self.parse_keyword("True", "true")
-             or self.parse_keyword("False", "false")
-             or self.parse_keyword("None", "null")
-         )
-
-     def parse_keyword(self, name: str, value: str) -> bool:
-         if self.text[self.i : self.i + len(name)] == name:
-             self.output += value
-             self.inc(by=len(name))
-             return True
-
-         return False
-
-     def parse_unquoted_string(self) -> bool:
-         """Repair an unquoted string by adding quotes around it
-         Repair a MongoDB function call like NumberLong("2")
-         Repair a JSONP function call like callback({...});
-         """
-         # note that the symbol can end with whitespaces: we stop at the next delimiter
-         start = self.i
-         while not self.is_end_of_document() and not is_delimiter(self.char()):
-             self.inc()
-
-         if self.i > start:
-             if not self.is_end_of_document() and self.char() == "(":
-                 # repair a MongoDB function call like NumberLong("2")
-                 # repair a JSONP function call like callback({...});
-                 self.inc()
-
-                 self.parse_value()
-
-                 if not self.is_end_of_document() and self.char() == ")":
-                     # repair: skip close bracket of function call
-                     self.inc()
-                     if not self.is_end_of_document() and self.char() == ";":
-                         # repair: skip semicolon after JSONP call
-                         self.inc()
-
-                 return True
-
-             # else repair unquoted string
-
-             # first, go back to prevent getting trailing whitespaces in the string
-             while not self.is_start_of_document() and is_whitespace(self.char(pos=-1)):
-                 self.dec()
-
-             symbol = self.text[start : self.i]
-             self.output += json.dumps(symbol)
-
-             return True
-
-         return False
-
-     def expect_digit(self, start: int) -> Optional[JsonRepairError]:
-         if self.is_end_of_document() or not self.char().isdigit():
-             num_so_far = self.text[start : self.i]
-             return JsonRepairError(
-                 f"Invalid number '{num_so_far}', expecting a digit {self.got()}", self.i
-             )
-
-     def invalid_character(self, char: str) -> JsonRepairError:
-         return JsonRepairError("Invalid character " + json.dumps(char), self.i)
-
-     def unexpected_character(self) -> JsonRepairError:
-         return JsonRepairError(
-             "Unexpected character " + json.dumps(self.text[self.i]), self.i
-         )
-
-     def unexpected_end(self) -> JsonRepairError:
-         return JsonRepairError("Unexpected end of json string", len(self.text))
-
-     def object_key_expected(self) -> JsonRepairError:
-         return JsonRepairError("Object key expected", self.i)
-
-     def object_value_expected(self) -> JsonRepairError:
-         return JsonRepairError("Object value expected", self.i)
-
-     def colon_expected(self) -> JsonRepairError:
-         return JsonRepairError("Colon expected", self.i)
-
-     def invalid_unicode_character(self, start: int) -> JsonRepairError:
-         end = start + 2
-         while re.match(r"\w", self.text[end]):
-             end += 1
-         chars = self.text[start:end]
-         return JsonRepairError(f'Invalid unicode character "{chars}"', self.i)
-
-     def got(self) -> str:
-         return (
-             f"but got '{self.char()}'"
-             if not self.is_end_of_document()
-             else "but reached end of input"
-         )
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/bracketparser2-plugin.js DELETED
@@ -1,18 +0,0 @@
- import BracketParser from './bracketparser2.js';
-
- class BracketParserPlugin extends Phaser.Plugins.BasePlugin {
-     constructor(pluginManager) {
-         super(pluginManager);
-     }
-
-     start() {
-         var eventEmitter = this.game.events;
-         eventEmitter.on('destroy', this.destroy, this);
-     }
-
-     add(config) {
-         return new BracketParser(config);
-     }
- }
-
- export default BracketParserPlugin;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dialog/methods/ModalMethods.js DELETED
@@ -1,43 +0,0 @@
- import ModalMethods from '../../basesizer/ModalMethods.js';
-
- export default {
-     onCreateModalBehavior(self) {
-         self.on('button.click', function (button, groupName, index, pointer, event) {
-             if (groupName !== 'actions') {
-                 return;
-             }
-
-             var closeEventData = {
-                 index: index,
-                 text: button.text,
-                 button: button,
-                 dialog: self
-             }
-
-
-             switch (self.buttonsType) {
-                 case 'radio':
-                     closeEventData.value = self.getChoicesSelectedButtonName();
-                     break;
-                 case 'checkboxes':
-                     closeEventData.value = self.getChoicesButtonStates();
-                     break;
-                 default:
-                     closeEventData.value = undefined;
-             }
-
-             self.modalClose(closeEventData);
-         });
-     },
-
-     modal(config, onClose) {
-         if (config && (config.defaultBehavior === false)) {
-             this.onCreateModalBehavior = false;
-         } else {
-             delete this.onCreateModalBehavior;
-         }
-
-         ModalMethods.modal.call(this, config, onClose);
-         return this;
-     }
- }
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/childbehaviors/index.js DELETED
@@ -1,14 +0,0 @@
- import Visible from './Visible.js';
- import Fade from './Fade.js';
- import Move from './Move.js';
-
- var methods = {};
-
- Object.assign(
-     methods,
-     Visible,
-     Fade,
-     Move
- );
-
- export default methods;
spaces/AlexWang/lama/bin/evaluate_predicts.py DELETED
@@ -1,79 +0,0 @@
- #!/usr/bin/env python3
-
- import os
-
- import pandas as pd
-
- from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset
- from saicinpainting.evaluation.evaluator import InpaintingEvaluator, lpips_fid100_f1
- from saicinpainting.evaluation.losses.base_loss import SegmentationAwareSSIM, \
-     SegmentationClassStats, SSIMScore, LPIPSScore, FIDScore, SegmentationAwareLPIPS, SegmentationAwareFID
- from saicinpainting.evaluation.utils import load_yaml
-
-
- def main(args):
-     config = load_yaml(args.config)
-
-     dataset = PrecomputedInpaintingResultsDataset(args.datadir, args.predictdir, **config.dataset_kwargs)
-
-     metrics = {
-         'ssim': SSIMScore(),
-         'lpips': LPIPSScore(),
-         'fid': FIDScore()
-     }
-     enable_segm = config.get('segmentation', dict(enable=False)).get('enable', False)
-     if enable_segm:
-         weights_path = os.path.expandvars(config.segmentation.weights_path)
-         metrics.update(dict(
-             segm_stats=SegmentationClassStats(weights_path=weights_path),
-             segm_ssim=SegmentationAwareSSIM(weights_path=weights_path),
-             segm_lpips=SegmentationAwareLPIPS(weights_path=weights_path),
-             segm_fid=SegmentationAwareFID(weights_path=weights_path)
-         ))
-     evaluator = InpaintingEvaluator(dataset, scores=metrics,
-                                     integral_title='lpips_fid100_f1', integral_func=lpips_fid100_f1,
-                                     **config.evaluator_kwargs)
-
-     os.makedirs(os.path.dirname(args.outpath), exist_ok=True)
-
-     results = evaluator.evaluate()
-
-     results = pd.DataFrame(results).stack(1).unstack(0)
-     results.dropna(axis=1, how='all', inplace=True)
-     results.to_csv(args.outpath, sep='\t', float_format='%.4f')
-
-     if enable_segm:
-         only_short_results = results[[c for c in results.columns if not c[0].startswith('segm_')]].dropna(axis=1, how='all')
-         only_short_results.to_csv(args.outpath + '_short', sep='\t', float_format='%.4f')
-
-         print(only_short_results)
-
-         segm_metrics_results = results[['segm_ssim', 'segm_lpips', 'segm_fid']].dropna(axis=1, how='all').transpose().unstack(0).reorder_levels([1, 0], axis=1)
-         segm_metrics_results.drop(['mean', 'std'], axis=0, inplace=True)
-
-         segm_stats_results = results['segm_stats'].dropna(axis=1, how='all').transpose()
-         segm_stats_results.index = pd.MultiIndex.from_tuples(n.split('/') for n in segm_stats_results.index)
-         segm_stats_results = segm_stats_results.unstack(0).reorder_levels([1, 0], axis=1)
-         segm_stats_results.sort_index(axis=1, inplace=True)
-         segm_stats_results.dropna(axis=0, how='all', inplace=True)
-
-         segm_results = pd.concat([segm_metrics_results, segm_stats_results], axis=1, sort=True)
-         segm_results.sort_values(('mask_freq', 'total'), ascending=False, inplace=True)
-
-         segm_results.to_csv(args.outpath + '_segm', sep='\t', float_format='%.4f')
-     else:
-         print(results)
-
-
- if __name__ == '__main__':
-     import argparse
-
-     aparser = argparse.ArgumentParser()
-     aparser.add_argument('config', type=str, help='Path to evaluation config')
-     aparser.add_argument('datadir', type=str,
-                          help='Path to folder with images and masks (output of gen_mask_dataset.py)')
-     aparser.add_argument('predictdir', type=str,
-                          help='Path to folder with predicts (e.g. predict_hifill_baseline.py)')
-     aparser.add_argument('outpath', type=str, help='Where to put results')
-
-     main(aparser.parse_args())
spaces/Alfasign/diffusers-gallery/index.html DELETED
@@ -1,218 +0,0 @@
- <!DOCTYPE html>
- <html>
-   <head>
-     <meta charset="utf-8" />
-     <meta name="viewport" content="width=device-width" />
-
-     <title>Diffusers gallery</title>
-     <meta name="description" content="Discover all diffusion models on the Hugging Face hub." />
-
-     <meta property="og:url" content="https://huggingface-projects-diffusers-gallery.hf.space/" />
-     <meta property="og:type" content="website" />
-     <meta property="og:title" content="Hugging Face - Diffusers Models Gallery" />
-     <meta property="og:description" content="Discover all diffusion models on the Hugging Face hub." />
-     <meta property="og:image" content="https://huggingface-projects-diffusers-gallery.hf.space/Fo6vR6JX0AEjbw1.jpeg" />
-
-     <meta name="twitter:card" content="player" />
-     <meta property="twitter:url" content="https://huggingface-projects-diffusers-gallery.hf.space/" />
-     <meta name="twitter:description" content="Discover all diffusion models on the Hugging Face hub." />
-     <meta name="twitter:site" content="@huggingface" />
-     <meta name="twitter:title" content="Hugging Face - Diffusers Models Gallery" />
-
-     <meta name="twitter:image" content="https://huggingface-projects-diffusers-gallery.hf.space/Fo6vR6JX0AEjbw1.jpeg" />
-     <meta name="twitter:player" content="https://huggingface-projects-diffusers-gallery.hf.space/index.html" />
-     <meta name="twitter:player:width" content="100%" />
-     <meta name="twitter:player:height" content="600" />
-
-     <script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>
-     <script src="https://cdn.tailwindcss.com"></script>
-
-     <script type="module">
-       import Alpine from "https://cdn.skypack.dev/alpinejs";
-       import Intersect from "https://cdn.skypack.dev/@alpinejs/intersect";
-       Alpine.plugin(Intersect);
-
-       Alpine.data("modelsData", () => ({
-         async init() {
-           const data = await this.getModels(this.page, this.sort, this.filter);
-           this.models = data.models;
-           this.totalPages = data.totalPages;
-         },
-         ASSETS_URL: "https://d26smi9133w0oo.cloudfront.net/diffusers-gallery/",
-         models: [],
-         filter: "all",
-         sort: "trending",
-         page: 1,
-         totalPages: -1,
-         buttonClass(attr, filter) {
-           if (this[attr] === filter) {
-             return "bg-black dark:bg-white shadow-lg text-white dark:text-black hover:bg-black hover:text-white";
-           }
-           return "text-gray-600 dark:text-gray-300 hover:bg-gray-200 dark:hover:bg-gray-500 hover:text-gray-800";
-         },
-         async filterModels(style) {
-           this.filter = style;
-           this.page = 1;
-           const data = await this.getModels(this.page, this.sort, this.filter);
-           this.models = data.models;
-           this.totalPages = data.totalPages;
-         },
-         async sortModels(sort) {
-           this.sort = sort;
-           this.page = 1;
-           const data = await this.getModels(this.page, this.sort, this.filter);
-           this.models = data.models;
-           this.totalPages = data.totalPages;
-         },
-         async getModels(page, sort, style) {
-           // const res = await fetch(`http://localhost:8000/api/models?page=${page}&sort=${sort}&style=${style}`)
-           const res = await fetch(
-             `https://huggingface-projects-diffusers-gallery-bot.hf.space/api/models?page=${page}&sort=${sort}&style=${style}`
-           );
-           const data = await res.json();
-           const models = data.models.map((model) => ({
-             id: model.id,
-             likes: model.likes,
-             class: model.class,
-             isNFSW: model.isNFSW,
-             images: model.images.filter((image) => image && image.endsWith(".jpg")),
-           }));
-
-           return {
-             models,
-             totalPages: data.totalPages,
-           };
-         },
-         async nextPage() {
-           if (this.page < this.totalPages) {
-             this.page += 1;
-             const data = await this.getModels(this.page, this.sort, this.filter);
-             this.models = this.models.concat(data.models);
-             this.totalPages = data.totalPages;
-           }
-         },
-       }));
-       Alpine.start();
-     </script>
-   </head>
-
-   <body class="pb-10 pt-5 bg-gray-100 dark:bg-gray-900 relative">
-     <section
-       class="container px-6 grid grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4 mx-auto relative"
-       x-data="modelsData"
-     >
-       <div class="col-span-2 lg:col-span-1 flex flex-col gap-2 row-start">
-         <h1 class="text-lg font-semibold dark:text-white whitespace-nowrap">Diffusers Models Gallery</h1>
-       </div>
-
-       <div class="col-span-2 md:col-span-3 flex items-center gap-4 flex flex-wrap lg-auto lg:ml-auto text-sm">
-         <div class="flex gap-2">
-           <span class="md:px-3 py-1 dark:text-white text-gray-400">sort by</span>
-           <button
-             :class="buttonClass('sort', 'trending')"
-             class="px-2 md:px-3 py-1 rounded-full text"
-             @click="sortModels('trending')"
-           >
-             Trending
-           </button>
-           <button
-             :class="buttonClass('sort', 'recent')"
-             class="text-gray-600 hover:bg-gray-200 dark:hover:bg-gray-500 hover:text-gray-800 px-2 md:px-3 py-1 rounded-full"
-             @click="sortModels('recent')"
-           >
-             Recent
-           </button>
-           <button
-             :class="buttonClass('sort', 'likes')"
-             class="text-gray-600 hover:bg-gray-200 dark:hover:bg-gray-500 hover:text-gray-800 px-2 md:px-3 py-1 rounded-full"
-             @click="sortModels('likes')"
-           >
-             Most Likes
-           </button>
-         </div>
-         <div class="flex gap-2">
-           <span class="md:px-3 py-1 dark:text-white text-gray-400"> style</span>
-           <button
-             :class="buttonClass('filter', 'all')"
-             class="px-2 md:px-3 py-1 rounded-full"
-             @click="filterModels('all')"
-           >
-             All
-           </button>
-           <button
-             :class="buttonClass('filter', 'anime')"
-             class="text-gray-600 hover:bg-gray-200 dark:hover:bg-gray-500 hover:text-gray-800 px-2 md:px-3 py-1 rounded-full"
-             @click="filterModels('anime')"
-           >
-             Anime
-           </button>
-           <button
-             :class="buttonClass('filter', '3d')"
-             class="text-gray-600 hover:bg-gray-200 dark:hover:bg-gray-500 hover:text-gray-800 px-2 md:px-3 py-1 rounded-full"
-             @click="filterModels('3d')"
-           >
-             3D
-           </button>
-           <button
-             :class="buttonClass('filter', 'realistic')"
-             class="text-gray-600 hover:bg-gray-200 dark:hover:bg-gray-500 hover:text-gray-800 px-2 md:px-3 py-1 rounded-full"
-             @click="filterModels('realistic')"
-           >
-             Realistic
-           </button>
-         </div>
-       </div>
-
-       <template x-for="model in models" :key="model.id">
-         <template x-if="model.images.length > 0">
-           <a
-             :href="`https://huggingface.co/${model.id}`"
-             class="block bg-gray-900 rounded-xl overflow-hidden relative group aspect-square text-white"
-             target="_blank"
-           >
-             <div
-               class="absolute bottom-0 p-4 bg-gradient-to-t text-white pt-10 from-black/90 via-black/70 to-transparent w-full z-10"
-             >
-               <div class="text-sm flex items-center group-hover:translate-x-0.5 transition">
-                 <svg
-                   class="mr-1.5 text-white/70"
-                   xmlns="http://www.w3.org/2000/svg"
-                   xmlns:xlink="http://www.w3.org/1999/xlink"
-                   aria-hidden="true"
-                   focusable="false"
-                   role="img"
-                   width="1em"
-                   height="1em"
-                   preserveAspectRatio="xMidYMid meet"
-                   viewBox="0 0 32 32"
-                   fill="currentColor"
-                 >
-                   <path
-                     d="M22.5,4c-2,0-3.9,0.8-5.3,2.2L16,7.4l-1.1-1.1C12,3.3,7.2,3.3,4.3,6.2c0,0-0.1,0.1-0.1,0.1c-3,3-3,7.8,0,10.8L16,29l11.8-11.9c3-3,3-7.8,0-10.8C26.4,4.8,24.5,4,22.5,4z"
-                   ></path>
-                 </svg>
-                 <span x-text="model.likes"></span>
-               </div>
-               <div
-                 x-text="model.id"
-                 class="text-sm md:text-lg lg:text-xl font-semibold group-hover:translate-x-0.5 transition"
-               ></div>
-             </div>
-             <div class="group-hover:brightness-90 h-full" :class="model.isNFSW ? 'blur-md' : ''">
-               <template x-if="model.images[0]">
-                 <img
-                   :src="()=> ASSETS_URL + model.images[0]"
-                   :alt="model.id"
-                   alt=""
-                   class="w-full h-full object-cover group-hover:scale-[1.01] transition"
-                 />
-               </template>
-             </div>
-           </a>
-         </template>
-       </template>
-       <div class="h-12 relative" x-intersect="nextPage" data-iframe-height></div>
-     </section>
-   </body>
- </html>
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/__init__.py DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/depth2img.md DELETED
@@ -1,56 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # Text-guided depth-to-image generation
-
- [[open-in-colab]]
-
- The [`StableDiffusionDepth2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images. In addition, you can also pass a `depth_map` to preserve the image structure. If no `depth_map` is provided, the pipeline automatically predicts the depth via an integrated [depth-estimation model](https://github.com/isl-org/MiDaS).
-
- Start by creating an instance of the [`StableDiffusionDepth2ImgPipeline`]:
-
- ```python
- import torch
- import requests
- from PIL import Image
-
- from diffusers import StableDiffusionDepth2ImgPipeline
-
- pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
-     "stabilityai/stable-diffusion-2-depth",
-     torch_dtype=torch.float16,
- ).to("cuda")
- ```
-
- Now pass your prompt to the pipeline. You can also pass a `negative_prompt` to prevent certain words from guiding how an image is generated:
-
- ```python
- url = "http://images.cocodataset.org/val2017/000000039769.jpg"
- init_image = Image.open(requests.get(url, stream=True).raw)
- prompt = "two tigers"
- n_prompt = "bad, deformed, ugly, bad anatomy"
- image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0]
- image
- ```
-
- | Input | Output |
- |---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------|
- | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/coco-cats.png" width="500"/> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/depth2img-tigers.png" width="500"/> |
-
- Play around with the Spaces below and see if you notice a difference between generated images with and without a depth map!
-
- <iframe
-     src="https://radames-stable-diffusion-depth2img.hf.space"
-     frameborder="0"
-     width="850"
-     height="500"
- ></iframe>
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/t5_film_transformer.py DELETED
@@ -1,321 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- import math
-
- import torch
- from torch import nn
-
- from ..configuration_utils import ConfigMixin, register_to_config
- from .attention_processor import Attention
- from .embeddings import get_timestep_embedding
- from .modeling_utils import ModelMixin
-
-
- class T5FilmDecoder(ModelMixin, ConfigMixin):
-     @register_to_config
-     def __init__(
-         self,
-         input_dims: int = 128,
-         targets_length: int = 256,
-         max_decoder_noise_time: float = 2000.0,
-         d_model: int = 768,
-         num_layers: int = 12,
-         num_heads: int = 12,
-         d_kv: int = 64,
-         d_ff: int = 2048,
-         dropout_rate: float = 0.1,
-     ):
-         super().__init__()
-
-         self.conditioning_emb = nn.Sequential(
-             nn.Linear(d_model, d_model * 4, bias=False),
-             nn.SiLU(),
-             nn.Linear(d_model * 4, d_model * 4, bias=False),
-             nn.SiLU(),
-         )
-
-         self.position_encoding = nn.Embedding(targets_length, d_model)
-         self.position_encoding.weight.requires_grad = False
-
-         self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
-
-         self.dropout = nn.Dropout(p=dropout_rate)
-
-         self.decoders = nn.ModuleList()
-         for lyr_num in range(num_layers):
-             # FiLM conditional T5 decoder
-             lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
-             self.decoders.append(lyr)
-
-         self.decoder_norm = T5LayerNorm(d_model)
-
-         self.post_dropout = nn.Dropout(p=dropout_rate)
-         self.spec_out = nn.Linear(d_model, input_dims, bias=False)
-
-     def encoder_decoder_mask(self, query_input, key_input):
-         mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
-         return mask.unsqueeze(-3)
-
-     def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
-         batch, _, _ = decoder_input_tokens.shape
-         assert decoder_noise_time.shape == (batch,)
-
-         # decoder_noise_time is in [0, 1), so rescale to expected timing range.
-         time_steps = get_timestep_embedding(
-             decoder_noise_time * self.config.max_decoder_noise_time,
-             embedding_dim=self.config.d_model,
-             max_period=self.config.max_decoder_noise_time,
-         ).to(dtype=self.dtype)
-
-         conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
-
-         assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
-
-         seq_length = decoder_input_tokens.shape[1]
-
-         # If we want to use relative positions for audio context, we can just offset
-         # this sequence by the length of encodings_and_masks.
-         decoder_positions = torch.broadcast_to(
-             torch.arange(seq_length, device=decoder_input_tokens.device),
-             (batch, seq_length),
-         )
-
-         position_encodings = self.position_encoding(decoder_positions)
-
-         inputs = self.continuous_inputs_projection(decoder_input_tokens)
-         inputs += position_encodings
-         y = self.dropout(inputs)
-
-         # decoder: No padding present.
-         decoder_mask = torch.ones(
-             decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
-         )
-
-         # Translate encoding masks to encoder-decoder masks.
-         encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
-
-         # cross attend style: concat encodings
-         encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
-         encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
-
-         for lyr in self.decoders:
-             y = lyr(
-                 y,
-                 conditioning_emb=conditioning_emb,
-                 encoder_hidden_states=encoded,
-                 encoder_attention_mask=encoder_decoder_mask,
-             )[0]
-
-         y = self.decoder_norm(y)
-         y = self.post_dropout(y)
-
-         spec_out = self.spec_out(y)
-         return spec_out
-
-
- class DecoderLayer(nn.Module):
-     def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
-         super().__init__()
-         self.layer = nn.ModuleList()
-
-         # cond self attention: layer 0
-         self.layer.append(
-             T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
-         )
-
-         # cross attention: layer 1
-         self.layer.append(
-             T5LayerCrossAttention(
-                 d_model=d_model,
-                 d_kv=d_kv,
-                 num_heads=num_heads,
-                 dropout_rate=dropout_rate,
-                 layer_norm_epsilon=layer_norm_epsilon,
-             )
-         )
-
-         # Film Cond MLP + dropout: last layer
-         self.layer.append(
-             T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
-         )
-
-     def forward(
-         self,
-         hidden_states,
-         conditioning_emb=None,
-         attention_mask=None,
-         encoder_hidden_states=None,
-         encoder_attention_mask=None,
-         encoder_decoder_position_bias=None,
-     ):
-         hidden_states = self.layer[0](
-             hidden_states,
-             conditioning_emb=conditioning_emb,
-             attention_mask=attention_mask,
-         )
-
-         if encoder_hidden_states is not None:
-             encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
-                 encoder_hidden_states.dtype
-             )
-
-             hidden_states = self.layer[1](
-                 hidden_states,
-                 key_value_states=encoder_hidden_states,
-                 attention_mask=encoder_extended_attention_mask,
-             )
-
-         # Apply Film Conditional Feed Forward layer
-         hidden_states = self.layer[-1](hidden_states, conditioning_emb)
-
-         return (hidden_states,)
-
-
- class T5LayerSelfAttentionCond(nn.Module):
-     def __init__(self, d_model, d_kv, num_heads, dropout_rate):
-         super().__init__()
-         self.layer_norm = T5LayerNorm(d_model)
-         self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
-         self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
-         self.dropout = nn.Dropout(dropout_rate)
-
-     def forward(
-         self,
-         hidden_states,
-         conditioning_emb=None,
-         attention_mask=None,
-     ):
-         # pre_self_attention_layer_norm
-         normed_hidden_states = self.layer_norm(hidden_states)
-
-         if conditioning_emb is not None:
-             normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
-
-         # Self-attention block
-         attention_output = self.attention(normed_hidden_states)
-
-         hidden_states = hidden_states + self.dropout(attention_output)
-
-         return hidden_states
-
-
- class T5LayerCrossAttention(nn.Module):
-     def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
-         super().__init__()
-         self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
-         self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
-         self.dropout = nn.Dropout(dropout_rate)
-
-     def forward(
-         self,
-         hidden_states,
-         key_value_states=None,
-         attention_mask=None,
-     ):
-         normed_hidden_states = self.layer_norm(hidden_states)
-         attention_output = self.attention(
-             normed_hidden_states,
-             encoder_hidden_states=key_value_states,
-             attention_mask=attention_mask.squeeze(1),
-         )
-         layer_output = hidden_states + self.dropout(attention_output)
-         return layer_output
-
-
- class T5LayerFFCond(nn.Module):
-     def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
-         super().__init__()
-         self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
-         self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
-         self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
-         self.dropout = nn.Dropout(dropout_rate)
-
-     def forward(self, hidden_states, conditioning_emb=None):
-         forwarded_states = self.layer_norm(hidden_states)
-         if conditioning_emb is not None:
-             forwarded_states = self.film(forwarded_states, conditioning_emb)
-
-         forwarded_states = self.DenseReluDense(forwarded_states)
-         hidden_states = hidden_states + self.dropout(forwarded_states)
-         return hidden_states
-
-
- class T5DenseGatedActDense(nn.Module):
-     def __init__(self, d_model, d_ff, dropout_rate):
-         super().__init__()
-         self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
-         self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
-         self.wo = nn.Linear(d_ff, d_model, bias=False)
-         self.dropout = nn.Dropout(dropout_rate)
-         self.act = NewGELUActivation()
-
-     def forward(self, hidden_states):
-         hidden_gelu = self.act(self.wi_0(hidden_states))
-         hidden_linear = self.wi_1(hidden_states)
-         hidden_states = hidden_gelu * hidden_linear
-         hidden_states = self.dropout(hidden_states)
-
-         hidden_states = self.wo(hidden_states)
-         return hidden_states
-
-
- class T5LayerNorm(nn.Module):
-     def __init__(self, hidden_size, eps=1e-6):
-         """
-         Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
-         """
-         super().__init__()
-         self.weight = nn.Parameter(torch.ones(hidden_size))
-         self.variance_epsilon = eps
-
-     def forward(self, hidden_states):
-         # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
-         # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
-         # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
-         # half-precision inputs is done in fp32
-
-         variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
-         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
-
-         # convert into half-precision if necessary
-         if self.weight.dtype in [torch.float16, torch.bfloat16]:
-             hidden_states = hidden_states.to(self.weight.dtype)
-
-         return self.weight * hidden_states
-
-
- class NewGELUActivation(nn.Module):
-     """
-     Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
-     the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
-     """
-
-     def forward(self, input: torch.Tensor) -> torch.Tensor:
-         return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
-
-
- class T5FiLMLayer(nn.Module):
-     """
-     FiLM Layer
-     """
-
-     def __init__(self, in_features, out_features):
-         super().__init__()
-         self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
-
-     def forward(self, x, conditioning_emb):
-         emb = self.scale_bias(conditioning_emb)
-         scale, shift = torch.chunk(emb, 2, -1)
-         x = x * (1 + scale) + shift
-         return x
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/auto_pipeline.py DELETED
@@ -1,886 +0,0 @@
- # coding=utf-8
- # Copyright 2023 The HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import inspect
- from collections import OrderedDict
-
- from ..configuration_utils import ConfigMixin
- from .controlnet import (
-     StableDiffusionControlNetImg2ImgPipeline,
-     StableDiffusionControlNetInpaintPipeline,
-     StableDiffusionControlNetPipeline,
-     StableDiffusionXLControlNetPipeline,
- )
- from .deepfloyd_if import IFImg2ImgPipeline, IFInpaintingPipeline, IFPipeline
- from .kandinsky import (
-     KandinskyCombinedPipeline,
-     KandinskyImg2ImgCombinedPipeline,
-     KandinskyImg2ImgPipeline,
-     KandinskyInpaintCombinedPipeline,
-     KandinskyInpaintPipeline,
-     KandinskyPipeline,
- )
- from .kandinsky2_2 import (
-     KandinskyV22CombinedPipeline,
-     KandinskyV22Img2ImgCombinedPipeline,
-     KandinskyV22Img2ImgPipeline,
-     KandinskyV22InpaintCombinedPipeline,
-     KandinskyV22InpaintPipeline,
-     KandinskyV22Pipeline,
- )
- from .stable_diffusion import (
-     StableDiffusionImg2ImgPipeline,
-     StableDiffusionInpaintPipeline,
-     StableDiffusionPipeline,
- )
- from .stable_diffusion_xl import (
-     StableDiffusionXLImg2ImgPipeline,
-     StableDiffusionXLInpaintPipeline,
-     StableDiffusionXLPipeline,
- )
-
-
- AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict(
-     [
-         ("stable-diffusion", StableDiffusionPipeline),
-         ("stable-diffusion-xl", StableDiffusionXLPipeline),
-         ("if", IFPipeline),
-         ("kandinsky", KandinskyCombinedPipeline),
-         ("kandinsky22", KandinskyV22CombinedPipeline),
-         ("stable-diffusion-controlnet", StableDiffusionControlNetPipeline),
-         ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetPipeline),
-     ]
- )
-
- AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict(
-     [
-         ("stable-diffusion", StableDiffusionImg2ImgPipeline),
-         ("stable-diffusion-xl", StableDiffusionXLImg2ImgPipeline),
-         ("if", IFImg2ImgPipeline),
-         ("kandinsky", KandinskyImg2ImgCombinedPipeline),
-         ("kandinsky22", KandinskyV22Img2ImgCombinedPipeline),
-         ("stable-diffusion-controlnet", StableDiffusionControlNetImg2ImgPipeline),
-     ]
- )
-
- AUTO_INPAINT_PIPELINES_MAPPING = OrderedDict(
-     [
-         ("stable-diffusion", StableDiffusionInpaintPipeline),
-         ("stable-diffusion-xl", StableDiffusionXLInpaintPipeline),
-         ("if", IFInpaintingPipeline),
-         ("kandinsky", KandinskyInpaintCombinedPipeline),
-         ("kandinsky22", KandinskyV22InpaintCombinedPipeline),
-         ("stable-diffusion-controlnet", StableDiffusionControlNetInpaintPipeline),
-     ]
- )
-
- _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict(
-     [
-         ("kandinsky", KandinskyPipeline),
-         ("kandinsky22", KandinskyV22Pipeline),
-     ]
- )
- _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict(
-     [
-         ("kandinsky", KandinskyImg2ImgPipeline),
-         ("kandinsky22", KandinskyV22Img2ImgPipeline),
-     ]
- )
- _AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict(
-     [
-         ("kandinsky", KandinskyInpaintPipeline),
-         ("kandinsky22", KandinskyV22InpaintPipeline),
-     ]
- )
-
- SUPPORTED_TASKS_MAPPINGS = [
-     AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
-     AUTO_IMAGE2IMAGE_PIPELINES_MAPPING,
-     AUTO_INPAINT_PIPELINES_MAPPING,
-     _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING,
-     _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING,
-     _AUTO_INPAINT_DECODER_PIPELINES_MAPPING,
- ]
-
-
- def _get_connected_pipeline(pipeline_cls):
-     # for now connected pipelines can only be loaded from decoder pipelines, such as kandinsky-community/kandinsky-2-2-decoder
-     if pipeline_cls in _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING.values():
-         return _get_task_class(
-             AUTO_TEXT2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False
-         )
-     if pipeline_cls in _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING.values():
-         return _get_task_class(
-             AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False
-         )
-     if pipeline_cls in _AUTO_INPAINT_DECODER_PIPELINES_MAPPING.values():
-         return _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False)
-
-
- def _get_task_class(mapping, pipeline_class_name, throw_error_if_not_exist: bool = True):
-     def get_model(pipeline_class_name):
-         for task_mapping in SUPPORTED_TASKS_MAPPINGS:
-             for model_name, pipeline in task_mapping.items():
-                 if pipeline.__name__ == pipeline_class_name:
-                     return model_name
-
-     model_name = get_model(pipeline_class_name)
-
-     if model_name is not None:
-         task_class = mapping.get(model_name, None)
-         if task_class is not None:
-             return task_class
-
-     if throw_error_if_not_exist:
-         raise ValueError(f"AutoPipeline can't find a pipeline linked to {pipeline_class_name} for {model_name}")
-
-
- def _get_signature_keys(obj):
-     parameters = inspect.signature(obj.__init__).parameters
-     required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
-     optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})
-     expected_modules = set(required_parameters.keys()) - {"self"}
-     return expected_modules, optional_parameters
-
-
- class AutoPipelineForText2Image(ConfigMixin):
-     r"""
-
-     AutoPipeline for text-to-image generation.
-
-     [`AutoPipelineForText2Image`] is a generic pipeline class that will be instantiated as one of the text-to-image
-     pipeline classes in diffusers.
-
-     The pipeline type (for example [`StableDiffusionPipeline`]) is automatically selected when created with the
-     AutoPipelineForText2Image.from_pretrained(pretrained_model_name_or_path) or
-     AutoPipelineForText2Image.from_pipe(pipeline) class methods.
-
-     This class cannot be instantiated using __init__() (throws an error).
-
-     Class attributes:
-
-         - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the
-           diffusion pipeline's components.
-
-     """
-     config_name = "model_index.json"
-
-     def __init__(self, *args, **kwargs):
-         raise EnvironmentError(
-             f"{self.__class__.__name__} is designed to be instantiated "
-             f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
-             f"`{self.__class__.__name__}.from_pipe(pipeline)` methods."
-         )
-
187
- @classmethod
188
- def from_pretrained(cls, pretrained_model_or_path, **kwargs):
189
- r"""
190
- Instantiates a text-to-image Pytorch diffusion pipeline from pretrained pipeline weight.
191
-
192
- The from_pretrained() method takes care of returning the correct pipeline class instance by:
193
- 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its
194
- config object
195
- 2. Finding the text-to-image pipeline linked to the pipeline class using pattern matching on pipeline class
196
- name.
197
-
198
- If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetPipeline`] object.
199
-
200
- The pipeline is set in evaluation mode (`model.eval()`) by default.
201
-
202
- If you get the error message below, you need to finetune the weights for your downstream task:
203
-
204
- ```
205
- Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
206
- - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
207
- You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
208
- ```
209
-
210
- Parameters:
211
- pretrained_model_or_path (`str` or `os.PathLike`, *optional*):
212
- Can be either:
213
-
214
- - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
215
- hosted on the Hub.
216
- - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights
217
- saved using
218
- [`~DiffusionPipeline.save_pretrained`].
219
- torch_dtype (`str` or `torch.dtype`, *optional*):
220
- Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the
221
- dtype is automatically derived from the model's weights.
222
- force_download (`bool`, *optional*, defaults to `False`):
223
- Whether or not to force the (re-)download of the model weights and configuration files, overriding the
224
- cached versions if they exist.
225
- cache_dir (`Union[str, os.PathLike]`, *optional*):
226
- Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
227
- is not used.
228
- resume_download (`bool`, *optional*, defaults to `False`):
229
- Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
230
- incompletely downloaded files are deleted.
231
- proxies (`Dict[str, str]`, *optional*):
232
- A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
233
- 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
234
- output_loading_info(`bool`, *optional*, defaults to `False`):
235
- Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
236
- local_files_only (`bool`, *optional*, defaults to `False`):
237
- Whether to only load local model weights and configuration files or not. If set to `True`, the model
238
- won't be downloaded from the Hub.
239
- use_auth_token (`str` or *bool*, *optional*):
240
- The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
241
- `diffusers-cli login` (stored in `~/.huggingface`) is used.
242
- revision (`str`, *optional*, defaults to `"main"`):
243
- The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
244
- allowed by Git.
245
- custom_revision (`str`, *optional*, defaults to `"main"`):
246
- The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
247
- `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a
248
- custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub.
249
- mirror (`str`, *optional*):
250
- Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not
251
- guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
252
- information.
253
- device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
254
- A map that specifies where each submodule should go. It doesn’t need to be defined for each
255
- parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
256
- same device.
257
-
258
- Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For
259
- more information about each option see [designing a device
260
- map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
261
- max_memory (`Dict`, *optional*):
262
- A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
263
- each GPU and the available CPU RAM if unset.
264
- offload_folder (`str` or `os.PathLike`, *optional*):
265
- The path to offload weights if device_map contains the value `"disk"`.
266
- offload_state_dict (`bool`, *optional*):
267
- If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
268
- the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
269
- when there is some disk offload.
270
- low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
271
- Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
272
- tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
273
- Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
274
- argument to `True` will raise an error.
275
- use_safetensors (`bool`, *optional*, defaults to `None`):
276
- If set to `None`, the safetensors weights are downloaded if they're available **and** if the
277
- safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
278
- weights. If set to `False`, safetensors weights are not loaded.
279
- kwargs (remaining dictionary of keyword arguments, *optional*):
280
- Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
281
- class). The overwritten components are passed directly to the pipelines `__init__` method. See example
282
- below for more information.
283
- variant (`str`, *optional*):
284
- Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when
285
- loading `from_flax`.
286
-
287
- <Tip>
288
-
289
- To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
290
- `huggingface-cli login`.
291
-
292
- </Tip>
293
-
294
- Examples:
295
-
296
- ```py
297
- >>> from diffusers import AutoPipelineForText2Image
298
-
299
- >>> pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
300
- >>> print(pipeline.__class__)
301
- ```
302
- """
303
- config = cls.load_config(pretrained_model_or_path)
304
- orig_class_name = config["_class_name"]
305
-
306
- if "controlnet" in kwargs:
307
- orig_class_name = config["_class_name"].replace("Pipeline", "ControlNetPipeline")
308
-
309
- text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, orig_class_name)
310
-
311
- return text_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs)
312
-
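Because the `controlnet` branch simply rewrites `"Pipeline"` to `"ControlNetPipeline"` in the detected class name, passing a ControlNet reroutes resolution to the ControlNet variant. A hedged usage sketch (the repo ids shown are commonly used checkpoints, given here only for illustration):

```py
>>> # Sketch; checkpoint ids are illustrative, not requirements.
>>> from diffusers import AutoPipelineForText2Image, ControlNetModel

>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
>>> pipe = AutoPipelineForText2Image.from_pretrained(
...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet
... )
>>> # resolves to StableDiffusionControlNetPipeline instead of StableDiffusionPipeline
```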
313
- @classmethod
314
- def from_pipe(cls, pipeline, **kwargs):
315
- r"""
316
- Instantiates a text-to-image PyTorch diffusion pipeline from another instantiated diffusion pipeline class.
317
-
318
- The from_pipe() method takes care of returning the correct pipeline class instance by finding the text-to-image
319
- pipeline linked to the pipeline class using pattern matching on pipeline class name.
320
-
321
- All the modules the pipeline contains will be used to initialize the new pipeline without reallocating
322
- additional memory.
323
-
324
- The pipeline is set in evaluation mode (`model.eval()`) by default.
325
-
326
- Parameters:
327
- pipeline (`DiffusionPipeline`):
328
- an instantiated `DiffusionPipeline` object
329
-
- Examples:
-
330
- ```py
331
- >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
332
-
333
- >>> pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
334
- ... "runwayml/stable-diffusion-v1-5", requires_safety_checker=False
335
- ... )
336
-
337
- >>> pipe_t2i = AutoPipelineForText2Image.from_pipe(pipe_i2i)
338
- ```
339
- """
340
-
341
- original_config = dict(pipeline.config)
342
- original_cls_name = pipeline.__class__.__name__
343
-
344
- # derive the pipeline class to instantiate
345
- text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, original_cls_name)
346
-
347
- # define expected module and optional kwargs given the pipeline signature
348
- expected_modules, optional_kwargs = _get_signature_keys(text_2_image_cls)
349
-
350
- pretrained_model_name_or_path = original_config.pop("_name_or_path", None)
351
-
352
- # allow users to pass modules in `kwargs` to override the original pipeline's components
353
- passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}
354
- original_class_obj = {
355
- k: pipeline.components[k]
356
- for k, v in pipeline.components.items()
357
- if k in expected_modules and k not in passed_class_obj
358
- }
359
-
360
- # allow users to pass optional kwargs to override the original pipeline's config attributes
361
- passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}
362
- original_pipe_kwargs = {
363
- k: original_config[k]
364
- for k, v in original_config.items()
365
- if k in optional_kwargs and k not in passed_pipe_kwargs
366
- }
367
-
368
- # config attributes that were not expected by the original pipeline are stored as private attributes
369
- # we will pass them as optional arguments if they can be accepted by the pipeline
370
- additional_pipe_kwargs = [
371
- k[1:]
372
- for k in original_config.keys()
373
- if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs
374
- ]
375
- for k in additional_pipe_kwargs:
376
- original_pipe_kwargs[k] = original_config.pop(f"_{k}")
377
-
378
- text_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs}
379
-
380
- # store unused config as private attribute
381
- unused_original_config = {
382
- f"{'' if k.startswith('_') else '_'}{k}": original_config[k]
383
- for k, v in original_config.items()
384
- if k not in text_2_image_kwargs
385
- }
386
-
387
- missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(text_2_image_kwargs.keys())
388
-
389
- if len(missing_modules) > 0:
390
- raise ValueError(
391
- f"Pipeline {text_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed"
392
- )
393
-
394
- model = text_2_image_cls(**text_2_image_kwargs)
395
- model.register_to_config(_name_or_path=pretrained_model_name_or_path)
396
- model.register_to_config(**unused_original_config)
397
-
398
- return model
399
-
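The merge at the end of `from_pipe` gives user-passed components and kwargs precedence over the donor pipeline's, because the donor's entries are filtered out wherever the user supplied a value. A minimal sketch of that precedence with toy values:

```py
# Toy values illustrating the precedence rule in from_pipe.
passed = {"unet": "user_unet"}                       # user overrides
donor = {"unet": "donor_unet", "vae": "donor_vae"}   # donor pipeline components

# donor entries survive only where the user did not override them
kept = {k: v for k, v in donor.items() if k not in passed}
merged = {**passed, **kept}
print(merged)  # {'unet': 'user_unet', 'vae': 'donor_vae'}
```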
400
-
401
- class AutoPipelineForImage2Image(ConfigMixin):
402
- r"""
403
-
404
- AutoPipeline for image-to-image generation.
405
-
406
- [`AutoPipelineForImage2Image`] is a generic pipeline class that will be instantiated as one of the image-to-image
407
- pipeline classes in diffusers.
408
-
409
- The pipeline type (for example [`StableDiffusionImg2ImgPipeline`]) is automatically selected when created with the
410
- `AutoPipelineForImage2Image.from_pretrained(pretrained_model_name_or_path)` or
411
- `AutoPipelineForImage2Image.from_pipe(pipeline)` class methods.
412
-
413
- This class cannot be instantiated using __init__() (throws an error).
414
-
415
- Class attributes:
416
-
417
- - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the
418
- diffusion pipeline's components.
419
-
420
- """
421
- config_name = "model_index.json"
422
-
423
- def __init__(self, *args, **kwargs):
424
- raise EnvironmentError(
425
- f"{self.__class__.__name__} is designed to be instantiated "
426
- f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
427
- f"`{self.__class__.__name__}.from_pipe(pipeline)` methods."
428
- )
429
-
430
- @classmethod
431
- def from_pretrained(cls, pretrained_model_or_path, **kwargs):
432
- r"""
433
- Instantiates an image-to-image PyTorch diffusion pipeline from pretrained pipeline weights.
434
-
435
- The from_pretrained() method takes care of returning the correct pipeline class instance by:
436
- 1. Detecting the pipeline class of the pretrained_model_or_path based on the _class_name property of its
437
- config object
438
- 2. Finding the image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class
439
- name.
440
-
441
- If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetImg2ImgPipeline`] object.
442
-
443
- The pipeline is set in evaluation mode (`model.eval()`) by default.
444
-
445
- If you get the error message below, you need to finetune the weights for your downstream task:
446
-
447
- ```
448
- Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
449
- - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
450
- You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
451
- ```
452
-
453
- Parameters:
454
- pretrained_model_or_path (`str` or `os.PathLike`, *optional*):
455
- Can be either:
456
-
457
- - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
458
- hosted on the Hub.
459
- - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights
460
- saved using
461
- [`~DiffusionPipeline.save_pretrained`].
462
- torch_dtype (`str` or `torch.dtype`, *optional*):
463
- Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the
464
- dtype is automatically derived from the model's weights.
465
- force_download (`bool`, *optional*, defaults to `False`):
466
- Whether or not to force the (re-)download of the model weights and configuration files, overriding the
467
- cached versions if they exist.
468
- cache_dir (`Union[str, os.PathLike]`, *optional*):
469
- Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
470
- is not used.
471
- resume_download (`bool`, *optional*, defaults to `False`):
472
- Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
473
- incompletely downloaded files are deleted.
474
- proxies (`Dict[str, str]`, *optional*):
475
- A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
476
- 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
477
- output_loading_info(`bool`, *optional*, defaults to `False`):
478
- Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
479
- local_files_only (`bool`, *optional*, defaults to `False`):
480
- Whether to only load local model weights and configuration files or not. If set to `True`, the model
481
- won't be downloaded from the Hub.
482
- use_auth_token (`str` or *bool*, *optional*):
483
- The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
484
- `diffusers-cli login` (stored in `~/.huggingface`) is used.
485
- revision (`str`, *optional*, defaults to `"main"`):
486
- The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
487
- allowed by Git.
488
- custom_revision (`str`, *optional*, defaults to `"main"`):
489
- The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
490
- `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a
491
- custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub.
492
- mirror (`str`, *optional*):
493
- Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not
494
- guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
495
- information.
496
- device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
497
- A map that specifies where each submodule should go. It doesn’t need to be defined for each
498
- parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
499
- same device.
500
-
501
- Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For
502
- more information about each option see [designing a device
503
- map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
504
- max_memory (`Dict`, *optional*):
505
- A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
506
- each GPU and the available CPU RAM if unset.
507
- offload_folder (`str` or `os.PathLike`, *optional*):
508
- The path to offload weights if device_map contains the value `"disk"`.
509
- offload_state_dict (`bool`, *optional*):
510
- If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
511
- the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
512
- when there is some disk offload.
513
- low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
514
- Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
515
- tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
516
- Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
517
- argument to `True` will raise an error.
518
- use_safetensors (`bool`, *optional*, defaults to `None`):
519
- If set to `None`, the safetensors weights are downloaded if they're available **and** if the
520
- safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
521
- weights. If set to `False`, safetensors weights are not loaded.
522
- kwargs (remaining dictionary of keyword arguments, *optional*):
523
- Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
524
- class). The overwritten components are passed directly to the pipelines `__init__` method. See example
525
- below for more information.
526
- variant (`str`, *optional*):
527
- Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when
528
- loading `from_flax`.
529
-
530
- <Tip>
531
-
532
- To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
533
- `huggingface-cli login`.
534
-
535
- </Tip>
536
-
537
- Examples:
538
-
539
- ```py
540
- >>> from diffusers import AutoPipelineForImage2Image
541
-
542
- >>> pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
543
- >>> print(pipeline.__class__)
544
- ```
545
- """
546
- config = cls.load_config(pretrained_model_or_path)
547
- orig_class_name = config["_class_name"]
548
-
549
- if "controlnet" in kwargs:
550
- orig_class_name = config["_class_name"].replace("Pipeline", "ControlNetPipeline")
551
-
552
- image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, orig_class_name)
553
-
554
- return image_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs)
555
-
556
- @classmethod
557
- def from_pipe(cls, pipeline, **kwargs):
558
- r"""
559
- Instantiates an image-to-image PyTorch diffusion pipeline from another instantiated diffusion pipeline class.
560
-
561
- The from_pipe() method takes care of returning the correct pipeline class instance by finding the
562
- image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class name.
563
-
564
- All the modules the pipeline contains will be used to initialize the new pipeline without reallocating
565
- additional memory.
566
-
567
- The pipeline is set in evaluation mode (`model.eval()`) by default.
568
-
569
- Parameters:
570
- pipeline (`DiffusionPipeline`):
571
- an instantiated `DiffusionPipeline` object
572
-
573
- Examples:
574
-
575
- ```py
576
- >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
577
-
578
- >>> pipe_t2i = AutoPipelineForText2Image.from_pretrained(
579
- ... "runwayml/stable-diffusion-v1-5", requires_safety_checker=False
580
- ... )
581
-
582
- >>> pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe_t2i)
583
- ```
584
- """
585
-
586
- original_config = dict(pipeline.config)
587
- original_cls_name = pipeline.__class__.__name__
588
-
589
- # derive the pipeline class to instantiate
590
- image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, original_cls_name)
591
-
592
- # define expected module and optional kwargs given the pipeline signature
593
- expected_modules, optional_kwargs = _get_signature_keys(image_2_image_cls)
594
-
595
- pretrained_model_name_or_path = original_config.pop("_name_or_path", None)
596
-
597
- # allow users to pass modules in `kwargs` to override the original pipeline's components
598
- passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}
599
- original_class_obj = {
600
- k: pipeline.components[k]
601
- for k, v in pipeline.components.items()
602
- if k in expected_modules and k not in passed_class_obj
603
- }
604
-
605
- # allow users to pass optional kwargs to override the original pipeline's config attributes
606
- passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}
607
- original_pipe_kwargs = {
608
- k: original_config[k]
609
- for k, v in original_config.items()
610
- if k in optional_kwargs and k not in passed_pipe_kwargs
611
- }
612
-
613
- # config attributes that were not expected by the original pipeline are stored as private attributes
614
- # we will pass them as optional arguments if they can be accepted by the pipeline
615
- additional_pipe_kwargs = [
616
- k[1:]
617
- for k in original_config.keys()
618
- if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs
619
- ]
620
- for k in additional_pipe_kwargs:
621
- original_pipe_kwargs[k] = original_config.pop(f"_{k}")
622
-
623
- image_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs}
624
-
625
- # store unused config as private attribute
626
- unused_original_config = {
627
- f"{'' if k.startswith('_') else '_'}{k}": original_config[k]
628
- for k, v in original_config.items()
629
- if k not in image_2_image_kwargs
630
- }
631
-
632
- missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(image_2_image_kwargs.keys())
633
-
634
- if len(missing_modules) > 0:
635
- raise ValueError(
636
- f"Pipeline {image_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed"
637
- )
638
-
639
- model = image_2_image_cls(**image_2_image_kwargs)
640
- model.register_to_config(_name_or_path=pretrained_model_name_or_path)
641
- model.register_to_config(**unused_original_config)
642
-
643
- return model
644
-
645
-
646
- class AutoPipelineForInpainting(ConfigMixin):
647
- r"""
648
-
649
- AutoPipeline for inpainting generation.
650
-
651
- [`AutoPipelineForInpainting`] is a generic pipeline class that will be instantiated as one of the inpainting
652
- pipeline classes in diffusers.
653
-
654
- The pipeline type (for example [`IFInpaintingPipeline`]) is automatically selected when created with the
655
- AutoPipelineForInpainting.from_pretrained(pretrained_model_name_or_path) or
656
- AutoPipelineForInpainting.from_pipe(pipeline) class methods.
657
-
658
- This class cannot be instantiated using __init__() (throws an error).
659
-
660
- Class attributes:
661
-
662
- - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the
663
- diffusion pipeline's components.
664
-
665
- """
666
- config_name = "model_index.json"
667
-
668
- def __init__(self, *args, **kwargs):
669
- raise EnvironmentError(
670
- f"{self.__class__.__name__} is designed to be instantiated "
671
- f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
672
- f"`{self.__class__.__name__}.from_pipe(pipeline)` methods."
673
- )
674
-
675
- @classmethod
676
- def from_pretrained(cls, pretrained_model_or_path, **kwargs):
677
- r"""
678
- Instantiates an inpainting PyTorch diffusion pipeline from pretrained pipeline weights.
679
-
680
- The from_pretrained() method takes care of returning the correct pipeline class instance by:
681
- 1. Detecting the pipeline class of the pretrained_model_or_path based on the _class_name property of its
682
- config object
683
- 2. Finding the inpainting pipeline linked to the pipeline class using pattern matching on pipeline class name.
684
-
685
- If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetInpaintPipeline`] object.
686
-
687
- The pipeline is set in evaluation mode (`model.eval()`) by default.
688
-
689
- If you get the error message below, you need to finetune the weights for your downstream task:
690
-
691
- ```
692
- Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
693
- - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
694
- You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
695
- ```
696
-
697
- Parameters:
698
- pretrained_model_or_path (`str` or `os.PathLike`, *optional*):
699
- Can be either:
700
-
701
- - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
702
- hosted on the Hub.
703
- - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights
704
- saved using
705
- [`~DiffusionPipeline.save_pretrained`].
706
- torch_dtype (`str` or `torch.dtype`, *optional*):
707
- Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the
708
- dtype is automatically derived from the model's weights.
709
- force_download (`bool`, *optional*, defaults to `False`):
710
- Whether or not to force the (re-)download of the model weights and configuration files, overriding the
711
- cached versions if they exist.
712
- cache_dir (`Union[str, os.PathLike]`, *optional*):
713
- Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
714
- is not used.
715
- resume_download (`bool`, *optional*, defaults to `False`):
716
- Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
717
- incompletely downloaded files are deleted.
718
- proxies (`Dict[str, str]`, *optional*):
719
- A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
720
- 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
721
- output_loading_info(`bool`, *optional*, defaults to `False`):
722
- Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
723
- local_files_only (`bool`, *optional*, defaults to `False`):
724
- Whether to only load local model weights and configuration files or not. If set to `True`, the model
725
- won't be downloaded from the Hub.
726
- use_auth_token (`str` or *bool*, *optional*):
727
- The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
728
- `diffusers-cli login` (stored in `~/.huggingface`) is used.
729
- revision (`str`, *optional*, defaults to `"main"`):
730
- The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
731
- allowed by Git.
732
- custom_revision (`str`, *optional*, defaults to `"main"`):
733
- The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
734
- `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a
735
- custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub.
736
- mirror (`str`, *optional*):
737
- Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not
738
- guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
739
- information.
740
- device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
741
- A map that specifies where each submodule should go. It doesn’t need to be defined for each
742
- parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
743
- same device.
744
-
745
- Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For
746
- more information about each option see [designing a device
747
- map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
748
- max_memory (`Dict`, *optional*):
749
- A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
750
- each GPU and the available CPU RAM if unset.
751
- offload_folder (`str` or `os.PathLike`, *optional*):
752
- The path to offload weights if device_map contains the value `"disk"`.
753
- offload_state_dict (`bool`, *optional*):
754
- If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
755
- the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
756
- when there is some disk offload.
757
- low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
758
- Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
759
- tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
760
- Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
761
- argument to `True` will raise an error.
762
- use_safetensors (`bool`, *optional*, defaults to `None`):
763
- If set to `None`, the safetensors weights are downloaded if they're available **and** if the
764
- safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
765
- weights. If set to `False`, safetensors weights are not loaded.
766
- kwargs (remaining dictionary of keyword arguments, *optional*):
767
- Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
768
- class). The overwritten components are passed directly to the pipelines `__init__` method. See example
769
- below for more information.
770
- variant (`str`, *optional*):
771
- Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when
772
- loading `from_flax`.
773
-
774
- <Tip>
775
-
776
- To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
777
- `huggingface-cli login`.
778
-
779
- </Tip>
780
-
781
- Examples:
782
-
783
- ```py
784
- >>> from diffusers import AutoPipelineForInpainting
785
-
786
- >>> pipeline = AutoPipelineForInpainting.from_pretrained("runwayml/stable-diffusion-v1-5")
787
- >>> print(pipeline.__class__)
788
- ```
789
- """
790
- config = cls.load_config(pretrained_model_or_path)
791
- orig_class_name = config["_class_name"]
792
-
793
- if "controlnet" in kwargs:
794
- orig_class_name = config["_class_name"].replace("Pipeline", "ControlNetPipeline")
795
-
796
- inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, orig_class_name)
797
-
798
- return inpainting_cls.from_pretrained(pretrained_model_or_path, **kwargs)
799
-
800
- @classmethod
801
- def from_pipe(cls, pipeline, **kwargs):
802
- r"""
803
- Instantiates an inpainting PyTorch diffusion pipeline from another instantiated diffusion pipeline class.
804
-
805
- The from_pipe() method takes care of returning the correct pipeline class instance by finding the inpainting
806
- pipeline linked to the pipeline class using pattern matching on pipeline class name.
807
-
808
- All the modules the pipeline contains will be used to initialize the new pipeline without reallocating
809
- additional memory.
810
-
811
- The pipeline is set in evaluation mode (`model.eval()`) by default.
812
-
813
- Parameters:
814
- pipeline (`DiffusionPipeline`):
815
- an instantiated `DiffusionPipeline` object
816
-
817
- Examples:
818
-
819
- ```py
820
- >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForInpainting
821
-
822
- >>> pipe_t2i = AutoPipelineForText2Image.from_pretrained(
823
- ... "DeepFloyd/IF-I-XL-v1.0", requires_safety_checker=False
824
- ... )
825
-
826
- >>> pipe_inpaint = AutoPipelineForInpainting.from_pipe(pipe_t2i)
827
- ```
828
- """
829
- original_config = dict(pipeline.config)
830
- original_cls_name = pipeline.__class__.__name__
831
-
832
- # derive the pipeline class to instantiate
833
- inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, original_cls_name)
834
-
835
- # define expected module and optional kwargs given the pipeline signature
836
- expected_modules, optional_kwargs = _get_signature_keys(inpainting_cls)
837
-
838
- pretrained_model_name_or_path = original_config.pop("_name_or_path", None)
839
-
840
- # allow users to pass modules in `kwargs` to override the original pipeline's components
841
- passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}
842
- original_class_obj = {
843
- k: pipeline.components[k]
844
- for k, v in pipeline.components.items()
845
- if k in expected_modules and k not in passed_class_obj
846
- }
847
-
848
- # allow users to pass optional kwargs to override the original pipeline's config attributes
849
- passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}
850
- original_pipe_kwargs = {
851
- k: original_config[k]
852
- for k, v in original_config.items()
853
- if k in optional_kwargs and k not in passed_pipe_kwargs
854
- }
855
-
856
- # config attributes that were not expected by the original pipeline are stored as private attributes
857
- # we will pass them as optional arguments if they can be accepted by the pipeline
858
- additional_pipe_kwargs = [
859
- k[1:]
860
- for k in original_config.keys()
861
- if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs
862
- ]
863
- for k in additional_pipe_kwargs:
864
- original_pipe_kwargs[k] = original_config.pop(f"_{k}")
865
-
866
- inpainting_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs}
867
-
868
- # store unused config as private attribute
869
- unused_original_config = {
870
- f"{'' if k.startswith('_') else '_'}{k}": original_config[k]
871
- for k, v in original_config.items()
872
- if k not in inpainting_kwargs
873
- }
874
-
875
- missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(inpainting_kwargs.keys())
876
-
877
- if len(missing_modules) > 0:
878
- raise ValueError(
879
- f"Pipeline {inpainting_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed"
880
- )
881
-
882
- model = inpainting_cls(**inpainting_kwargs)
883
- model.register_to_config(_name_or_path=pretrained_model_name_or_path)
884
- model.register_to_config(**unused_original_config)
885
-
886
- return model
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/dense_test_mixins.py DELETED
@@ -1,100 +0,0 @@
1
- from inspect import signature
2
-
3
- import torch
4
-
5
- from mmdet.core import bbox2result, bbox_mapping_back, multiclass_nms
6
-
7
-
8
- class BBoxTestMixin(object):
9
- """Mixin class for test time augmentation of bboxes."""
10
-
11
- def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):
12
- """Merge augmented detection bboxes and scores.
13
-
14
- Args:
15
- aug_bboxes (list[Tensor]): shape (n, 4*#class)
16
- aug_scores (list[Tensor] or None): shape (n, #class)
17
- img_metas (list[list[dict]]): meta information of each image; each inner dict carries 'img_shape', 'scale_factor', 'flip' and 'flip_direction'.
18
-
19
- Returns:
20
- tuple: (bboxes, scores)
21
- """
22
- recovered_bboxes = []
23
- for bboxes, img_info in zip(aug_bboxes, img_metas):
24
- img_shape = img_info[0]['img_shape']
25
- scale_factor = img_info[0]['scale_factor']
26
- flip = img_info[0]['flip']
27
- flip_direction = img_info[0]['flip_direction']
28
- bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
29
- flip_direction)
30
- recovered_bboxes.append(bboxes)
31
- bboxes = torch.cat(recovered_bboxes, dim=0)
32
- if aug_scores is None:
33
- return bboxes
34
- else:
35
- scores = torch.cat(aug_scores, dim=0)
36
- return bboxes, scores
37
-
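`bbox_mapping_back` undoes the test-time transforms before the boxes are concatenated; for a horizontal flip this means mirroring x-coordinates about the image width. A self-contained sketch of that mirroring step (a simplification, not the mmdet implementation itself):

```py
import torch

def unflip_horizontal(bboxes: torch.Tensor, img_w: int) -> torch.Tensor:
    # Mirror (x1, y1, x2, y2) boxes about the image width.
    # Simplified; not mmdet's bbox_mapping_back, which also rescales.
    out = bboxes.clone()
    out[:, 0] = img_w - bboxes[:, 2]
    out[:, 2] = img_w - bboxes[:, 0]
    return out

boxes = torch.tensor([[10.0, 5.0, 30.0, 25.0]])
print(unflip_horizontal(boxes, img_w=100))  # tensor([[70., 5., 90., 25.]])
```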
38
- def aug_test_bboxes(self, feats, img_metas, rescale=False):
39
- """Test det bboxes with test time augmentation.
40
-
41
- Args:
42
- feats (list[Tensor]): the outer list indicates test-time
43
- augmentations and inner Tensor should have a shape NxCxHxW,
44
- which contains features for all images in the batch.
45
- img_metas (list[list[dict]]): the outer list indicates test-time
46
- augs (multiscale, flip, etc.) and the inner list indicates
47
- images in a batch. each dict has image information.
48
- rescale (bool, optional): Whether to rescale the results.
49
- Defaults to False.
50
-
51
- Returns:
52
- list[ndarray]: bbox results of each class
53
- """
54
- # check with_nms argument
55
- gb_sig = signature(self.get_bboxes)
56
- gb_args = [p.name for p in gb_sig.parameters.values()]
57
- if hasattr(self, '_get_bboxes'):
58
- gbs_sig = signature(self._get_bboxes)
59
- else:
60
- gbs_sig = signature(self._get_bboxes_single)
61
- gbs_args = [p.name for p in gbs_sig.parameters.values()]
62
- assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \
63
- f'{self.__class__.__name__}' \
64
- ' does not support test-time augmentation'
65
-
66
- aug_bboxes = []
67
- aug_scores = []
68
- aug_factors = [] # score_factors for NMS
69
- for x, img_meta in zip(feats, img_metas):
70
- # only one image in the batch
71
- outs = self.forward(x)
72
- bbox_inputs = outs + (img_meta, self.test_cfg, False, False)
73
- bbox_outputs = self.get_bboxes(*bbox_inputs)[0]
74
- aug_bboxes.append(bbox_outputs[0])
75
- aug_scores.append(bbox_outputs[1])
76
- # bbox_outputs of some detectors (e.g., ATSS, FCOS, YOLOv3)
77
- # contains additional element to adjust scores before NMS
78
- if len(bbox_outputs) >= 3:
79
- aug_factors.append(bbox_outputs[2])
80
-
81
- # after merging, bboxes will be rescaled to the original image size
82
- merged_bboxes, merged_scores = self.merge_aug_bboxes(
83
- aug_bboxes, aug_scores, img_metas)
84
- merged_factors = torch.cat(aug_factors, dim=0) if aug_factors else None
85
- det_bboxes, det_labels = multiclass_nms(
86
- merged_bboxes,
87
- merged_scores,
88
- self.test_cfg.score_thr,
89
- self.test_cfg.nms,
90
- self.test_cfg.max_per_img,
91
- score_factors=merged_factors)
92
-
93
- if rescale:
94
- _det_bboxes = det_bboxes
95
- else:
96
- _det_bboxes = det_bboxes.clone()
97
- _det_bboxes[:, :4] *= det_bboxes.new_tensor(
98
- img_metas[0][0]['scale_factor'])
99
- bbox_results = bbox2result(_det_bboxes, det_labels, self.num_classes)
100
- return bbox_results
spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/README.md DELETED
@@ -1,48 +0,0 @@
1
- # GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @inproceedings{cao2019gcnet,
9
- title={Gcnet: Non-local networks meet squeeze-excitation networks and beyond},
10
- author={Cao, Yue and Xu, Jiarui and Lin, Stephen and Wei, Fangyun and Hu, Han},
11
- booktitle={Proceedings of the IEEE International Conference on Computer Vision Workshops},
12
- pages={0--0},
13
- year={2019}
14
- }
15
- ```
16
-
17
- ## Results and models
18
-
19
- ### Cityscapes
20
-
21
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
22
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
23
- | GCNet | R-50-D8 | 512x1024 | 40000 | 5.8 | 3.93 | 77.69 | 78.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436-4b0fd17b.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436.log.json) |
24
- | GCNet | R-101-D8 | 512x1024 | 40000 | 9.2 | 2.61 | 78.28 | 79.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436-5e62567f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436.log.json) |
25
- | GCNet | R-50-D8 | 769x769 | 40000 | 6.5 | 1.67 | 78.12 | 80.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814-a26f4471.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814.log.json) |
26
- | GCNet | R-101-D8 | 769x769 | 40000 | 10.5 | 1.13 | 78.95 | 80.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550-ca4f0a84.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550.log.json) |
27
- | GCNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.48 | 80.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450-ef8f069b.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450.log.json) |
28
- | GCNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.03 | 79.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450-778ebf69.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450.log.json) |
29
- | GCNet | R-50-D8 | 769x769 | 80000 | - | - | 78.68 | 80.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516-4839565b.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516.log.json) |
30
- | GCNet | R-101-D8 | 769x769 | 80000 | - | - | 79.18 | 80.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628-8e043423.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628.log.json) |
31
-
32
- ### ADE20K
33
-
34
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
35
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
36
- | GCNet | R-50-D8 | 512x512 | 80000 | 8.5 | 23.38 | 41.47 | 42.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146-91a6da41.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146.log.json) |
37
- | GCNet | R-101-D8 | 512x512 | 80000 | 12 | 15.20 | 42.82 | 44.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811-c3fcb6dd.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811.log.json) |
38
- | GCNet | R-50-D8 | 512x512 | 160000 | - | - | 42.37 | 43.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122-d95f3e1f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122.log.json) |
39
- | GCNet | R-101-D8 | 512x512 | 160000 | - | - | 43.69 | 45.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406-615528d7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406.log.json) |
40
-
41
- ### Pascal VOC 2012 + Aug
42
-
43
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
44
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
45
- | GCNet | R-50-D8 | 512x512 | 20000 | 5.8 | 23.35 | 76.42 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701-3cbfdab1.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701.log.json) |
46
- | GCNet | R-101-D8 | 512x512 | 20000 | 9.2 | 14.80 | 77.41 | 78.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713-6c720aa9.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713.log.json) |
47
- | GCNet | R-50-D8 | 512x512 | 40000 | - | - | 76.24 | 77.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105-9797336d.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105.log.json) |
48
- | GCNet | R-101-D8 | 512x512 | 40000 | - | - | 77.84 | 78.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806-1e38208d.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806.log.json) |
spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './gcnet_r50-d8_769x769_40k_cityscapes.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
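This two-line config relies on the `_base_` inheritance mechanism: the child dict is merged recursively onto the parent config, so only `pretrained` and `backbone.depth` change while everything else is inherited. A simplified sketch of that recursive merge (the parent values below are illustrative, not the full parent config):

```py
# Simplified sketch of the recursive dict merge behind `_base_` inheritance.
def merge(base: dict, override: dict) -> dict:
    out = dict(base)
    for k, v in override.items():
        if isinstance(v, dict) and isinstance(out.get(k), dict):
            out[k] = merge(out[k], v)
        else:
            out[k] = v
    return out

parent = {"model": {"pretrained": "open-mmlab://resnet50_v1c",
                    "backbone": {"depth": 50, "type": "ResNetV1c"}}}
child = {"model": {"pretrained": "open-mmlab://resnet101_v1c",
                   "backbone": {"depth": 101}}}
print(merge(parent, child)["model"]["backbone"])
# {'depth': 101, 'type': 'ResNetV1c'}
```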
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_context_59.py DELETED
@@ -1,60 +0,0 @@
1
- # dataset settings
2
- dataset_type = 'PascalContextDataset59'
3
- data_root = 'data/VOCdevkit/VOC2010/'
4
- img_norm_cfg = dict(
5
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
6
-
7
- img_scale = (520, 520)
8
- crop_size = (480, 480)
9
-
10
- train_pipeline = [
11
- dict(type='LoadImageFromFile'),
12
- dict(type='LoadAnnotations', reduce_zero_label=True),
13
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
14
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
15
- dict(type='RandomFlip', prob=0.5),
16
- dict(type='PhotoMetricDistortion'),
17
- dict(type='Normalize', **img_norm_cfg),
18
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
19
- dict(type='DefaultFormatBundle'),
20
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
21
- ]
22
- test_pipeline = [
23
- dict(type='LoadImageFromFile'),
24
- dict(
25
- type='MultiScaleFlipAug',
26
- img_scale=img_scale,
27
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
28
- flip=False,
29
- transforms=[
30
- dict(type='Resize', keep_ratio=True),
31
- dict(type='RandomFlip'),
32
- dict(type='Normalize', **img_norm_cfg),
33
- dict(type='ImageToTensor', keys=['img']),
34
- dict(type='Collect', keys=['img']),
35
- ])
36
- ]
37
- data = dict(
38
- samples_per_gpu=4,
39
- workers_per_gpu=4,
40
- train=dict(
41
- type=dataset_type,
42
- data_root=data_root,
43
- img_dir='JPEGImages',
44
- ann_dir='SegmentationClassContext',
45
- split='ImageSets/SegmentationContext/train.txt',
46
- pipeline=train_pipeline),
47
- val=dict(
48
- type=dataset_type,
49
- data_root=data_root,
50
- img_dir='JPEGImages',
51
- ann_dir='SegmentationClassContext',
52
- split='ImageSets/SegmentationContext/val.txt',
53
- pipeline=test_pipeline),
54
- test=dict(
55
- type=dataset_type,
56
- data_root=data_root,
57
- img_dir='JPEGImages',
58
- ann_dir='SegmentationClassContext',
59
- split='ImageSets/SegmentationContext/val.txt',
60
- pipeline=test_pipeline))
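As a `_base_` dataset config, this file is consumed through mmcv's config loader rather than imported directly. A hedged sketch of inspecting it (assuming mmcv < 2.0 is installed and the path matches your checkout):

```py
from mmcv import Config

# Hypothetical local path; adjust to where the config lives in your checkout.
cfg = Config.fromfile("configs/_base_/datasets/pascal_context_59.py")
print(cfg.data.train.type)       # 'PascalContextDataset59'
print(cfg.data.samples_per_gpu)  # 4
```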
spaces/Anonymous-sub/Rerender/gmflow_module/utils/misc.py DELETED
@@ -1,42 +0,0 @@
- import os
- import numpy as np
- import sys
- import json
-
-
- def read_text_lines(filepath):
-     with open(filepath, 'r') as f:
-         lines = f.readlines()
-     lines = [l.rstrip() for l in lines]
-     return lines
-
-
- def check_path(path):
-     if not os.path.exists(path):
-         os.makedirs(path, exist_ok=True)  # explicitly set exist_ok when multi-processing
-
-
- def save_command(save_path, filename='command_train.txt'):
-     check_path(save_path)
-     command = sys.argv
-     save_file = os.path.join(save_path, filename)
-     # Save all training commands when resuming training
-     with open(save_file, 'a') as f:
-         f.write(' '.join(command))
-         f.write('\n\n')
-
-
- def save_args(args, filename='args.json'):
-     args_dict = vars(args)
-     check_path(args.checkpoint_dir)
-     save_path = os.path.join(args.checkpoint_dir, filename)
-
-     # Save all training args when resuming training
-     with open(save_path, 'a') as f:
-         json.dump(args_dict, f, indent=4, sort_keys=False)
-         f.write('\n\n')
-
-
- def int_list(s):
-     """Convert string to int list"""
-     return [int(x) for x in s.split(',')]
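A minimal sketch of how these helpers are typically wired into a training script; the argument names are illustrative, not taken from gmflow's own scripts:

```python
# Minimal sketch: using the misc helpers with argparse.
# The argument names here are illustrative assumptions.
import argparse

from utils.misc import int_list, save_args, save_command

parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_dir', default='checkpoints/run1')
parser.add_argument('--padding_factor', type=int_list, default=[16, 16])
args = parser.parse_args()

save_command(args.checkpoint_dir)  # appends the launch command to command_train.txt
save_args(args)                    # appends all args as JSON to args.json
```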
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/__pip-runner__.py DELETED
@@ -1,50 +0,0 @@
- """Execute exactly this copy of pip, within a different environment.
-
- This file is named as it is, to ensure that this module can't be imported via
- an import statement.
- """
-
- # /!\ This version compatibility check section must be Python 2 compatible. /!\
-
- import sys
-
- # Copied from setup.py
- PYTHON_REQUIRES = (3, 7)
-
-
- def version_str(version):  # type: ignore
-     return ".".join(str(v) for v in version)
-
-
- if sys.version_info[:2] < PYTHON_REQUIRES:
-     raise SystemExit(
-         "This version of pip does not support python {} (requires >={}).".format(
-             version_str(sys.version_info[:2]), version_str(PYTHON_REQUIRES)
-         )
-     )
-
- # From here on, we can use Python 3 features, but the syntax must remain
- # Python 2 compatible.
-
- import runpy  # noqa: E402
- from importlib.machinery import PathFinder  # noqa: E402
- from os.path import dirname  # noqa: E402
-
- PIP_SOURCES_ROOT = dirname(dirname(__file__))
-
-
- class PipImportRedirectingFinder:
-     @classmethod
-     def find_spec(self, fullname, path=None, target=None):  # type: ignore
-         if fullname != "pip":
-             return None
-
-         spec = PathFinder.find_spec(fullname, [PIP_SOURCES_ROOT], target)
-         assert spec, (PIP_SOURCES_ROOT, fullname)
-         return spec
-
-
- sys.meta_path.insert(0, PipImportRedirectingFinder())
-
- assert __name__ == "__main__", "Cannot run __pip-runner__.py as a non-main module"
- runpy.run_module("pip", run_name="__main__", alter_sys=True)
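The meta-path finder above pins `pip` imports to this source tree, which lets one copy of pip be executed by another interpreter. A minimal sketch of that invocation; both paths below are hypothetical:

```python
# Minimal sketch: executing this copy of pip under a different interpreter.
# Both paths are hypothetical placeholders.
import subprocess

subprocess.run(
    [
        "/usr/bin/python3.11",  # any interpreter satisfying PYTHON_REQUIRES
        "/path/to/site-packages/pip/__pip-runner__.py",
        "install", "--no-deps", "somepackage",
    ],
    check=True,
)
```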
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/network/utils.py DELETED
@@ -1,96 +0,0 @@
- from typing import Dict, Generator
-
- from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
-
- from pip._internal.exceptions import NetworkConnectionError
-
- # The following comments and HTTP headers were originally added by
- # Donald Stufft in git commit 22c562429a61bb77172039e480873fb239dd8c03.
- #
- # We use Accept-Encoding: identity here because requests defaults to
- # accepting compressed responses. This breaks in a variety of ways
- # depending on how the server is configured.
- # - Some servers will notice that the file isn't a compressible file
- #   and will leave the file alone and with an empty Content-Encoding
- # - Some servers will notice that the file is already compressed and
- #   will leave the file alone, adding a Content-Encoding: gzip header
- # - Some servers won't notice anything at all and will take a file
- #   that's already been compressed and compress it again, and set
- #   the Content-Encoding: gzip header
- # By setting this to request only the identity encoding we're hoping
- # to eliminate the third case. Hopefully there does not exist a server
- # which when given a file will notice it is already compressed and that
- # you're not asking for a compressed file and will then decompress it
- # before sending because if that's the case I don't think it'll ever be
- # possible to make this work.
- HEADERS: Dict[str, str] = {"Accept-Encoding": "identity"}
-
-
- def raise_for_status(resp: Response) -> None:
-     http_error_msg = ""
-     if isinstance(resp.reason, bytes):
-         # We attempt to decode utf-8 first because some servers
-         # choose to localize their reason strings. If the string
-         # isn't utf-8, we fall back to iso-8859-1 for all other
-         # encodings.
-         try:
-             reason = resp.reason.decode("utf-8")
-         except UnicodeDecodeError:
-             reason = resp.reason.decode("iso-8859-1")
-     else:
-         reason = resp.reason
-
-     if 400 <= resp.status_code < 500:
-         http_error_msg = (
-             f"{resp.status_code} Client Error: {reason} for url: {resp.url}"
-         )
-
-     elif 500 <= resp.status_code < 600:
-         http_error_msg = (
-             f"{resp.status_code} Server Error: {reason} for url: {resp.url}"
-         )
-
-     if http_error_msg:
-         raise NetworkConnectionError(http_error_msg, response=resp)
-
-
- def response_chunks(
-     response: Response, chunk_size: int = CONTENT_CHUNK_SIZE
- ) -> Generator[bytes, None, None]:
-     """Given a requests Response, provide the data chunks."""
-     try:
-         # Special case for urllib3.
-         for chunk in response.raw.stream(
-             chunk_size,
-             # We use decode_content=False here because we don't
-             # want urllib3 to mess with the raw bytes we get
-             # from the server. If we decompress inside of
-             # urllib3 then we cannot verify the checksum
-             # because the checksum will be of the compressed
-             # file. This breakage will only occur if the
-             # server adds a Content-Encoding header, which
-             # depends on how the server was configured:
-             # - Some servers will notice that the file isn't a
-             #   compressible file and will leave the file alone
-             #   and with an empty Content-Encoding
-             # - Some servers will notice that the file is
-             #   already compressed and will leave the file
-             #   alone and will add a Content-Encoding: gzip
-             #   header
-             # - Some servers won't notice anything at all and
-             #   will take a file that's already been compressed
-             #   and compress it again and set the
-             #   Content-Encoding: gzip header
-             #
-             # By setting this not to decode automatically we
-             # hope to eliminate problems with the second case.
-             decode_content=False,
-         ):
-             yield chunk
-     except AttributeError:
-         # Standard file-like object.
-         while True:
-             chunk = response.raw.read(chunk_size)
-             if not chunk:
-                 break
-             yield chunk
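Taken together, these helpers implement a checksum-safe streaming download. A minimal usage sketch; note that pip._internal is a private API and the URL is hypothetical:

```python
# Minimal sketch: streaming a download through the helpers above while
# hashing the raw (un-decoded) bytes. Private API; URL is hypothetical.
import hashlib

from pip._vendor import requests
from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks

resp = requests.get("https://example.com/pkg.tar.gz", headers=HEADERS, stream=True)
raise_for_status(resp)  # raises NetworkConnectionError on 4xx/5xx

digest = hashlib.sha256()
with open("pkg.tar.gz", "wb") as f:
    for chunk in response_chunks(resp):  # raw bytes, never re-decoded
        digest.update(chunk)
        f.write(chunk)
print(digest.hexdigest())
```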
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/archive_util.py DELETED
@@ -1,280 +0,0 @@
- """distutils.archive_util
-
- Utility functions for creating archive files (tarballs, zip files,
- that sort of thing)."""
-
- import os
- from warnings import warn
- import sys
-
- try:
-     import zipfile
- except ImportError:
-     zipfile = None
-
-
- from distutils.errors import DistutilsExecError
- from distutils.spawn import spawn
- from distutils.dir_util import mkpath
- from distutils import log
-
- try:
-     from pwd import getpwnam
- except ImportError:
-     getpwnam = None
-
- try:
-     from grp import getgrnam
- except ImportError:
-     getgrnam = None
-
-
- def _get_gid(name):
-     """Returns a gid, given a group name."""
-     if getgrnam is None or name is None:
-         return None
-     try:
-         result = getgrnam(name)
-     except KeyError:
-         result = None
-     if result is not None:
-         return result[2]
-     return None
-
-
- def _get_uid(name):
-     """Returns an uid, given a user name."""
-     if getpwnam is None or name is None:
-         return None
-     try:
-         result = getpwnam(name)
-     except KeyError:
-         result = None
-     if result is not None:
-         return result[2]
-     return None
-
-
- def make_tarball(
-     base_name, base_dir, compress="gzip", verbose=0, dry_run=0, owner=None, group=None
- ):
-     """Create a (possibly compressed) tar file from all the files under
-     'base_dir'.
-
-     'compress' must be "gzip" (the default), "bzip2", "xz", "compress", or
-     None.  ("compress" will be deprecated in Python 3.2)
-
-     'owner' and 'group' can be used to define an owner and a group for the
-     archive that is being built. If not provided, the current owner and group
-     will be used.
-
-     The output tar file will be named 'base_dir' + ".tar", possibly plus
-     the appropriate compression extension (".gz", ".bz2", ".xz" or ".Z").
-
-     Returns the output filename.
-     """
-     tar_compression = {
-         'gzip': 'gz',
-         'bzip2': 'bz2',
-         'xz': 'xz',
-         None: '',
-         'compress': '',
-     }
-     compress_ext = {'gzip': '.gz', 'bzip2': '.bz2', 'xz': '.xz', 'compress': '.Z'}
-
-     # flags for compression program, each element of list will be an argument
-     if compress is not None and compress not in compress_ext.keys():
-         raise ValueError(
-             "bad value for 'compress': must be None, 'gzip', 'bzip2', "
-             "'xz' or 'compress'"
-         )
-
-     archive_name = base_name + '.tar'
-     if compress != 'compress':
-         archive_name += compress_ext.get(compress, '')
-
-     mkpath(os.path.dirname(archive_name), dry_run=dry_run)
-
-     # creating the tarball
-     import tarfile  # late import so Python build itself doesn't break
-
-     log.info('Creating tar archive')
-
-     uid = _get_uid(owner)
-     gid = _get_gid(group)
-
-     def _set_uid_gid(tarinfo):
-         if gid is not None:
-             tarinfo.gid = gid
-             tarinfo.gname = group
-         if uid is not None:
-             tarinfo.uid = uid
-             tarinfo.uname = owner
-         return tarinfo
-
-     if not dry_run:
-         tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
-         try:
-             tar.add(base_dir, filter=_set_uid_gid)
-         finally:
-             tar.close()
-
-     # compression using `compress`
-     if compress == 'compress':
-         warn("'compress' is deprecated.", DeprecationWarning)
-         # the option varies depending on the platform
-         compressed_name = archive_name + compress_ext[compress]
-         if sys.platform == 'win32':
-             cmd = [compress, archive_name, compressed_name]
-         else:
-             cmd = [compress, '-f', archive_name]
-         spawn(cmd, dry_run=dry_run)
-         return compressed_name
-
-     return archive_name
-
-
- def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):  # noqa: C901
-     """Create a zip file from all the files under 'base_dir'.
-
-     The output zip file will be named 'base_name' + ".zip".  Uses either the
-     "zipfile" Python module (if available) or the InfoZIP "zip" utility
-     (if installed and found on the default search path).  If neither tool is
-     available, raises DistutilsExecError.  Returns the name of the output zip
-     file.
-     """
-     zip_filename = base_name + ".zip"
-     mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
-
-     # If zipfile module is not available, try spawning an external
-     # 'zip' command.
-     if zipfile is None:
-         if verbose:
-             zipoptions = "-r"
-         else:
-             zipoptions = "-rq"
-
-         try:
-             spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
-         except DistutilsExecError:
-             # XXX really should distinguish between "couldn't find
-             # external 'zip' command" and "zip failed".
-             raise DistutilsExecError(
-                 (
-                     "unable to create zip file '%s': "
-                     "could neither import the 'zipfile' module nor "
-                     "find a standalone zip utility"
-                 )
-                 % zip_filename
-             )
-
-     else:
-         log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
-
-         if not dry_run:
-             try:
-                 zip = zipfile.ZipFile(
-                     zip_filename, "w", compression=zipfile.ZIP_DEFLATED
-                 )
-             except RuntimeError:
-                 zip = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_STORED)
-
-             with zip:
-                 if base_dir != os.curdir:
-                     path = os.path.normpath(os.path.join(base_dir, ''))
-                     zip.write(path, path)
-                     log.info("adding '%s'", path)
-                 for dirpath, dirnames, filenames in os.walk(base_dir):
-                     for name in dirnames:
-                         path = os.path.normpath(os.path.join(dirpath, name, ''))
-                         zip.write(path, path)
-                         log.info("adding '%s'", path)
-                     for name in filenames:
-                         path = os.path.normpath(os.path.join(dirpath, name))
-                         if os.path.isfile(path):
-                             zip.write(path, path)
-                             log.info("adding '%s'", path)
-
-     return zip_filename
-
-
- ARCHIVE_FORMATS = {
-     'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
-     'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
-     'xztar': (make_tarball, [('compress', 'xz')], "xz'ed tar-file"),
-     'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
-     'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
-     'zip': (make_zipfile, [], "ZIP file"),
- }
-
-
- def check_archive_formats(formats):
-     """Returns the first format from the 'format' list that is unknown.
-
-     If all formats are known, returns None
-     """
-     for format in formats:
-         if format not in ARCHIVE_FORMATS:
-             return format
-     return None
-
-
- def make_archive(
-     base_name,
-     format,
-     root_dir=None,
-     base_dir=None,
-     verbose=0,
-     dry_run=0,
-     owner=None,
-     group=None,
- ):
-     """Create an archive file (eg. zip or tar).
-
-     'base_name' is the name of the file to create, minus any format-specific
-     extension; 'format' is the archive format: one of "zip", "tar", "gztar",
-     "bztar", "xztar", or "ztar".
-
-     'root_dir' is a directory that will be the root directory of the
-     archive; ie. we typically chdir into 'root_dir' before creating the
-     archive.  'base_dir' is the directory where we start archiving from;
-     ie. 'base_dir' will be the common prefix of all files and
-     directories in the archive.  'root_dir' and 'base_dir' both default
-     to the current directory.  Returns the name of the archive file.
-
-     'owner' and 'group' are used when creating a tar archive. By default,
-     uses the current owner and group.
-     """
-     save_cwd = os.getcwd()
-     if root_dir is not None:
-         log.debug("changing into '%s'", root_dir)
-         base_name = os.path.abspath(base_name)
-         if not dry_run:
-             os.chdir(root_dir)
-
-     if base_dir is None:
-         base_dir = os.curdir
-
-     kwargs = {'dry_run': dry_run}
-
-     try:
-         format_info = ARCHIVE_FORMATS[format]
-     except KeyError:
-         raise ValueError("unknown archive format '%s'" % format)
-
-     func = format_info[0]
-     for arg, val in format_info[1]:
-         kwargs[arg] = val
-
-     if format != 'zip':
-         kwargs['owner'] = owner
-         kwargs['group'] = group
-
-     try:
-         filename = func(base_name, base_dir, **kwargs)
-     finally:
-         if root_dir is not None:
-             log.debug("changing back to '%s'", save_cwd)
-             os.chdir(save_cwd)
-
-     return filename
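The module's entry point is make_archive, which dispatches through ARCHIVE_FORMATS and forwards the per-format keyword arguments. A minimal sketch of a call (directory and archive names are illustrative; shutil.make_archive is the modern replacement for this deprecated API):

```python
# Minimal sketch: creating a gzip'ed tarball with the API shown above.
# Names are illustrative; "build/myproj" is assumed to exist.
from distutils.archive_util import make_archive

filename = make_archive(
    "dist/myproj-1.0",  # base name; ".tar.gz" is appended for gztar
    "gztar",
    root_dir="build",   # chdir here before archiving
    base_dir="myproj",  # common prefix of all entries in the archive
)
print(filename)         # -> "dist/myproj-1.0.tar.gz"
```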
 
spaces/Atualli/node-media-server/README.md DELETED
@@ -1,11 +0,0 @@
- ---
- title: Node Media Server
- emoji: 🚀
- colorFrom: indigo
- colorTo: gray
- sdk: docker
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/lazyconfigs.md DELETED
@@ -1,170 +0,0 @@
- # Lazy Configs
-
- The traditional yacs-based config system provides basic, standard functionalities.
- However, it does not offer enough flexibility for many new projects.
- We develop an alternative, non-intrusive config system that can be used with
- detectron2 or potentially any other complex project.
-
- ## Python Syntax
-
- Our config objects are still dictionaries. Instead of using Yaml to define dictionaries,
- we create dictionaries in Python directly. This gives users the following power that
- doesn't exist in Yaml:
-
- * Easily manipulate the dictionary (addition & deletion) using Python.
- * Write simple arithmetic or call simple functions.
- * Use more data types / objects.
- * Import / compose other config files, using the familiar Python import syntax.
-
- A Python config file can be loaded like this:
- ```python
- # config.py:
- a = dict(x=1, y=2, z=dict(xx=1))
- b = dict(x=3, y=4)
-
- # my_code.py:
- from detectron2.config import LazyConfig
- cfg = LazyConfig.load("path/to/config.py")  # an omegaconf dictionary
- assert cfg.a.z.xx == 1
- ```
-
- After [LazyConfig.load](../modules/config.html#detectron2.config.LazyConfig.load), `cfg` will be a dictionary that contains all dictionaries
- defined in the global scope of the config file. Note that:
- * All dictionaries are turned into an [omegaconf](https://omegaconf.readthedocs.io/)
-   config object during loading. This enables access to omegaconf features,
-   such as its [access syntax](https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#access-and-manipulation)
-   and [interpolation](https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#variable-interpolation).
- * Absolute imports in `config.py` work the same as in regular Python.
- * Relative imports can only import dictionaries from config files.
-   They are simply syntax sugar for [LazyConfig.load_rel](../modules/config.html#detectron2.config.LazyConfig.load_rel).
-   They can load Python files at relative paths without requiring `__init__.py`.
-
- [LazyConfig.save](../modules/config.html#detectron2.config.LazyConfig.save) can save a config object to yaml.
- Note that this is not always successful if non-serializable objects appear in the config file (e.g. lambdas).
- It is up to users whether to sacrifice the ability to save in exchange for flexibility.
-
- ## Recursive Instantiation
-
- The LazyConfig system heavily uses recursive instantiation, which is a pattern that
- uses a dictionary to describe a
- call to a function/class. The dictionary consists of:
-
- 1. A "\_target\_" key which contains the path to the callable, such as "module.submodule.class_name".
- 2. Other keys that represent arguments to pass to the callable. Arguments themselves can be defined
-    using recursive instantiation.
-
- We provide a helper function [LazyCall](../modules/config.html#detectron2.config.LazyCall) that helps create such dictionaries.
- The following code using `LazyCall`
- ```python
- from detectron2.config import LazyCall as L
- from my_app import Trainer, Optimizer
- cfg = L(Trainer)(
-     optimizer=L(Optimizer)(
-         lr=0.01,
-         algo="SGD"
-     )
- )
- ```
- creates a dictionary like this:
- ```
- cfg = {
-     "_target_": "my_app.Trainer",
-     "optimizer": {
-         "_target_": "my_app.Optimizer",
-         "lr": 0.01, "algo": "SGD"
-     }
- }
- ```
-
- By representing objects using such dictionaries, a general
- [instantiate](../modules/config.html#detectron2.config.instantiate)
- function can turn them into actual objects, i.e.:
- ```python
- from detectron2.config import instantiate
- trainer = instantiate(cfg)
- # equivalent to:
- # from my_app import Trainer, Optimizer
- # trainer = Trainer(optimizer=Optimizer(lr=0.01, algo="SGD"))
- ```
-
- This pattern is powerful enough to describe very complex objects, e.g.:
-
- <details>
- <summary>
- A Full Mask R-CNN described in recursive instantiation (click to expand)
- </summary>
-
- ```eval_rst
- .. literalinclude:: ../../configs/common/models/mask_rcnn_fpn.py
-    :language: python
-    :linenos:
- ```
-
- </details>
-
- There are also objects or logic that cannot be described simply by a dictionary,
- such as reused objects or method calls. They may require some refactoring
- to work with recursive instantiation.
-
- ## Using Model Zoo LazyConfigs
-
- We provide some configs in the model zoo using the LazyConfig system, for example:
-
- * [common baselines](../../configs/common/).
- * [new Mask R-CNN baselines](../../configs/new_baselines/)
-
- After installing detectron2, they can be loaded by the model zoo API
- [model_zoo.get_config](../modules/model_zoo.html#detectron2.model_zoo.get_config).
-
- Using these as references, you're free to define custom config structures / fields for your own
- project, as long as your training script can understand them.
- Despite this, our model zoo configs still follow some simple conventions for consistency, e.g.
- `cfg.model` defines a model object, `cfg.dataloader.{train,test}` defines dataloader objects,
- and `cfg.train` contains training options in key-value form.
- In addition to `print()`, a better way to view the structure of a config is like this:
- ```python
- from detectron2.model_zoo import get_config
- from detectron2.config import LazyConfig
- print(LazyConfig.to_py(get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py")))
- ```
- From the output it's easier to find relevant options to change, e.g.
- `dataloader.train.total_batch_size` for the batch size, or `optimizer.lr` for the base learning rate.
-
- We provide a reference training script
- [tools/lazyconfig_train_net.py](../../tools/lazyconfig_train_net.py)
- that can train/eval our model zoo configs.
- It also shows how to support command line value overrides.
-
- To demonstrate the power and flexibility of the new system, we show that
- [a simple config file](../../configs/Misc/torchvision_imagenet_R_50.py)
- can let detectron2 train an ImageNet classification model from torchvision, even though
- detectron2 contains no features about ImageNet classification.
- This can serve as a reference for using detectron2 in other deep learning tasks.
-
- ## Summary
-
- By using recursive instantiation to create objects,
- we avoid passing a giant config to many places, because `cfg` is only passed to `instantiate`.
- This has the following benefits:
-
- * It's __non-intrusive__: objects to be constructed are config-agnostic, regular Python
-   functions/classes.
-   They can even live in other libraries. For example,
-   `{"_target_": "torch.nn.Conv2d", "in_channels": 10, "out_channels": 10, "kernel_size": 1}`
-   defines a conv layer.
- * __Clarity__ of what functions/classes will be called, and what arguments they use.
- * `cfg` doesn't need pre-defined keys and structures. It's valid as long as it translates to valid
-   code. This gives a lot more __flexibility__.
- * You can still pass huge dictionaries as arguments, just like the old way.
-
- Recursive instantiation and Python syntax are orthogonal: you can use one without the other.
- But by putting them together, the config file looks a lot like the code that will be executed:
-
- ![img](./lazyconfig.jpg)
-
- However, the config file just defines dictionaries, which can be easily manipulated further
- by composition or overrides.
- The corresponding code will only be executed
- later when `instantiate` is called. In some way,
- in config files we're writing "editable code" that will be "lazily executed" later when needed.
- That's why we call this system "LazyConfig".
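A minimal sketch of the full load-override-instantiate flow this document describes; it assumes detectron2 is installed and that LazyConfig.apply_overrides accepts dotted key=value strings, as used by tools/lazyconfig_train_net.py:

```python
# Minimal sketch: load a model-zoo lazy config, override two values,
# then instantiate the model. Assumes a detectron2 install.
from detectron2 import model_zoo
from detectron2.config import LazyConfig, instantiate

cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py")
cfg = LazyConfig.apply_overrides(
    cfg, ["dataloader.train.total_batch_size=8", "optimizer.lr=0.005"]
)
model = instantiate(cfg.model)  # the config stays a plain dict until this call
```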
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/bifpn_fcos.py DELETED
@@ -1,469 +0,0 @@
- # This file is modified from https://github.com/aim-uofa/AdelaiDet/blob/master/adet/modeling/backbone/bifpn.py
- # The original file is under 2-clause BSD License for academic use, and *non-commercial use*.
- import torch
- import torch.nn.functional as F
- from torch import nn
-
- from detectron2.layers import Conv2d, ShapeSpec, get_norm
-
- from detectron2.modeling.backbone import Backbone, build_resnet_backbone
- from detectron2.modeling import BACKBONE_REGISTRY
- from .dlafpn import dla34
-
- __all__ = []
-
-
- def swish(x):
-     return x * x.sigmoid()
-
-
- def split_name(name):
-     for i, c in enumerate(name):
-         if not c.isalpha():
-             return name[:i], int(name[i:])
-     raise ValueError()
-
-
- class FeatureMapResampler(nn.Module):
-     def __init__(self, in_channels, out_channels, stride, norm=""):
-         super(FeatureMapResampler, self).__init__()
-         if in_channels != out_channels:
-             self.reduction = Conv2d(
-                 in_channels, out_channels, kernel_size=1,
-                 bias=(norm == ""),
-                 norm=get_norm(norm, out_channels),
-                 activation=None
-             )
-         else:
-             self.reduction = None
-
-         assert stride <= 2
-         self.stride = stride
-
-     def forward(self, x):
-         if self.reduction is not None:
-             x = self.reduction(x)
-
-         if self.stride == 2:
-             x = F.max_pool2d(
-                 x, kernel_size=self.stride + 1,
-                 stride=self.stride, padding=1
-             )
-         elif self.stride == 1:
-             pass
-         else:
-             raise NotImplementedError()
-         return x
-
-
- class BackboneWithTopLevels(Backbone):
-     def __init__(self, backbone, out_channels, num_top_levels, norm=""):
-         super(BackboneWithTopLevels, self).__init__()
-         self.backbone = backbone
-         backbone_output_shape = backbone.output_shape()
-
-         self._out_feature_channels = {name: shape.channels for name, shape in backbone_output_shape.items()}
-         self._out_feature_strides = {name: shape.stride for name, shape in backbone_output_shape.items()}
-         self._out_features = list(self._out_feature_strides.keys())
-
-         last_feature_name = max(self._out_feature_strides.keys(), key=lambda x: split_name(x)[1])
-         self.last_feature_name = last_feature_name
-         self.num_top_levels = num_top_levels
-
-         last_channels = self._out_feature_channels[last_feature_name]
-         last_stride = self._out_feature_strides[last_feature_name]
-
-         prefix, suffix = split_name(last_feature_name)
-         prev_channels = last_channels
-         for i in range(num_top_levels):
-             name = prefix + str(suffix + i + 1)
-             self.add_module(name, FeatureMapResampler(
-                 prev_channels, out_channels, 2, norm
-             ))
-             prev_channels = out_channels
-
-             self._out_feature_channels[name] = out_channels
-             self._out_feature_strides[name] = last_stride * 2 ** (i + 1)
-             self._out_features.append(name)
-
-     def forward(self, x):
-         outputs = self.backbone(x)
-         last_features = outputs[self.last_feature_name]
-         prefix, suffix = split_name(self.last_feature_name)
-
-         x = last_features
-         for i in range(self.num_top_levels):
-             name = prefix + str(suffix + i + 1)
-             x = self.__getattr__(name)(x)
-             outputs[name] = x
-
-         return outputs
-
-
- class SingleBiFPN(Backbone):
-     """
-     This module implements Feature Pyramid Network.
-     It creates pyramid features built on top of some input feature maps.
-     """
-
-     def __init__(
-         self, in_channels_list, out_channels, norm=""
-     ):
-         """
-         Args:
-             bottom_up (Backbone): module representing the bottom up subnetwork.
-                 Must be a subclass of :class:`Backbone`. The multi-scale feature
-                 maps generated by the bottom up network, and listed in `in_features`,
-                 are used to generate FPN levels.
-             in_features (list[str]): names of the input feature maps coming
-                 from the backbone to which FPN is attached. For example, if the
-                 backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
-                 of these may be used; order must be from high to low resolution.
-             out_channels (int): number of channels in the output feature maps.
-             norm (str): the normalization to use.
-         """
-         super(SingleBiFPN, self).__init__()
-
-         self.out_channels = out_channels
-         # build 5-levels bifpn
-         if len(in_channels_list) == 5:
-             self.nodes = [
-                 {'feat_level': 3, 'inputs_offsets': [3, 4]},
-                 {'feat_level': 2, 'inputs_offsets': [2, 5]},
-                 {'feat_level': 1, 'inputs_offsets': [1, 6]},
-                 {'feat_level': 0, 'inputs_offsets': [0, 7]},
-                 {'feat_level': 1, 'inputs_offsets': [1, 7, 8]},
-                 {'feat_level': 2, 'inputs_offsets': [2, 6, 9]},
-                 {'feat_level': 3, 'inputs_offsets': [3, 5, 10]},
-                 {'feat_level': 4, 'inputs_offsets': [4, 11]},
-             ]
-         elif len(in_channels_list) == 3:
-             self.nodes = [
-                 {'feat_level': 1, 'inputs_offsets': [1, 2]},
-                 {'feat_level': 0, 'inputs_offsets': [0, 3]},
-                 {'feat_level': 1, 'inputs_offsets': [1, 3, 4]},
-                 {'feat_level': 2, 'inputs_offsets': [2, 5]},
-             ]
-         else:
-             raise NotImplementedError
-
-         node_info = [_ for _ in in_channels_list]
-
-         num_output_connections = [0 for _ in in_channels_list]
-         for fnode in self.nodes:
-             feat_level = fnode["feat_level"]
-             inputs_offsets = fnode["inputs_offsets"]
-             inputs_offsets_str = "_".join(map(str, inputs_offsets))
-             for input_offset in inputs_offsets:
-                 num_output_connections[input_offset] += 1
-
-                 in_channels = node_info[input_offset]
-                 if in_channels != out_channels:
-                     lateral_conv = Conv2d(
-                         in_channels,
-                         out_channels,
-                         kernel_size=1,
-                         norm=get_norm(norm, out_channels)
-                     )
-                     self.add_module(
-                         "lateral_{}_f{}".format(input_offset, feat_level), lateral_conv
-                     )
-             node_info.append(out_channels)
-             num_output_connections.append(0)
-
-             # generate attention weights
-             name = "weights_f{}_{}".format(feat_level, inputs_offsets_str)
-             self.__setattr__(name, nn.Parameter(
-                 torch.ones(len(inputs_offsets), dtype=torch.float32),
-                 requires_grad=True
-             ))
-
-             # generate convolutions after combination
-             name = "outputs_f{}_{}".format(feat_level, inputs_offsets_str)
-             self.add_module(name, Conv2d(
-                 out_channels,
-                 out_channels,
-                 kernel_size=3,
-                 padding=1,
-                 norm=get_norm(norm, out_channels),
-                 bias=(norm == "")
-             ))
-
-     def forward(self, feats):
-         """
-         Args:
-             input (dict[str->Tensor]): mapping feature map name (e.g., "p5") to
-                 feature map tensor for each feature level in high to low resolution order.
-         Returns:
-             dict[str->Tensor]:
-                 mapping from feature map name to FPN feature map tensor
-                 in high to low resolution order. Returned feature names follow the FPN
-                 paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
-                 ["n2", "n3", ..., "n6"].
-         """
-         feats = [_ for _ in feats]
-         num_levels = len(feats)
-         num_output_connections = [0 for _ in feats]
-         for fnode in self.nodes:
-             feat_level = fnode["feat_level"]
-             inputs_offsets = fnode["inputs_offsets"]
-             inputs_offsets_str = "_".join(map(str, inputs_offsets))
-             input_nodes = []
-             _, _, target_h, target_w = feats[feat_level].size()
-             for input_offset in inputs_offsets:
-                 num_output_connections[input_offset] += 1
-                 input_node = feats[input_offset]
-
-                 # reduction
-                 if input_node.size(1) != self.out_channels:
-                     name = "lateral_{}_f{}".format(input_offset, feat_level)
-                     input_node = self.__getattr__(name)(input_node)
-
-                 # maybe downsample
-                 _, _, h, w = input_node.size()
-                 if h > target_h and w > target_w:
-                     height_stride_size = int((h - 1) // target_h + 1)
-                     width_stride_size = int((w - 1) // target_w + 1)
-                     assert height_stride_size == width_stride_size == 2
-                     input_node = F.max_pool2d(
-                         input_node, kernel_size=(height_stride_size + 1, width_stride_size + 1),
-                         stride=(height_stride_size, width_stride_size), padding=1
-                     )
-                 elif h <= target_h and w <= target_w:
-                     if h < target_h or w < target_w:
-                         input_node = F.interpolate(
-                             input_node,
-                             size=(target_h, target_w),
-                             mode="nearest"
-                         )
-                 else:
-                     raise NotImplementedError()
-                 input_nodes.append(input_node)
-
-             # attention
-             name = "weights_f{}_{}".format(feat_level, inputs_offsets_str)
-             weights = F.relu(self.__getattr__(name))
-             norm_weights = weights / (weights.sum() + 0.0001)
-
-             new_node = torch.stack(input_nodes, dim=-1)
-             new_node = (norm_weights * new_node).sum(dim=-1)
-             new_node = swish(new_node)
-
-             name = "outputs_f{}_{}".format(feat_level, inputs_offsets_str)
-             feats.append(self.__getattr__(name)(new_node))
-
-             num_output_connections.append(0)
-
-         output_feats = []
-         for idx in range(num_levels):
-             for i, fnode in enumerate(reversed(self.nodes)):
-                 if fnode['feat_level'] == idx:
-                     output_feats.append(feats[-1 - i])
-                     break
-             else:
-                 raise ValueError()
-         return output_feats
-
-
- class BiFPN(Backbone):
-     """
-     This module implements Feature Pyramid Network.
-     It creates pyramid features built on top of some input feature maps.
-     """
-
-     def __init__(
-         self, bottom_up, in_features, out_channels, num_top_levels, num_repeats, norm=""
-     ):
-         """
-         Args:
-             bottom_up (Backbone): module representing the bottom up subnetwork.
-                 Must be a subclass of :class:`Backbone`. The multi-scale feature
-                 maps generated by the bottom up network, and listed in `in_features`,
-                 are used to generate FPN levels.
-             in_features (list[str]): names of the input feature maps coming
-                 from the backbone to which FPN is attached. For example, if the
-                 backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
-                 of these may be used; order must be from high to low resolution.
-             out_channels (int): number of channels in the output feature maps.
-             num_top_levels (int): the number of the top levels (p6 or p7).
-             num_repeats (int): the number of repeats of BiFPN.
-             norm (str): the normalization to use.
-         """
-         super(BiFPN, self).__init__()
-         assert isinstance(bottom_up, Backbone)
-
-         # add extra feature levels (i.e., 6 and 7)
-         self.bottom_up = BackboneWithTopLevels(
-             bottom_up, out_channels,
-             num_top_levels, norm
-         )
-         bottom_up_output_shapes = self.bottom_up.output_shape()
-
-         in_features = sorted(in_features, key=lambda x: split_name(x)[1])
-         self._size_divisibility = 128  # bottom_up_output_shapes[in_features[-1]].stride
-         self.out_channels = out_channels
-         self.min_level = split_name(in_features[0])[1]
-
-         # add the names for top blocks
-         prefix, last_suffix = split_name(in_features[-1])
-         for i in range(num_top_levels):
-             in_features.append(prefix + str(last_suffix + i + 1))
-         self.in_features = in_features
-
-         # generate output features
-         self._out_features = ["p{}".format(split_name(name)[1]) for name in in_features]
-         self._out_feature_strides = {
-             out_name: bottom_up_output_shapes[in_name].stride
-             for out_name, in_name in zip(self._out_features, in_features)
-         }
-         self._out_feature_channels = {k: out_channels for k in self._out_features}
-
-         # build bifpn
-         self.repeated_bifpn = nn.ModuleList()
-         for i in range(num_repeats):
-             if i == 0:
-                 in_channels_list = [
-                     bottom_up_output_shapes[name].channels for name in in_features
-                 ]
-             else:
-                 in_channels_list = [
-                     self._out_feature_channels[name] for name in self._out_features
-                 ]
-             self.repeated_bifpn.append(SingleBiFPN(
-                 in_channels_list, out_channels, norm
-             ))
-
-     @property
-     def size_divisibility(self):
-         return self._size_divisibility
-
-     def forward(self, x):
-         """
-         Args:
-             input (dict[str->Tensor]): mapping feature map name (e.g., "p5") to
-                 feature map tensor for each feature level in high to low resolution order.
-         Returns:
-             dict[str->Tensor]:
-                 mapping from feature map name to FPN feature map tensor
-                 in high to low resolution order. Returned feature names follow the FPN
-                 paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
-                 ["n2", "n3", ..., "n6"].
-         """
-         bottom_up_features = self.bottom_up(x)
-         feats = [bottom_up_features[f] for f in self.in_features]
-
-         for bifpn in self.repeated_bifpn:
-             feats = bifpn(feats)
-
-         return dict(zip(self._out_features, feats))
-
-
- def _assert_strides_are_log2_contiguous(strides):
-     """
-     Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
-     """
-     for i, stride in enumerate(strides[1:], 1):
-         assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
-             stride, strides[i - 1]
-         )
-
-
- @BACKBONE_REGISTRY.register()
- def build_fcos_resnet_bifpn_backbone(cfg, input_shape: ShapeSpec):
-     """
-     Args:
-         cfg: a detectron2 CfgNode
-     Returns:
-         backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
-     """
-     bottom_up = build_resnet_backbone(cfg, input_shape)
-     in_features = cfg.MODEL.FPN.IN_FEATURES
-     out_channels = cfg.MODEL.BIFPN.OUT_CHANNELS
-     num_repeats = cfg.MODEL.BIFPN.NUM_BIFPN
-     top_levels = 2
-
-     backbone = BiFPN(
-         bottom_up=bottom_up,
-         in_features=in_features,
-         out_channels=out_channels,
-         num_top_levels=top_levels,
-         num_repeats=num_repeats,
-         norm=cfg.MODEL.BIFPN.NORM
-     )
-     return backbone
-
-
- @BACKBONE_REGISTRY.register()
- def build_p35_fcos_resnet_bifpn_backbone(cfg, input_shape: ShapeSpec):
-     """
-     Args:
-         cfg: a detectron2 CfgNode
-     Returns:
-         backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
-     """
-     bottom_up = build_resnet_backbone(cfg, input_shape)
-     in_features = cfg.MODEL.FPN.IN_FEATURES
-     out_channels = cfg.MODEL.BIFPN.OUT_CHANNELS
-     num_repeats = cfg.MODEL.BIFPN.NUM_BIFPN
-     top_levels = 0
-
-     backbone = BiFPN(
-         bottom_up=bottom_up,
-         in_features=in_features,
-         out_channels=out_channels,
-         num_top_levels=top_levels,
-         num_repeats=num_repeats,
-         norm=cfg.MODEL.BIFPN.NORM
-     )
-     return backbone
-
-
- @BACKBONE_REGISTRY.register()
- def build_p35_fcos_dla_bifpn_backbone(cfg, input_shape: ShapeSpec):
-     """
-     Args:
-         cfg: a detectron2 CfgNode
-     Returns:
-         backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
-     """
-     bottom_up = dla34(cfg)
-     in_features = cfg.MODEL.FPN.IN_FEATURES
-     out_channels = cfg.MODEL.BIFPN.OUT_CHANNELS
-     num_repeats = cfg.MODEL.BIFPN.NUM_BIFPN
-     top_levels = 0
-
-     backbone = BiFPN(
-         bottom_up=bottom_up,
-         in_features=in_features,
-         out_channels=out_channels,
-         num_top_levels=top_levels,
-         num_repeats=num_repeats,
-         norm=cfg.MODEL.BIFPN.NORM
-     )
-     return backbone
-
-
- @BACKBONE_REGISTRY.register()
- def build_p37_fcos_dla_bifpn_backbone(cfg, input_shape: ShapeSpec):
-     """
-     Args:
-         cfg: a detectron2 CfgNode
-     Returns:
-         backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
-     """
-     bottom_up = dla34(cfg)
-     in_features = cfg.MODEL.FPN.IN_FEATURES
-     out_channels = cfg.MODEL.BIFPN.OUT_CHANNELS
-     num_repeats = cfg.MODEL.BIFPN.NUM_BIFPN
-     assert cfg.MODEL.BIFPN.NUM_LEVELS == 5
-     top_levels = 2
-
-     backbone = BiFPN(
-         bottom_up=bottom_up,
-         in_features=in_features,
-         out_channels=out_channels,
-         num_top_levels=top_levels,
-         num_repeats=num_repeats,
-         norm=cfg.MODEL.BIFPN.NORM
-     )
-     return backbone
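These builders are looked up by name through BACKBONE_REGISTRY. A minimal sketch of that dispatch, assuming the CenterNet2 project is importable and that its add_centernet_config helper (which defines the cfg.MODEL.BIFPN.* and DLA keys) exists; both are assumptions about the surrounding project:

```python
# Minimal sketch: building this backbone by name through the registry.
# Assumes detectron2 plus the CenterNet2 project config helper.
import torch
from detectron2.config import get_cfg
from detectron2.modeling import build_backbone

from centernet.config import add_centernet_config  # assumed project helper

cfg = get_cfg()
add_centernet_config(cfg)
cfg.MODEL.BACKBONE.NAME = "build_p35_fcos_dla_bifpn_backbone"
backbone = build_backbone(cfg)  # dispatched via BACKBONE_REGISTRY
feats = backbone(torch.zeros(1, 3, 256, 256))  # {"p3": ..., "p4": ..., "p5": ...}
```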
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/__init__.py DELETED
File without changes
spaces/Awiny/Image2Paragraph/pretrained_models/download_pretrain.sh DELETED
@@ -1,5 +0,0 @@
- git clone https://huggingface.co/openai/clip-vit-large-patch14
- git clone https://huggingface.co/shi-labs/oneformer_ade20k_swin_large
- git clone https://huggingface.co/shi-labs/oneformer_coco_swin_large
- git clone https://huggingface.co/Salesforce/blip-image-captioning-large
- git clone https://huggingface.co/CIDAS/clipseg-rd64-refined
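A Python alternative for fetching the same repositories, as a sketch assuming huggingface_hub is installed:

```python
# Minimal sketch: downloading the same pretrained repos with
# huggingface_hub instead of git clone. Assumes huggingface_hub.
from huggingface_hub import snapshot_download

repos = [
    "openai/clip-vit-large-patch14",
    "shi-labs/oneformer_ade20k_swin_large",
    "shi-labs/oneformer_coco_swin_large",
    "Salesforce/blip-image-captioning-large",
    "CIDAS/clipseg-rd64-refined",
]
for repo in repos:
    snapshot_download(repo_id=repo, local_dir=repo.split("/")[-1])
```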
 
spaces/Banbri/zcvzcv/src/components/ui/dropdown-menu.tsx DELETED
@@ -1,200 +0,0 @@
- "use client"
-
- import * as React from "react"
- import * as DropdownMenuPrimitive from "@radix-ui/react-dropdown-menu"
- import { Check, ChevronRight, Circle } from "lucide-react"
-
- import { cn } from "@/lib/utils"
-
- const DropdownMenu = DropdownMenuPrimitive.Root
-
- const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger
-
- const DropdownMenuGroup = DropdownMenuPrimitive.Group
-
- const DropdownMenuPortal = DropdownMenuPrimitive.Portal
-
- const DropdownMenuSub = DropdownMenuPrimitive.Sub
-
- const DropdownMenuRadioGroup = DropdownMenuPrimitive.RadioGroup
-
- const DropdownMenuSubTrigger = React.forwardRef<
-   React.ElementRef<typeof DropdownMenuPrimitive.SubTrigger>,
-   React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.SubTrigger> & {
-     inset?: boolean
-   }
- >(({ className, inset, children, ...props }, ref) => (
-   <DropdownMenuPrimitive.SubTrigger
-     ref={ref}
-     className={cn(
-       "flex cursor-default select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none focus:bg-stone-100 data-[state=open]:bg-stone-100 dark:focus:bg-stone-800 dark:data-[state=open]:bg-stone-800",
-       inset && "pl-8",
-       className
-     )}
-     {...props}
-   >
-     {children}
-     <ChevronRight className="ml-auto h-4 w-4" />
-   </DropdownMenuPrimitive.SubTrigger>
- ))
- DropdownMenuSubTrigger.displayName =
-   DropdownMenuPrimitive.SubTrigger.displayName
-
- const DropdownMenuSubContent = React.forwardRef<
-   React.ElementRef<typeof DropdownMenuPrimitive.SubContent>,
-   React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.SubContent>
- >(({ className, ...props }, ref) => (
-   <DropdownMenuPrimitive.SubContent
-     ref={ref}
-     className={cn(
-       "z-50 min-w-[8rem] overflow-hidden rounded-md border border-stone-200 bg-white p-1 text-stone-950 shadow-lg data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 dark:border-stone-800 dark:bg-stone-950 dark:text-stone-50",
-       className
-     )}
-     {...props}
-   />
- ))
- DropdownMenuSubContent.displayName =
-   DropdownMenuPrimitive.SubContent.displayName
-
- const DropdownMenuContent = React.forwardRef<
-   React.ElementRef<typeof DropdownMenuPrimitive.Content>,
-   React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Content>
- >(({ className, sideOffset = 4, ...props }, ref) => (
-   <DropdownMenuPrimitive.Portal>
-     <DropdownMenuPrimitive.Content
-       ref={ref}
-       sideOffset={sideOffset}
-       className={cn(
-         "z-50 min-w-[8rem] overflow-hidden rounded-md border border-stone-200 bg-white p-1 text-stone-950 shadow-md data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 dark:border-stone-800 dark:bg-stone-950 dark:text-stone-50",
-         className
-       )}
-       {...props}
-     />
-   </DropdownMenuPrimitive.Portal>
- ))
- DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName
-
- const DropdownMenuItem = React.forwardRef<
-   React.ElementRef<typeof DropdownMenuPrimitive.Item>,
-   React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Item> & {
-     inset?: boolean
-   }
- >(({ className, inset, ...props }, ref) => (
-   <DropdownMenuPrimitive.Item
-     ref={ref}
-     className={cn(
-       "relative flex cursor-default select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none transition-colors focus:bg-stone-100 focus:text-stone-900 data-[disabled]:pointer-events-none data-[disabled]:opacity-50 dark:focus:bg-stone-800 dark:focus:text-stone-50",
-       inset && "pl-8",
-       className
-     )}
-     {...props}
-   />
- ))
- DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName
-
- const DropdownMenuCheckboxItem = React.forwardRef<
-   React.ElementRef<typeof DropdownMenuPrimitive.CheckboxItem>,
-   React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.CheckboxItem>
- >(({ className, children, checked, ...props }, ref) => (
-   <DropdownMenuPrimitive.CheckboxItem
-     ref={ref}
-     className={cn(
-       "relative flex cursor-default select-none items-center rounded-sm py-1.5 pl-8 pr-2 text-sm outline-none transition-colors focus:bg-stone-100 focus:text-stone-900 data-[disabled]:pointer-events-none data-[disabled]:opacity-50 dark:focus:bg-stone-800 dark:focus:text-stone-50",
-       className
-     )}
-     checked={checked}
-     {...props}
-   >
-     <span className="absolute left-2 flex h-3.5 w-3.5 items-center justify-center">
-       <DropdownMenuPrimitive.ItemIndicator>
-         <Check className="h-4 w-4" />
-       </DropdownMenuPrimitive.ItemIndicator>
-     </span>
-     {children}
-   </DropdownMenuPrimitive.CheckboxItem>
- ))
- DropdownMenuCheckboxItem.displayName =
-   DropdownMenuPrimitive.CheckboxItem.displayName
-
- const DropdownMenuRadioItem = React.forwardRef<
-   React.ElementRef<typeof DropdownMenuPrimitive.RadioItem>,
-   React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.RadioItem>
- >(({ className, children, ...props }, ref) => (
-   <DropdownMenuPrimitive.RadioItem
-     ref={ref}
-     className={cn(
-       "relative flex cursor-default select-none items-center rounded-sm py-1.5 pl-8 pr-2 text-sm outline-none transition-colors focus:bg-stone-100 focus:text-stone-900 data-[disabled]:pointer-events-none data-[disabled]:opacity-50 dark:focus:bg-stone-800 dark:focus:text-stone-50",
-       className
-     )}
-     {...props}
-   >
-     <span className="absolute left-2 flex h-3.5 w-3.5 items-center justify-center">
-       <DropdownMenuPrimitive.ItemIndicator>
-         <Circle className="h-2 w-2 fill-current" />
-       </DropdownMenuPrimitive.ItemIndicator>
-     </span>
-     {children}
-   </DropdownMenuPrimitive.RadioItem>
- ))
- DropdownMenuRadioItem.displayName = DropdownMenuPrimitive.RadioItem.displayName
-
- const DropdownMenuLabel = React.forwardRef<
-   React.ElementRef<typeof DropdownMenuPrimitive.Label>,
-   React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Label> & {
-     inset?: boolean
-   }
- >(({ className, inset, ...props }, ref) => (
-   <DropdownMenuPrimitive.Label
-     ref={ref}
-     className={cn(
-       "px-2 py-1.5 text-sm font-semibold",
-       inset && "pl-8",
-       className
-     )}
-     {...props}
-   />
- ))
- DropdownMenuLabel.displayName = DropdownMenuPrimitive.Label.displayName
-
- const DropdownMenuSeparator = React.forwardRef<
-   React.ElementRef<typeof DropdownMenuPrimitive.Separator>,
-   React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Separator>
- >(({ className, ...props }, ref) => (
-   <DropdownMenuPrimitive.Separator
-     ref={ref}
-     className={cn("-mx-1 my-1 h-px bg-stone-100 dark:bg-stone-800", className)}
-     {...props}
-   />
- ))
- DropdownMenuSeparator.displayName = DropdownMenuPrimitive.Separator.displayName
-
- const DropdownMenuShortcut = ({
-   className,
-   ...props
- }: React.HTMLAttributes<HTMLSpanElement>) => {
-   return (
-     <span
-       className={cn("ml-auto text-xs tracking-widest opacity-60", className)}
-       {...props}
-     />
-   )
- }
- DropdownMenuShortcut.displayName = "DropdownMenuShortcut"
-
- export {
-   DropdownMenu,
-   DropdownMenuTrigger,
-   DropdownMenuContent,
-   DropdownMenuItem,
-   DropdownMenuCheckboxItem,
-   DropdownMenuRadioItem,
-   DropdownMenuLabel,
-   DropdownMenuSeparator,
-   DropdownMenuShortcut,
-   DropdownMenuGroup,
-   DropdownMenuPortal,
-   DropdownMenuSub,
-   DropdownMenuSubContent,
-   DropdownMenuSubTrigger,
-   DropdownMenuRadioGroup,
- }
 
spaces/Benson/text-generation/Examples/Arquero Inactivo Torre De Defensa Mod Apk.md DELETED
@@ -1,88 +0,0 @@
1
-
2
- <tabla>
3
- <tr>
4
- <td>
5
- <h1>Idle Archer Tower Defense Mod APK: Un divertido y desafiante juego de rol</h1>
6
- <p>¿Estás buscando un juego de rol divertido y desafiante que te mantenga entretenido durante horas? ¿Quieres experimentar un emocionante juego de torre de defensa con increíbles gráficos y efectos de sonido? Si es así, entonces usted debe tratar Idle Archer Tower Defense Mod APK! </p>
7
- <p>Idle Archer Tower Defense es un popular juego de rol que combina la defensa de la torre y elementos inactivos. Juegas como un arquero solitario que tiene que defender tu torre contra las olas de monstruos malvados invocados por el señor oscuro. Puedes mejorar tus habilidades de arquero, armas y torre para volverte más fuerte y derrotar a más enemigos. También puedes recoger monedas y gemas para desbloquear nuevos elementos y características. </p>
8
- <h2>arquero inactivo torre de defensa mod apk</h2><br /><p><b><b>Download</b> &#10037;&#10037;&#10037; <a href="https://bltlly.com/2v6K3h">https://bltlly.com/2v6K3h</a></b></p><br /><br />
9
- <p>Pero ¿y si quieres disfrutar del juego sin limitaciones ni restricciones? ¿Qué pasa si quieres tener recursos y potenciadores ilimitados para que tu juego sea más fácil y divertido? Bueno, ahí es donde Idle Archer Tower Defense Mod APK viene muy bien! </p>
10
- <p>Idle Archer Tower Defense Mod APK es una versión modificada del juego original que le da acceso a varios beneficios y ventajas que no están disponibles en la versión regular. Usted puede disfrutar de características como el modo de dios, un golpe matar, monedas y gemas ilimitadas, y más. Puede descargar e instalar Idle Archer Tower Defense Mod APK gratis desde nuestro sitio web. </p>
11
- <p>En este artículo, le diremos todo lo que necesita saber sobre Idle Archer Tower Defense Mod APK. Te explicaremos qué es, por qué deberías descargarlo, cómo descargarlo, cómo instalarlo y cómo jugarlo. También vamos a responder a algunas preguntas frecuentes sobre Idle Archer Tower Defense Mod APK.</p>
12
- <p>Así que, sin más preámbulos, ¡empecemos! </p>
13
- <h2>¿Qué es Idle Archer Tower Defense? </ debilidades. También tienes que enfrentarte a jefes poderosos que pondrán a prueba tus habilidades y estrategia. Tienes que completar varias misiones y logros para ganar recompensas y desbloquear nuevas características. </p>
14
-
15
- <h2>El juego de Idle Archer Tower Defense</h2>
16
- <h3>Cómo jugar Idle Archer Tower Defense</h3>
17
- <p>Jugar Idle Archer Tower Defense es fácil y agradable. Solo tienes que seguir estos sencillos pasos:</p>
18
- <ol>
19
- <li>Descargar e instalar Idle Archer Tower Defense desde Google Play Store o desde nuestro sitio web. </li>
20
- <li>Inicie el juego y elija su idioma preferido. </li>
21
- <li>Ver el tutorial y aprender los fundamentos del juego. </li>
22
- <li>Comienza el juego y defiende tu torre de los enemigos. </li>
23
- <li>Toque en la pantalla para apuntar y disparar las flechas. </li>
24
- <li>Desliza el dedo en la pantalla para mover el arquero y esquivar los ataques enemigos. </li>
25
- <li>Usa hechizos, objetos y aliados para aumentar tu potencia y rendimiento. </li>
26
- <li>Gana monedas y gemas matando enemigos y completando misiones. </li>
27
- <li>Mejora tus habilidades, armas y torre en la tienda. </li>
28
- <li>Progreso a través de las etapas y niveles del juego. </li>
29
- <li>Disfruta de la historia, gráficos, sonido y jugabilidad del juego. </li>
30
- </ol>
31
- <h3>¿Cuáles son las características de Idle Archer Tower Defense</h3>
32
- <p>Idle Archer Tower Defense tiene muchas características que lo convierten en un juego de rol divertido y desafiante. Algunas de estas características son:</p>
33
- <p></p>
34
- <ul>
35
- <li>Una combinación única de defensa de torre y elementos inactivos. </li>
36
- <li>Un sistema de control simple e intuitivo. </li>
37
- <li>Un estilo gráfico colorido y caricaturesco. </li>
38
- <li>Una banda sonora animada y pegadiza. </li>
39
- <li>Una variedad de enemigos, jefes, escenarios y escenarios. </li>
40
- <li>Un modo inactivo que te permite ganar monedas y gemas incluso cuando no estás conectado. </li>
41
- <li>Una historia rica e inmersiva que te mantendrá enganchado y motivado para jugar. </li>
42
- <li>Una tienda donde puedes comprar nuevos artículos y características. </li>
43
- <li>Un sistema de misión que te recompensa por completar tareas y logros. </li>
44
- <li>Un sistema de eventos que añade variedad y diversión al juego. </li>
45
- </ul> <h2>¿Por qué descargar Idle Archer Tower Defense Mod APK? </h2>
46
-
47
- <p>That is why you should download Idle Archer Tower Defense Mod APK from our website. Idle Archer Tower Defense Mod APK is a modified version of the original game that gives you access to various benefits and advantages that are not available in the regular version. You can enjoy features such as god mode, one-hit kill, unlimited coins and gems, and more. You can download and install Idle Archer Tower Defense Mod APK for free from our website.</p>
- <h3>What Is Idle Archer Tower Defense Mod APK?</h3>
- <p>Idle Archer Tower Defense Mod APK is a modified version of the original game created by third-party developers. The mod APK file contains changes and modifications that alter the game's code and functionality. The mod APK file lets you bypass the game's security system and access features that are normally locked or restricted.</p>
- <p>Idle Archer Tower Defense Mod APK is not an official version of the game. It is not endorsed or supported by the game's original developers or publishers. It is not available on the Google Play Store or any other official app store. It is only available from our website and other unofficial sources.</p>
- <h3>What Are the Benefits of Idle Archer Tower Defense Mod APK?</h3>
- <p>Idle Archer Tower Defense Mod APK has many benefits that will make your gameplay easier and more fun. Some of these benefits are:</p>
- <h4>God Mode</h4>
- <p>God mode is a feature that makes you invincible and immune to all damage. You can activate god mode in Idle Archer Tower Defense Mod APK and enjoy the game without worrying about dying or losing health. You can survive any enemy attack, boss fight, or stage challenge with ease. You can also explore the game world without any fear or risk.</p>
- <h4>One-Hit Kill</h4>
- <p>One-hit kill is a feature that lets you defeat any enemy with a single arrow. You can activate one-hit kill in Idle Archer Tower Defense Mod APK and clear enemy waves, boss fights, and stage challenges in seconds, without wasting time or arrows.</p>
- <h4>Unlimited Coins and Gems</h4>
- <p>Unlimited coins and gems are features that make you rich and resourceful. You can activate unlimited coins and gems in Idle Archer Tower Defense Mod APK and enjoy the game without spending real money or grinding hard. You can have as many coins and gems as you want in your account. You can use them to buy new items, features, upgrades, skills, weapons, towers, spells, allies, and more in the shop. You can also use them to unlock new stages, levels, modes, events, scenarios, characters, and more in the game.</p>
- <h2>How to Download and Install Idle Archer Tower Defense Mod APK?</h2>
- <p>Downloading and installing Idle Archer Tower Defense Mod APK is easy and simple. Just follow these simple steps:</p>
- <h3>Step 1: Download the APK File</h3>
- <p>The first step is to download the APK file of Idle Archer Tower Defense Mod APK from our website. You can find the download link at the end of this article. Just click the link and wait for the download to start. The file size is about 100 MB, so make sure you have enough storage space and a stable Internet connection.</p>
- <h3>Step 2: Enable Unknown Sources</h3>
- <p>The second step is to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than the Google Play Store or other official app stores. You need to enable this setting because Idle Archer Tower Defense Mod APK is not an official app and is not available on any official app store.</p>
- <p>To enable unknown sources, go to your device's settings and look for the security or privacy option. Then, find the unknown sources option and turn it on. You may see a warning message stating that installing apps from unknown sources can harm your device or data. You can ignore this message and proceed with the installation.</p>
- <h3>Step 3: Install the APK File</h3>
- <p>The third step is to install the APK file you downloaded. Open your device's file manager, find the downloaded file, and tap it to start the installation. Wait a few seconds for the process to finish.</p>
- <h3>Step 4: Enjoy the Game</h3>
- <p>The fourth and final step is to enjoy the game. You can launch the game from your device's app drawer or home screen. You can also create a shortcut icon on your home screen for easier access. You can now enjoy all the features and benefits of Idle Archer Tower Defense Mod APK without any limitations or restrictions.</p>
- <h2>Conclusion</h2>
- <p>Idle Archer Tower Defense Mod APK is a fun and challenging role-playing game that combines tower defense and idle elements. You can defend your tower from waves of enemies, upgrade your skills, weapons, and tower, collect coins and gems, and enjoy a rich and immersive story. You can also enjoy features such as god mode, one-hit kill, unlimited coins and gems, and more with Idle Archer Tower Defense Mod APK.</p>
- <p>If you want to download and install Idle Archer Tower Defense Mod APK, you can follow the steps we have provided in this article. You can also find the download link at the end of this article. You can download and install Idle Archer Tower Defense Mod APK for free from our website.</p>
- <p>We hope you found this article helpful and informative. If you have any questions or comments about Idle Archer Tower Defense Mod APK, you can leave a comment below. We will try to answer your questions as soon as possible.</p>
- <h3>Frequently Asked Questions</h3>
- <p>Here are some frequently asked questions about Idle Archer Tower Defense Mod APK:</p>
- <ol>
- <li>Is Idle Archer Tower Defense Mod APK safe to use?</li>
- <p>Yes, Idle Archer Tower Defense Mod APK is safe to use. It does not contain any viruses, malware, or spyware that could harm your device or data. It also does not require root access or any special permissions that could compromise your device's security or privacy.</p>
- <li>Is Idle Archer Tower Defense Mod APK compatible with my device?</li>
- <p>Idle Archer Tower Defense Mod APK is compatible with most Android devices. You only need a device that can run the original game and has enough storage space for the APK file.</p>
- <li>Can I play Idle Archer Tower Defense Mod APK online?</li>
- <p>No, Idle Archer Tower Defense Mod APK is an offline game that does not require an Internet connection to play. However, you may need an Internet connection to download and install the game, as well as to access some online features such as updates, events, or leaderboards.</p>
- <li>Can I update Idle Archer Tower Defense Mod APK?</li>
- <p>Yes, you can update Idle Archer Tower Defense Mod APK whenever a new version is available on our website. However, you may lose some of your progress or data if you update the game without making a backup.</p>
- <li>Can I play Idle Archer Tower Defense Mod APK with my friends?</li>
- <p>No, Idle Archer Tower Defense Mod APK does not have a multiplayer mode or feature that lets you play with your friends. However, you can compare your scores and achievements with other players on leaderboards or social media platforms.</p>
- </ol>
- <p>Download link: <a href="">Idle Archer Tower Defense Mod APK</a></p>
- <br />
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Conquista Paraso Riddim.md DELETED
@@ -1,71 +0,0 @@
- <br />
- <h1>How to Download Conquest Paradise Riddim</h1>
- <p>If you are a reggae music fan, you may have heard of Conquest Paradise Riddim, a compilation of songs that share the same instrumental track, or "riddim". This riddim was produced by Damage Musiq, a Jamaican record label that specializes in dancehall and reggae. In this article, we will show you how to download Conquest Paradise Riddim for free or for a fee, depending on your preference and budget. We will also explain what makes this riddim so special and why you should add it to your music library.</p>
- <p><b>DOWNLOAD</b> &#9658; <a href="https://bltlly.com/2v6Mgq">https://bltlly.com/2v6Mgq</a></p>
- <h2>What Is Conquest Paradise Riddim?</h2>
- <h3>A Brief Introduction to the Riddim and Its Producer</h3>
- <p>A riddim is a term used in Jamaican music to describe a rhythm or instrumental track that different artists use to create their own songs. A riddim can be original or sampled from another song, and it usually consists of a drum pattern, a bass line, and some other elements. A riddim can have a theme or a mood that reflects its name or producer.</p>
- <p>Conquest Paradise Riddim is a riddim that was released in February 2019 by Damage Musiq, a record label founded by Dwayne Parkinson, also known as Damage. Damage Musiq is known for producing high-quality riddims that feature some of the best dancehall and reggae artists in Jamaica and beyond. Some of its previous riddims include Holocaust Riddim, Levels Riddim, Rope Riddim, and Dark Temptation Riddim.</p>
- <h3>The Artists and Songs Featured on the Riddim</h3>
- <p>Conquest Paradise Riddim features 10 songs by 9 different artists, including some of the most popular names in the reggae scene. The songs are as follows:</p>
- <table>
- <tr><th>Artist</th><th>Song</th></tr>
- <tr><td>Tommy Lee Sparta</td><td>Blessings</td></tr>
- <tr><td>Chronic Law</td><td>Bless Me</td></tr>
- <tr><td>Shane O</td><td>Hold On</td></tr>
- <tr><td>Shatta Wale</td><td>Hello</td></tr>
- <tr><td>Singer J</td><td>Pray</td></tr>
- <tr><td>Tamo J</td><td>Victory</td></tr>
-
- <tr><td>Jah Vinci</td><td>Fight War</td></tr>
- <tr><td>Jahmiel</td><td>Mankind</td></tr>
- <tr><td>Damage Musiq</td><td>Conquest Paradise Riddim (Instrumental)</td></tr>
- </table>
- <p>The songs on Conquest Paradise Riddim are mostly positive and uplifting, with themes such as gratitude, perseverance, faith, success, and love. The riddim has a smooth, melodic sound, with a catchy guitar riff, a pulsing bass line, and a steady drum beat. The riddim works both for relaxing and for dancing, and it showcases the versatility and talent of the artists and the producer.</p>
- <h2>Why Download Conquest Paradise Riddim?</h2>
- <h3>The Benefits of Owning Your Music</h3>
- <p>Downloading Conquest Paradise Riddim is a great way to enjoy your music anytime, anywhere, without depending on an Internet connection or streaming services. By downloading the riddim, you have full control over your music library, and you can create your own playlists, mixtapes, or albums. You can also share your music with your friends and family, or play it on any device or speaker you want. Downloading your music also means you can support the artists and the producer directly, showing them your appreciation and loyalty.</p>
- <h3>The Quality and Variety of the Riddim</h3>
- <p>Another reason to download Conquest Paradise Riddim is that it is a high-quality, diverse riddim that will satisfy any reggae lover. The riddim has a crisp, clear sound, with no distortion or noise. The songs are well produced and well mixed, with balanced vocals and instruments. The riddim also offers a variety of styles and vibes, from the inspiring and motivational songs by Tommy Lee Sparta, Chronic Law, Shane O, Singer J, Tamo J, and Gott Yo, to the romantic and sensual songs by Shatta Wale and Jah Vinci, and the conscious and social songs by Jahmiel. There is something for everyone on Conquest Paradise Riddim, and you will not get bored of listening to it.</p>
- <h2>How to Download Conquest Paradise Riddim for Free?</h2>
- <h3>The Legal and Ethical Issues of Downloading Free Music</h3>
- <p>Before we show you how to download Conquest Paradise Riddim for free, we need to warn you about the legal and ethical issues of doing so. Downloading free music from unauthorized sources is considered piracy, a form of theft that violates the intellectual property rights of the artists and the producer. Piracy can have negative consequences for the music industry, such as reduced revenue, lower quality, less innovation, and fewer incentives for new artists to emerge. Piracy can also expose you to malware, viruses, spyware, or other harmful software that can damage your device or compromise your privacy. Therefore, we do not recommend or endorse downloading free music from illegal sources, and we advise you to do so at your own risk.</p>
- <h3>The Websites and Apps That Offer Free Downloads of the Riddim</h3>
- <p>If you still want to download Conquest Paradise Riddim for free, despite the risks involved, there are some websites and apps that claim to offer free downloads of the riddim. However, we cannot guarantee their safety, reliability, or quality. Some of these websites and apps are:</p>
- <ul>
- <li><a href="">Reggae4us.com</a>: This website claims to be "the ultimate online destination for reggae music lovers". It offers free downloads of various reggae songs and albums, including Conquest Paradise Riddim. However, the site has many pop-up ads and redirects that can be annoying or dangerous.</li>
- <li><a href="">Audiomack.com</a>: This website is a platform where artists upload their music and share it with their fans. It also lets users stream or download music for free. However, not all of the songs on Conquest Paradise Riddim are available on this site, and some of them are low-quality or incomplete versions.</li>
-
- </ul>
- <h2>How to Download Conquest Paradise Riddim for a Fee?</h2>
- <h3>The Advantages of Buying Music Online</h3>
- <p>If you want to download Conquest Paradise Riddim legally and ethically, you can buy it online from authorized sources. Buying music online has many advantages, such as:</p>
- <ul>
- <li>You can support the artists and the producer financially, and show them your respect and appreciation for their work.</li>
- <li>You can get the best quality and the full version of the riddim, without interruptions or errors.</li>
- <li>You can access your music anytime, anywhere, without depending on an Internet connection or streaming services.</li>
- <li>You can enjoy your music without ads, pop-ups, redirects, or malware.</li>
- <li>You can get additional features or benefits, such as lyrics, artwork, reviews, recommendations, or discounts.</li>
- </ul>
- <h3>The Platforms and Services That Sell the Riddim</h3>
- <p>There are many platforms and services that sell Conquest Paradise Riddim online for a reasonable price. Some of these platforms and services are:</p>
- <ul>
- <li><a href="">iTunes</a>: This is one of the most popular and trusted platforms for buying music online. You can buy Conquest Paradise Riddim for $9.99 USD, or individual songs for $0.99 USD each. You can also stream the riddim on Apple Music if you have a subscription.</li>
- <li><a href="">Amazon Music</a>: This is another well-known and reliable platform for buying music online. You can buy Conquest Paradise Riddim for $8.99 USD, or individual songs for $0.99 USD each. You can also stream the riddim on Amazon Music Unlimited if you have a subscription.</li>
- <li><a href="">Spotify</a>: This is one of the most popular and widely used platforms for streaming music online. You can stream Conquest Paradise Riddim on Spotify for free with ads, or without ads on a premium subscription. You can also download the riddim on Spotify Premium for offline listening.</li>
- </ul>
- <h2>Conclusion</h2>
- <h3>A Summary of the Main Points and a Call to Action</h3>
- <p>In conclusion, Conquest Paradise Riddim is a must-have for any reggae music lover. It is a high-quality, diverse riddim that features some of the best artists and songs in the genre. It is also a positive, uplifting riddim that will inspire you and make you feel good. You can download Conquest Paradise Riddim for free or for a fee, depending on your preference and budget. However, we recommend buying the riddim online from authorized sources, to support the artists and the producer, and to enjoy the best quality and experience. So what are you waiting for? Download Conquest Paradise Riddim today and enjoy the vibes!</p>
- <h2>Frequently Asked Questions</h2>
- <h3>What is a riddim?</h3>
- <p>A riddim is a term used in Jamaican music to describe a rhythm or instrumental track that different artists use to create their own songs.</p>
- <h3>What is reggae music?</h3>
- <p>Reggae is a genre of music that originated in Jamaica in the late 1960s. It is characterized by its distinctive rhythm, bass line, guitar chords, vocals, and lyrics. Reggae music often expresses social and political messages, as well as cultural and spiritual values.</p>
- <h3>How can I play the downloaded music on my device?</h3>
- <p>You can play the downloaded music on your device using any media player app that supports mp3 or other audio formats. You can also transfer the downloaded music to other devices or speakers via Bluetooth, USB, or Wi-Fi.</p>
- <h3>How can I support the artists and producers of the riddim?</h3>
- <p>You can support the artists and producers of the riddim by buying their music online from authorized sources, streaming their music on legal platforms, following them on social media, sharing their music with your friends and family, attending their concerts or events, or sending them feedback or donations.</p>
- <h3>Where can I find more reggae music online?</h3>
- <br />
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/mbcsgroupprober.py DELETED
@@ -1,57 +0,0 @@
- ######################## BEGIN LICENSE BLOCK ########################
- # The Original Code is Mozilla Universal charset detector code.
- #
- # The Initial Developer of the Original Code is
- # Netscape Communications Corporation.
- # Portions created by the Initial Developer are Copyright (C) 2001
- # the Initial Developer. All Rights Reserved.
- #
- # Contributor(s):
- #   Mark Pilgrim - port to Python
- #   Shy Shalom - original C code
- #   Proofpoint, Inc.
- #
- # This library is free software; you can redistribute it and/or
- # modify it under the terms of the GNU Lesser General Public
- # License as published by the Free Software Foundation; either
- # version 2.1 of the License, or (at your option) any later version.
- #
- # This library is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- # Lesser General Public License for more details.
- #
- # You should have received a copy of the GNU Lesser General Public
- # License along with this library; if not, write to the Free Software
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- # 02110-1301  USA
- ######################### END LICENSE BLOCK #########################
- 
- from .big5prober import Big5Prober
- from .charsetgroupprober import CharSetGroupProber
- from .cp949prober import CP949Prober
- from .enums import LanguageFilter
- from .eucjpprober import EUCJPProber
- from .euckrprober import EUCKRProber
- from .euctwprober import EUCTWProber
- from .gb2312prober import GB2312Prober
- from .johabprober import JOHABProber
- from .sjisprober import SJISProber
- from .utf8prober import UTF8Prober
- 
- 
- class MBCSGroupProber(CharSetGroupProber):
-     def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
-         super().__init__(lang_filter=lang_filter)
-         self.probers = [
-             UTF8Prober(),
-             SJISProber(),
-             EUCJPProber(),
-             GB2312Prober(),
-             EUCKRProber(),
-             CP949Prober(),
-             Big5Prober(),
-             EUCTWProber(),
-             JOHABProber(),
-         ]
-         self.reset()
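For orientation, here is a minimal sketch of how a group prober like the one above can be driven by hand. It assumes chardet's standard prober interface (`feed`, `charset_name`, `get_confidence`); the sample bytes are illustrative and this is not part of the vendored file:

```python
# Usage sketch for MBCSGroupProber (assumes the chardet package is importable).
from chardet.mbcsgroupprober import MBCSGroupProber

prober = MBCSGroupProber()
prober.feed("こんにちは、世界".encode("utf-8"))  # feed raw bytes; each sub-prober votes
print(prober.charset_name)      # best current guess, e.g. "utf-8"
print(prober.get_confidence())  # confidence score in [0.0, 1.0]
```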
 
spaces/Billius/VizLib-TopLargeHospitalsNewJersey-04-07-2023/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: VizLib TopLargeHospitalsNewJersey 04 07 2023
3
- emoji: 💻
4
- colorFrom: yellow
5
- colorTo: purple
6
- sdk: streamlit
7
- sdk_version: 1.17.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/ISSUE_TEMPLATE/feature-request.md DELETED
@@ -1,32 +0,0 @@
1
- ---
2
- name: "\U0001F680Feature Request"
3
- about: Submit a proposal/request for a new detectron2 feature
4
-
5
- ---
6
-
7
- ## 🚀 Feature
8
- A clear and concise description of the feature proposal.
9
-
10
-
11
- ## Motivation & Examples
12
-
13
- Tell us why the feature is useful.
14
-
15
- Describe what the feature would look like, if it is implemented.
16
- Best demonstrated using **code examples** in addition to words.
17
-
18
- ## Note
19
-
20
- We only consider adding new features if they are relevant to many users.
21
-
22
- If you request implementation of research papers --
23
- we only consider papers that have enough significance and prevalence.
24
-
25
- We do not take requests for most projects in the `projects/` directory,
26
- because they are research code releases, mainly for other researchers to reproduce results.
27
-
28
- Instead of adding features inside detectron2,
29
- you can implement many features by [extending detectron2](https://detectron2.readthedocs.io/tutorials/extend.html).
30
- The [projects/](https://github.com/facebookresearch/detectron2/tree/master/projects/) directory
31
- contains many of such examples.
32
-
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/dev/packaging/gen_wheel_index.sh DELETED
@@ -1,26 +0,0 @@
1
- #!/bin/bash -e
2
-
3
-
4
- root=$1
5
- if [[ -z "$root" ]]; then
6
- echo "Usage: ./gen_wheel_index.sh /path/to/wheels"
7
- exit
8
- fi
9
-
10
- index=$root/index.html
11
-
12
- cd "$root"
13
- for cu in cpu cu92 cu100 cu101; do
14
- cd $cu
15
- echo "Creating $PWD/index.html ..."
16
- for whl in *.whl; do
17
- echo "<a href=\"${whl/+/%2B}\">$whl</a><br>"
18
- done > index.html
19
- cd "$root"
20
- done
21
-
22
- echo "Creating $index ..."
23
- for whl in $(find . -type f -name '*.whl' -printf '%P\n' | sort); do
24
- echo "<a href=\"${whl/+/%2B}\">$whl</a><br>"
25
- done > "$index"
26
-
 
spaces/CVPR/LIVE/pybind11/tests/test_opaque_types.cpp DELETED
@@ -1,67 +0,0 @@
1
- /*
2
- tests/test_opaque_types.cpp -- opaque types, passing void pointers
3
-
4
- Copyright (c) 2016 Wenzel Jakob <[email protected]>
5
-
6
- All rights reserved. Use of this source code is governed by a
7
- BSD-style license that can be found in the LICENSE file.
8
- */
9
-
10
- #include "pybind11_tests.h"
11
- #include <pybind11/stl.h>
12
- #include <vector>
13
-
14
- // IMPORTANT: Disable internal pybind11 translation mechanisms for STL data structures
15
- //
16
- // This also deliberately doesn't use the below StringList type alias to test
17
- // that MAKE_OPAQUE can handle a type containing a `,`. (The `std::allocator`
18
- // bit is just the default `std::vector` allocator).
19
- PYBIND11_MAKE_OPAQUE(std::vector<std::string, std::allocator<std::string>>);
20
-
21
- using StringList = std::vector<std::string, std::allocator<std::string>>;
22
-
23
- TEST_SUBMODULE(opaque_types, m) {
24
- // test_string_list
25
- py::class_<StringList>(m, "StringList")
26
- .def(py::init<>())
27
- .def("pop_back", &StringList::pop_back)
28
- /* There are multiple versions of push_back(), etc. Select the right ones. */
29
- .def("push_back", (void (StringList::*)(const std::string &)) &StringList::push_back)
30
- .def("back", (std::string &(StringList::*)()) &StringList::back)
31
- .def("__len__", [](const StringList &v) { return v.size(); })
32
- .def("__iter__", [](StringList &v) {
33
- return py::make_iterator(v.begin(), v.end());
34
- }, py::keep_alive<0, 1>());
35
-
36
- class ClassWithSTLVecProperty {
37
- public:
38
- StringList stringList;
39
- };
40
- py::class_<ClassWithSTLVecProperty>(m, "ClassWithSTLVecProperty")
41
- .def(py::init<>())
42
- .def_readwrite("stringList", &ClassWithSTLVecProperty::stringList);
43
-
44
- m.def("print_opaque_list", [](const StringList &l) {
45
- std::string ret = "Opaque list: [";
46
- bool first = true;
47
- for (auto entry : l) {
48
- if (!first)
49
- ret += ", ";
50
- ret += entry;
51
- first = false;
52
- }
53
- return ret + "]";
54
- });
55
-
56
- // test_pointers
57
- m.def("return_void_ptr", []() { return (void *) 0x1234; });
58
- m.def("get_void_ptr_value", [](void *ptr) { return reinterpret_cast<std::intptr_t>(ptr); });
59
- m.def("return_null_str", []() { return (char *) nullptr; });
60
- m.def("get_null_str_value", [](char *ptr) { return reinterpret_cast<std::intptr_t>(ptr); });
61
-
62
- m.def("return_unique_ptr", []() -> std::unique_ptr<StringList> {
63
- StringList *result = new StringList();
64
- result->push_back("some value");
65
- return std::unique_ptr<StringList>(result);
66
- });
67
- }
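For context, a short sketch of the Python side of these bindings, following the pybind11 test-suite convention of importing submodules from `pybind11_tests`; treat the module path as an assumption:

```python
# Python-side usage sketch for the opaque StringList binding above.
from pybind11_tests import opaque_types as m

l = m.StringList()
l.push_back("Element 1")
l.push_back("Element 2")
print(m.print_opaque_list(l))  # Opaque list: [Element 1, Element 2]

# The void-pointer helpers round-trip an opaque address through Python.
print(hex(m.get_void_ptr_value(m.return_void_ptr())))  # 0x1234
```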
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/uninitialized_fill.h DELETED
@@ -1,57 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- #pragma once
19
-
20
- #include <thrust/detail/config.h>
21
- #include <thrust/system/detail/generic/tag.h>
22
-
23
- namespace thrust
24
- {
25
- namespace system
26
- {
27
- namespace detail
28
- {
29
- namespace generic
30
- {
31
-
32
- template<typename DerivedPolicy,
33
- typename ForwardIterator,
34
- typename T>
35
- __host__ __device__
36
- void uninitialized_fill(thrust::execution_policy<DerivedPolicy> &exec,
37
- ForwardIterator first,
38
- ForwardIterator last,
39
- const T &x);
40
-
41
- template<typename DerivedPolicy,
42
- typename ForwardIterator,
43
- typename Size,
44
- typename T>
45
- __host__ __device__
46
- ForwardIterator uninitialized_fill_n(thrust::execution_policy<DerivedPolicy> &exec,
47
- ForwardIterator first,
48
- Size n,
49
- const T &x);
50
-
51
- } // end namespace generic
52
- } // end namespace detail
53
- } // end namespace system
54
- } // end namespace thrust
55
-
56
- #include <thrust/system/detail/generic/uninitialized_fill.inl>
57
-
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/transform_reduce.h DELETED
@@ -1,22 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system has no special transform_reduce functions
22
-
 
spaces/CVPR/SPOTER_Sign_Language_Recognition/spoter/gaussian_noise.py DELETED
@@ -1,18 +0,0 @@
- 
- import torch
- 
- 
- class GaussianNoise(object):
-     """Additive Gaussian noise transform for torch tensors."""
- 
-     def __init__(self, mean=0., std=1.):
-         self.std = std
-         self.mean = mean
- 
-     def __call__(self, tensor):
-         # Add zero-centered Gaussian noise scaled by std, then shift by mean.
-         return tensor + torch.randn(tensor.size()) * self.std + self.mean
- 
-     def __repr__(self):
-         return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
- 
- 
- if __name__ == "__main__":
-     # Minimal smoke test: print the transform and a noised tensor.
-     noise = GaussianNoise(mean=0., std=0.5)
-     print(noise)
-     print(noise(torch.zeros(2, 3)))
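A brief sketch of how a transform like this is commonly composed with others; the torchvision dependency and the noise level are assumptions, not part of the file above:

```python
# Hypothetical pipeline: add GaussianNoise after tensor conversion.
from torchvision import transforms

augment = transforms.Compose([
    transforms.ToTensor(),             # PIL image -> float tensor in [0, 1]
    GaussianNoise(mean=0., std=0.05),  # small jitter as data augmentation
])
```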
 
spaces/CVPR/WALT/mmdet/core/utils/dist_utils.py DELETED
@@ -1,69 +0,0 @@
- import warnings
- from collections import OrderedDict
- 
- import torch.distributed as dist
- from mmcv.runner import OptimizerHook
- from torch._utils import (_flatten_dense_tensors, _take_tensors,
-                           _unflatten_dense_tensors)
- 
- 
- def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
-     if bucket_size_mb > 0:
-         bucket_size_bytes = bucket_size_mb * 1024 * 1024
-         buckets = _take_tensors(tensors, bucket_size_bytes)
-     else:
-         buckets = OrderedDict()
-         for tensor in tensors:
-             tp = tensor.type()
-             if tp not in buckets:
-                 buckets[tp] = []
-             buckets[tp].append(tensor)
-         buckets = buckets.values()
- 
-     for bucket in buckets:
-         flat_tensors = _flatten_dense_tensors(bucket)
-         dist.all_reduce(flat_tensors)
-         flat_tensors.div_(world_size)
-         for tensor, synced in zip(
-                 bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
-             tensor.copy_(synced)
- 
- 
- def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
-     """Allreduce gradients.
- 
-     Args:
-         params (list[torch.Parameters]): List of parameters of a model
-         coalesce (bool, optional): Whether allreduce parameters as a whole.
-             Defaults to True.
-         bucket_size_mb (int, optional): Size of bucket, the unit is MB.
-             Defaults to -1.
-     """
-     grads = [
-         param.grad.data for param in params
-         if param.requires_grad and param.grad is not None
-     ]
-     world_size = dist.get_world_size()
-     if coalesce:
-         _allreduce_coalesced(grads, world_size, bucket_size_mb)
-     else:
-         for tensor in grads:
-             dist.all_reduce(tensor.div_(world_size))
- 
- 
- class DistOptimizerHook(OptimizerHook):
-     """Deprecated optimizer hook for distributed training."""
- 
-     def __init__(self, *args, **kwargs):
-         warnings.warn('"DistOptimizerHook" is deprecated, please switch to '
-                       '"mmcv.runner.OptimizerHook".')
-         super().__init__(*args, **kwargs)
- 
- 
- def reduce_mean(tensor):
-     """Obtain the mean of tensor on different GPUs."""
-     if not (dist.is_available() and dist.is_initialized()):
-         return tensor
-     tensor = tensor.clone()
-     dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
-     return tensor
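For context, a sketch of where these helpers typically sit in a manually synchronized training step; the model, data, and learning rate are illustrative, and `torch.distributed` is assumed to be initialized:

```python
# Hypothetical single training step using the helpers above (no DDP wrapper).
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

loss = model(torch.randn(8, 4)).pow(2).mean()
log_loss = reduce_mean(loss.detach())  # rank-averaged scalar, e.g. for logging
loss.backward()
allreduce_grads(model.parameters())    # average gradients across ranks
optimizer.step()
```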
 
spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/mysite/urls.py DELETED
@@ -1,23 +0,0 @@
- """
- URL configuration for mysite project.
- 
- The `urlpatterns` list routes URLs to views. For more information please see:
-     https://docs.djangoproject.com/en/4.2/topics/http/urls/
- Examples:
- Function views
-     1. Add an import:  from my_app import views
-     2. Add a URL to urlpatterns:  path('', views.home, name='home')
- Class-based views
-     1. Add an import:  from other_app.views import Home
-     2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')
- Including another URLconf
-     1. Import the include() function: from django.urls import include, path
-     2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
- """
- from django.contrib import admin
- from django.urls import path, include
- 
- urlpatterns = [
-     path('admin/', admin.site.urls),
-     path('', include('andrew_alpha.urls')),
- ]
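For context, a minimal sketch of the app-level URLconf that the `include('andrew_alpha.urls')` call above would pull in; the view name is a guess, not taken from this repo:

```python
# Hypothetical andrew_alpha/urls.py (the view name is illustrative).
from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),  # served at the project root via include()
]
```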
 
spaces/CikeyQI/Yunzai/Yunzai/lib/plugins/stdin.js DELETED
@@ -1,159 +0,0 @@
1
- import fetch from "node-fetch"
2
- import fs from "node:fs"
3
- import path from "node:path"
4
- import common from "../common/common.js"
5
- import { fileTypeFromBuffer } from "file-type"
6
-
7
- Bot.adapter.push(new class stdinAdapter {
8
- constructor() {
9
- this.id = "stdin"
10
- this.name = "Standard Input"
11
- this.path = "data/stdin/"
12
- common.mkdirs(this.path)
13
- }
14
-
15
- async makeBuffer(file) {
16
- if (file.match(/^base64:\/\//))
17
- return Buffer.from(file.replace(/^base64:\/\//, ""), "base64")
18
- else if (file.match(/^https?:\/\//))
19
- return Buffer.from(await (await fetch(file)).arrayBuffer())
20
- else if (fs.existsSync(file))
21
- return Buffer.from(fs.readFileSync(file))
22
- return file
23
- }
24
-
25
- async fileType(data) {
26
- const file = {}
27
- try {
28
- file.url = data.replace(/^base64:\/\/.*/, "base64://...")
29
- file.buffer = await this.makeBuffer(data)
30
- file.type = await fileTypeFromBuffer(file.buffer)
31
- file.path = `${this.path}${Date.now()}.${file.type.ext}`
32
- } catch (err) {
33
- logger.error(`File type detection error: ${logger.red(err)}`)
34
- }
35
- return file
36
- }
37
-
38
- async sendMsg(msg) {
39
- if (!Array.isArray(msg))
40
- msg = [msg]
41
- for (let i of msg) {
42
- if (typeof i != "object")
43
- i = { type: "text", data: { text: i }}
44
- else if (!i.data)
45
- i = { type: i.type, data: { ...i, type: undefined }}
46
-
47
- let file
48
- if (i.data.file)
49
- file = await this.fileType(i.data.file)
50
-
51
- switch (i.type) {
52
- case "text":
53
- if (i.data.text.match("\n"))
54
- i.data.text = `\n${i.data.text}`
55
- logger.info(`${logger.blue(`[${this.id}]`)} Sending text: ${i.data.text}`)
56
- break
57
- case "image":
58
- logger.info(`${logger.blue(`[${this.id}]`)} Sending image: ${file.url}\nFile saved to: ${logger.cyan(file.path)}`)
59
- fs.writeFileSync(file.path, file.buffer)
60
- break
61
- case "record":
62
- logger.info(`${logger.blue(`[${this.id}]`)} Sending audio: ${file.url}\nFile saved to: ${logger.cyan(file.path)}`)
63
- fs.writeFileSync(file.path, file.buffer)
64
- break
65
- case "video":
66
- logger.info(`${logger.blue(`[${this.id}]`)} Sending video: ${file.url}\nFile saved to: ${logger.cyan(file.path)}`)
67
- fs.writeFileSync(file.path, file.buffer)
68
- break
69
- case "reply":
70
- break
71
- case "at":
72
- break
73
- case "node":
74
- Bot.sendForwardMsg(msg => this.sendMsg(msg), i.data)
75
- break
76
- default:
77
- i = JSON.stringify(i)
78
- if (i.match("\n"))
79
- i = `\n${i}`
80
- logger.info(`${logger.blue(`[${this.id}]`)} Sending message: ${i}`)
81
- }
82
- }
83
- return { message_id: Date.now() }
84
- }
85
-
86
- recallMsg(message_id) {
87
- logger.info(`${logger.blue(`[${this.id}]`)} Recalling message: ${message_id}`)
88
- }
89
-
90
- async sendFile(file, name = path.basename(file)) {
91
- const buffer = await this.makeBuffer(file)
92
- if (!Buffer.isBuffer(buffer)) {
93
- logger.error(`${logger.blue(`[${this.id}]`)} File send error: cannot find file ${logger.red(file)}`)
94
- return false
95
- }
96
-
97
- const files = `${this.path}${Date.now()}-${name}`
98
- logger.info(`${logger.blue(`[${this.id}]`)} Sending file: ${file}\nFile saved to: ${logger.cyan(files)}`)
99
- return fs.writeFileSync(files, buffer)
100
- }
101
-
102
- pickFriend() {
103
- return {
104
- user_id: this.id,
105
- nickname: this.name,
106
- group_id: this.id,
107
- group_name: this.name,
108
- sendMsg: msg => this.sendMsg(msg),
109
- recallMsg: message_id => this.recallMsg(message_id),
110
- sendFile: (file, name) => this.sendFile(file, name),
111
- }
112
- }
113
-
114
- message(msg) {
115
- const data = {
116
- bot: Bot[this.id],
117
- self_id: this.id,
118
- user_id: this.id,
119
- post_type: "message",
120
- message_type: "private",
121
- sender: { user_id: this.id, nickname: this.name },
122
- message: [{ type: "text", text: msg }],
123
- raw_message: msg,
124
- friend: this.pickFriend(),
125
- }
126
- logger.info(`${logger.blue(`[${data.self_id}]`)} System message: [${data.sender.nickname}(${data.user_id})] ${data.raw_message}`)
127
-
128
- Bot.em(`${data.post_type}.${data.message_type}`, data)
129
- }
130
-
131
- load() {
132
- Bot[this.id] = {
133
- adapter: this,
134
- uin: this.id,
135
- nickname: this.name,
136
- stat: { start_time: Date.now()/1000 },
137
- version: { id: this.id, name: this.name },
138
- pickFriend: () => this.pickFriend(),
139
- get pickUser() { return this.pickFriend },
140
- get pickMember() { return this.pickFriend },
141
- get pickGroup() { return this.pickFriend },
142
-
143
- fl: new Map().set(this.id, {
144
- user_id: this.id,
145
- nickname: this.name,
146
- group_id: this.id,
147
- group_name: this.name,
148
- }),
149
- get gl() { return this.fl },
150
- gml: new Map,
151
- }
152
- Bot[this.id].gml.set(this.id, Bot[this.id].fl)
153
-
154
- process[this.id].on("data", data => this.message(data.toString()))
155
-
156
- logger.mark(`${logger.blue(`[${this.id}]`)} ${this.name}(${this.id}) connected`)
157
- Bot.em(`connect.${this.id}`, { self_id: this.id })
158
- }
159
- })