parquet-converter committed on
Commit caf9a01 · 1 Parent(s): dc785a0

Update parquet files (step 8 of 397)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. spaces/17TheWord/RealESRGAN/setup.py +0 -107
  2. spaces/1gistliPinn/ChatGPT4/Examples/All Autodesk 2018 Products Crack Keygen (x86x64) !Latest Utorrent HOT.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Download Driver Booster Pro Full Version.md +0 -21
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anime X 1.1.9 APK How to Download and Use It on Your Android Device.md +0 -106
  5. spaces/1phancelerku/anime-remove-background/Caves (Roguelike) APK A Mod Menu for Every Play Style and Preference.md +0 -148
  6. spaces/1phancelerku/anime-remove-background/Experience the Legendary Stick War with MOD APK Features.md +0 -93
  7. spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_537227KB.py +0 -123
  8. spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/commons.py +0 -166
  9. spaces/AIConsultant/MusicGen/audiocraft/grids/compression/_explorers.py +0 -55
  10. spaces/ASJMO/freegpt/g4f/Provider/Providers/Phind.py +0 -36
  11. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/util.py +0 -203
  12. spaces/Adapter/T2I-Adapter/docs/examples.md +0 -41
  13. spaces/Adr740/CV_XPLORER_POC/app.py +0 -38
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/kawaseblurpipeline-plugin.d.ts +0 -30
  15. spaces/AiiluoChen/webui/README.md +0 -20
  16. spaces/AlekseyCalvin/dreambooth-training3/convertosd.py +0 -302
  17. spaces/Alfasign/HuggingGPT-Lite/models_server.py +0 -779
  18. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/facial_recognition/__init__.py +0 -0
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/TRANSLATING.md +0 -57
  20. spaces/Andy1621/uniformer_image_detection/mmdet/core/utils/misc.py +0 -61
  21. spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/accuracy.py +0 -78
  22. spaces/Andy1621/uniformer_image_detection/tools/deployment/pytorch2onnx.py +0 -244
  23. spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py +0 -4
  24. spaces/Andy1621/uniformer_image_segmentation/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py +0 -2
  25. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/cmd_wsl.bat +0 -11
  26. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/silero_tts/tts_preprocessor.py +0 -200
  27. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/memory.py +0 -25
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/appdirs.py +0 -608
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/py34compat.py +0 -13
  30. spaces/Bakar31/MLOps_Practice_Repo_1/README.md +0 -12
  31. spaces/Banbri/zcvzcv/src/lib/useImageDimension.ts +0 -20
  32. spaces/BartPoint/VoiceChange/infer_pack/commons.py +0 -166
  33. spaces/Benson/text-generation/Examples/Car Drift Game Download Apkpure.md +0 -58
  34. spaces/Benson/text-generation/Examples/Descargar Camin Simulador ltimo Para Ventanas 10.md +0 -97
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/core.py +0 -0
  36. spaces/Bilalst/Gradio_Youtube_Transcript_v2/app.py +0 -116
  37. spaces/Bonosa2/dall-e_image-generation/app.py +0 -43
  38. spaces/CVPR/LIVE/thrust/thrust/iterator/transform_output_iterator.h +0 -163
  39. spaces/CVPR/LIVE/thrust/thrust/iterator/zip_iterator.h +0 -245
  40. spaces/CVPR/WALT/mmdet/models/dense_heads/free_anchor_retina_head.py +0 -270
  41. spaces/CVPR/WALT/walt/datasets/walt_3d.py +0 -535
  42. spaces/Catmeow/AI_story_writing/README.md +0 -12
  43. spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/file_operations.py +0 -267
  44. spaces/CofAI/chat.b4/client/css/message-input.css +0 -27
  45. spaces/CofAI/chat.b4/g4f/Provider/Providers/helpers/gpt4love.py +0 -48
  46. spaces/Cran-May/SEA-Streamlit/README.md +0 -14
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/version.py +0 -6
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/__init__.py +0 -61
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/__main__.py +0 -100
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_content.py +0 -238
spaces/17TheWord/RealESRGAN/setup.py DELETED
@@ -1,107 +0,0 @@
- #!/usr/bin/env python
-
- from setuptools import find_packages, setup
-
- import os
- import subprocess
- import time
-
- version_file = 'realesrgan/version.py'
-
-
- def readme():
-     with open('README.md', encoding='utf-8') as f:
-         content = f.read()
-     return content
-
-
- def get_git_hash():
-
-     def _minimal_ext_cmd(cmd):
-         # construct minimal environment
-         env = {}
-         for k in ['SYSTEMROOT', 'PATH', 'HOME']:
-             v = os.environ.get(k)
-             if v is not None:
-                 env[k] = v
-         # LANGUAGE is used on win32
-         env['LANGUAGE'] = 'C'
-         env['LANG'] = 'C'
-         env['LC_ALL'] = 'C'
-         out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
-         return out
-
-     try:
-         out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
-         sha = out.strip().decode('ascii')
-     except OSError:
-         sha = 'unknown'
-
-     return sha
-
-
- def get_hash():
-     if os.path.exists('.git'):
-         sha = get_git_hash()[:7]
-     else:
-         sha = 'unknown'
-
-     return sha
-
-
- def write_version_py():
-     content = """# GENERATED VERSION FILE
- # TIME: {}
- __version__ = '{}'
- __gitsha__ = '{}'
- version_info = ({})
- """
-     sha = get_hash()
-     with open('VERSION', 'r') as f:
-         SHORT_VERSION = f.read().strip()
-     VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])
-
-     version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
-     with open(version_file, 'w') as f:
-         f.write(version_file_str)
-
-
- def get_version():
-     with open(version_file, 'r') as f:
-         exec(compile(f.read(), version_file, 'exec'))
-     return locals()['__version__']
-
-
- def get_requirements(filename='requirements.txt'):
-     here = os.path.dirname(os.path.realpath(__file__))
-     with open(os.path.join(here, filename), 'r') as f:
-         requires = [line.replace('\n', '') for line in f.readlines()]
-     return requires
-
-
- if __name__ == '__main__':
-     write_version_py()
-     setup(
-         name='realesrgan',
-         version=get_version(),
-         description='Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration',
-         long_description=readme(),
-         long_description_content_type='text/markdown',
-         author='Xintao Wang',
-         author_email='[email protected]',
-         keywords='computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan',
-         url='https://github.com/xinntao/Real-ESRGAN',
-         include_package_data=True,
-         packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
-         classifiers=[
-             'Development Status :: 4 - Beta',
-             'License :: OSI Approved :: Apache Software License',
-             'Operating System :: OS Independent',
-             'Programming Language :: Python :: 3',
-             'Programming Language :: Python :: 3.7',
-             'Programming Language :: Python :: 3.8',
-         ],
-         license='BSD-3-Clause License',
-         setup_requires=['cython', 'numpy'],
-         install_requires=get_requirements(),
-         zip_safe=False)
spaces/1gistliPinn/ChatGPT4/Examples/All Autodesk 2018 Products Crack Keygen (x86x64) !Latest Utorrent HOT.md DELETED
@@ -1,6 +0,0 @@
- <h2>All Autodesk 2018 Products Crack Keygen (x86x64) !{Latest} Utorrent</h2><br /><p><b><b>Download Zip</b> &#9733;&#9733;&#9733; <a href="https://imgfil.com/2uxXDX">https://imgfil.com/2uxXDX</a></b></p><br /><br />
- <br />
- Autodesk 3ds max 2011 32 bit xforce keygen free. ... sketchbook pro 2011 serial number and product key download on autodesk ... 12 Jan 2015 8 free download full version with crack xforce keygen 64 bit microsoft visual 3. ... Latest Utorrent. All AutoCAD For Mac 2018 Products Crack Keygen (x86x64) !{ 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download Driver Booster Pro Full Version.md DELETED
@@ -1,21 +0,0 @@
- <br />
- <h1>How to Download and Install Driver Booster Pro Full Version for Windows</h1>
- <p>Driver Booster Pro is a powerful and easy-to-use tool that helps you update your device drivers automatically with just one click. It can scan and identify outdated, faulty, missing, and game-ready drivers from a large cloud database of over 2.5 million drivers, and download and install them at an unrivaled speed. It also offers backup and restore features to keep your drivers safe and secure.</p>
- <h2>Download driver booster pro full version</h2><br /><p><b><b>Download Zip</b> &#8230; <a href="https://imgfil.com/2uxZpp">https://imgfil.com/2uxZpp</a></b></p><br /><br />
- <p>If you want to enjoy the full benefits of Driver Booster Pro, such as priority updates, larger driver database, faster download speed, driver tweak for better gaming performance, and 24/7 technical support, you need to purchase a license code from the official website or use one of the free license codes provided by some online sources[^5^]. In this article, we will show you how to download and install Driver Booster Pro full version for Windows using a free license code.</p>
- <h2>Step 1: Download Driver Booster Pro</h2>
- <p>You can download Driver Booster Pro from the official website[^3^] or from other trusted sources such as FileCR[^1^] or 4DOWNLOAD[^2^]. The installation file is about 30 MB in size and supports Windows 11/10/8.1/8/7/Vista/XP. Here we will use the FileCR link as an example.</p>
- <p>Go to <a href="https://filecr.com/windows/iobit-driver-booster/">https://filecr.com/windows/iobit-driver-booster/</a> and click on the "Download Now" button. You will be redirected to another page where you can choose a server to download the file. Click on any server and wait for the download to start. You may need to enter a password to extract the file. The password is www.filecr.com.</p>
- <h2>Step 2: Install Driver Booster Pro</h2>
- <p>After downloading the file, double-click on it to run the setup wizard. Follow the on-screen instructions to complete the installation process. You can choose your preferred language, destination folder, and additional tasks such as creating a desktop icon or adding Driver Booster to the system tray.</p>
- <p></p>
- <p>When the installation is finished, click on "Finish" to launch Driver Booster Pro. You will see the main interface of the program, which shows your current driver status and a big "Scan" button.</p>
- <h2>Step 3: Activate Driver Booster Pro</h2>
- <p>To activate Driver Booster Pro full version, you need a valid license code. You can buy one from the official website[^3^] or use one of the free license codes provided by some online sources[^5^]. Here we will use a free license code from nsaneforums.com as an example.</p>
- <p>Go to <a href="https://nsaneforums.com/topic/438923-expired-iobit-driver-booster-pro-v1030-free-1-year-license-code-full-version/">https://nsaneforums.com/topic/438923-expired-iobit-driver-booster-pro-v1030-free-1-year-license-code-full-version/</a> and copy one of the license codes from the list. Make sure you choose a code that is not expired or used by someone else.</p>
- <p>Go back to Driver Booster Pro and click on the "Enter License" button at the bottom right corner of the interface. Paste the license code into the text box and click on "Activate Now". You will see a message that says "Congratulations! You have successfully activated Driver Booster PRO!"</p>
- <h2>Step 4: Update Your Drivers</h2>
- <p>Now that you have activated Driver Booster Pro full version, you can enjoy all its features and benefits. To update your drivers, simply click on the "Scan" button and wait for Driver Booster Pro to scan your system and find any outdated, faulty, missing, or game-ready drivers. You can see the details of each driver such as its name, version, date, size, status, and source.</p>
- <p>To update all your drivers at once, click on the "Update Now" button at the top right corner of the interface. You can also choose to update specific drivers by clicking on the "Update" button next to each driver. Driver Booster Pro will download and install the drivers for you automatically</p> d5da3c52bf<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anime X 1.1.9 APK How to Download and Use It on Your Android Device.md DELETED
@@ -1,106 +0,0 @@
- <br />
- <h1>Anime X APK 1.1.9 Download: Watch Free Anime Online</h1>
- <p>If you are an anime fan, you might be looking for a way to watch your favorite shows and movies online for free. There are many streaming platforms that offer anime content, but they often require subscriptions or have limited selections. That's why you might want to try Anime X, a free app that lets you watch thousands of anime episodes and movies on your Android device.</p>
- <h2>What is Anime X?</h2>
- <p>Anime X is an app that allows you to stream and download anime videos from various sources, such as Gogoanime, KissAnime, 9Anime, and more. You can choose from a wide range of genres, such as action, comedy, romance, horror, sci-fi, and more. You can also search for specific titles or browse by popularity, ratings, or latest updates.</p>
- <h2>anime x apk 1.1.9 download</h2><br /><p><b><b>Download File</b> &#10042; <a href="https://urlin.us/2uSUCS">https://urlin.us/2uSUCS</a></b></p><br /><br />
- <h3>Features of Anime X</h3>
- <p>Some of the features that make Anime X stand out from other anime apps are:</p>
- <ul>
- <li>It has a simple and user-friendly interface that makes it easy to navigate and find what you want.</li>
- <li>It supports multiple languages, such as English, Arabic, Spanish, French, and more.</li>
- <li>It has a built-in video player that supports subtitles, playback speed, and screen orientation.</li>
- <li>It allows you to download videos for offline viewing or share them with your friends via social media or other apps.</li>
- <li>It has a favorites list that lets you save the shows and movies you like for quick access.</li>
- <li>It has a notification system that alerts you when new episodes or movies are added.</li>
- </ul>
- <h3>How to download and install Anime X APK 1.1.9</h3>
- <p>To download and install Anime X APK 1.1.9 on your Android device, you need to follow these steps:</p>
- <ol>
- <li>Go to the official website of Anime X or click on this link to download the APK file.</li>
- <li>Once the download is complete, open the file manager on your device and locate the APK file.</li>
- <li>Tap on the file and allow the installation from unknown sources if prompted.</li>
- <li>Wait for the installation to finish and then launch the app from your app drawer or home screen.</li>
- <li>Enjoy watching free anime online with Anime X!</li>
- </ol>
- <h2>Why use Anime X?</h2>
- <p>Anime X is one of the best apps for anime lovers who want to watch their favorite shows and movies online for free. It has many advantages over other streaming platforms, such as:</p>
- <h3>Pros and cons of Anime X</h3>
- <table border="1">
- <tr><th>Pros</th><th>Cons</th></tr>
- <tr><td>It offers a huge collection of anime videos from various sources and genres.</td><td>It may not have some of the latest or obscure titles that are available on other platforms.</td></tr>
- <tr><td>It has a high-quality video player that supports subtitles and other features.</td><td>It may encounter some buffering or loading issues depending on your internet connection or server availability.</td></tr>
- <tr><td>It allows you to download videos for offline viewing or share them with your friends.</td><td>It may consume a lot of storage space or data usage if you download or stream too many videos.</td></tr>
- <tr><td>It has a simple and user-friendly interface that makes it easy to use.</td><td>It may contain some ads or pop-ups that can be annoying or intrusive.</td></tr>
- <tr><td>It is free and does not require any registration or subscription.</td><td>It may not be legal or safe in some countries or regions where anime piracy is prohibited or regulated.</td></tr>
- </table>
- <h3>Alternatives to Anime X</h3>
- <p>If If you are not satisfied with Anime X or want to try some other apps that offer similar features, you can check out these alternatives:</p>
- <p>anime x app apk 1.1.9 free download<br />
- anime x streaming apk 1.1.9 latest version<br />
- anime x mod apk 1.1.9 unlocked features<br />
- anime x pro apk 1.1.9 premium access<br />
- anime x apk 1.1.9 download for android<br />
- anime x apk 1.1.9 download for pc<br />
- anime x apk 1.1.9 download for ios<br />
- anime x apk 1.1.9 download for firestick<br />
- anime x apk 1.1.9 download for smart tv<br />
- anime x apk 1.1.9 download for windows 10<br />
- anime x apk 1.1.9 download no ads<br />
- anime x apk 1.1.9 download offline mode<br />
- anime x apk 1.1.9 download with subtitles<br />
- anime x apk 1.1.9 download high quality<br />
- anime x apk 1.1.9 download fast speed<br />
- anime x apk 1.1.9 download filehippo.com[^4^]<br />
- anime x apk 1.1.9 download gitlab.com[^2^] [^3^]<br />
- anime x apk 1.1.9 download apkpure.com<br />
- anime x apk 1.1.9 download uptodown.com<br />
- anime x apk 1.1.9 download malavida.com<br />
- anime x apk 1.1.9 download softonic.com<br />
- anime x apk 1.1.9 download androidapksfree.com<br />
- anime x apk 1.1.9 download apkmirror.com<br />
- anime x apk 1.1.9 download apkmody.io<br />
- anime x apk 1.1.9 download happymod.com<br />
- how to install anime x apk 1.1.9 on android<br />
- how to install anime x apk 1.1.9 on pc<br />
- how to install anime x apk 1.1.9 on ios<br />
- how to install anime x apk 1.1.9 on firestick<br />
- how to install anime x apk 1.1.9 on smart tv<br />
- how to install anime x apk 1.1.9 on windows 10<br />
- how to update anime x apk to version 1.2 or higher<br />
- how to fix anime x apk not working or crashing issues<br />
- how to uninstall or delete anime x apk from device<br />
- how to use anime x apk to watch and download anime online<br />
- what is new in anime x apk version 1.2 or higher<br />
- what are the features and benefits of anime x apk version 1.x.x <br />
- what are the requirements and compatibility of anime x apk version 2.x.x <br />
- what are the alternatives and competitors of anime x apk in the market <br />
- what are the reviews and ratings of anime x apk by users and experts <br />
- is anime x apk safe and secure to use and download <br />
- is anime x apk legal and ethical to use and download <br />
- is anime x apk free or paid to use and download <br />
- is anime x apk original or modified to use and download <br />
- is anime x apk available or banned in my country or region</p>
- <ul>
- <li><strong>AnimeFlix</strong>: This is another app that lets you watch and download anime videos from various sources, such as Gogoanime, 4Anime, AnimeDao, and more. It has a sleek and modern interface that makes it easy to browse and find what you want. It also has a dark mode option that reduces eye strain and saves battery life. You can download it from here .</li>
- <li><strong>AnimeGlare</strong>: This is an app that allows you to stream anime videos from multiple servers, such as Streamtape, Vidstreaming, Mp4upload, and more. It has a simple and minimalist interface that makes it fast and smooth to use. It also has a favorites list, a history list, and a random anime generator. You can download it from here .</li>
- <li><strong>AnimeZone</strong>: This is an app that enables you to watch and download anime videos from various websites, such as AnimeHeaven, AnimeFreak, AnimeUltima, and more. It has a colorful and attractive interface that makes it fun and enjoyable to use. It also has a chat room, a news section, and a request feature. You can download it from here .</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Anime X is one of the best apps for anime fans who want to watch free anime online on their Android devices. It has many features that make it stand out from other streaming platforms, such as a huge collection of anime videos, a high-quality video player, a download and share option, a simple and user-friendly interface, and a notification system. However, it also has some drawbacks, such as limited or outdated titles, buffering or loading issues, storage or data consumption, ads or pop-ups, and legal or safety concerns. Therefore, you should use it at your own risk and discretion. Alternatively, you can try some other apps that offer similar features, such as AnimeFlix, AnimeGlare, or AnimeZone.</p>
- <h3>FAQs</h3>
- <p>Here are some of the frequently asked questions about Anime X:</p>
- <ol>
- <li><strong>Is Anime X safe to use?</strong></li>
- <p>Anime X is not available on the Google Play Store or any other official app store. Therefore, you need to download it from third-party sources that may not be secure or reliable. Moreover, Anime X may contain some ads or pop-ups that can redirect you to malicious or inappropriate websites. Furthermore, Anime X may not be legal or safe in some countries or regions where anime piracy is prohibited or regulated. Therefore, you should use it at your own risk and discretion.</p>
- <li><strong>Does Anime X require any permissions?</strong></li>
- <p>Anime X requires some permissions to function properly on your device. These include access to your storage (to download videos), access to your network (to stream videos), access to your device settings (to change screen orientation), and access to your notifications (to alert you of new updates). You can grant or deny these permissions as per your preference.</p>
- <li><strong>How can I update Anime X?</strong></li>
- <p>Anime X does not have an auto-update feature. Therefore, you need to manually check for updates on the official website of Anime X or on the app itself. If there is a new version available, you can download and install it following the same steps as mentioned above.</p>
- <li><strong>How can I contact the developers of Anime X?</strong></li>
- <p>If you have any questions, suggestions, feedbacks, or complaints about Anime X, you can contact the developers via email at [email protected] or via their Facebook page . They will try to respond to you as soon as possible.</p>
- <li><strong>How can I support the developers of Anime X?</strong></li>
- <p>If you like Anime X and want to support the developers for their hard work and dedication, you can donate to them via PayPal or Patreon . You can also rate and review the app on their website or on their Facebook page . You can also share the app with your friends and family who love anime.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Caves (Roguelike) APK A Mod Menu for Every Play Style and Preference.md DELETED
@@ -1,148 +0,0 @@
- <br />
- <h1>What is Caves (Roguelike)?</h1>
- <p>Caves (Roguelike) is a pixel-art dungeon crawler game that challenges you to explore randomly generated underground levels, fight monsters, collect loot, and upgrade your character. The game is inspired by classic roguelike games such as Rogue, Nethack, and Dungeon Crawl Stone Soup, which means that every run is different and death is permanent. You can choose from various classes, skills, and items to customize your playstyle and strategy.</p>
- <h2>Why play Caves (Roguelike)?</h2>
- <p>If you are looking for a game that offers a high level of replayability, difficulty, and variety, then Caves (Roguelike) is a great choice. Here are some reasons why you should play this game:</p>
- <h2>caves (roguelike mod menu apk)</h2><br /><p><b><b>Download File</b> &raquo;&raquo;&raquo; <a href="https://jinyurl.com/2uNSOB">https://jinyurl.com/2uNSOB</a></b></p><br /><br />
- <ul>
- <li>It has retro graphics and sound effects that create a nostalgic and immersive atmosphere.</li>
- <li>It has a simple and intuitive interface that makes it easy to navigate and control.</li>
- <li>It has a deep and complex gameplay system that requires skill, strategy, and luck.</li>
- <li>It has a large amount of content that includes hundreds of items, enemies, traps, secrets, and bosses.</li>
- <li>It has a lot of customization options that allow you to tailor your character to your preferences.</li>
- <li>It has a lot of fun and humor that adds charm and personality to the game.</li>
- </ul>
- <h3>How to play Caves (Roguelike)?</h3>
- <p>The basic gameplay of Caves (Roguelike) is similar to other roguelike games. You start by choosing a class from the available options, such as warrior, mage, rogue, or priest. Each class has its own strengths, weaknesses, and skills. Then, you enter the dungeon and explore each floor by moving around with the arrow keys or tapping on the screen. You can interact with objects, such as chests, doors, switches, or stairs, by pressing the spacebar or tapping on them. You can also use items from your inventory by pressing the I key or tapping on the backpack icon. You can fight enemies by moving into them or using skills from your skill bar by pressing the number keys or tapping on the skill icons. You can also use potions or scrolls from your quick slots by pressing the Q or E keys or tapping on the potion or scroll icons. You can also access the game menu by pressing the ESC key or tapping on the menu icon. The game menu allows you to save, load, quit, or change the game settings. Your goal is to reach the deepest level of the dungeon and defeat the final boss. Along the way, you will find various items, such as weapons, armor, rings, amulets, or artifacts, that can improve your stats and abilities. You will also gain experience points and level up by killing enemies, which will allow you to increase your attributes and skills. However, you will also face many dangers, such as traps, curses, diseases, or hunger, that can hinder your progress and end your run. You have to be careful and smart to survive and succeed in Caves (Roguelike).</p>
- <h4>Tips and tricks for Caves (Roguelike)</h4>
- <p>Caves (Roguelike) is a challenging game that requires a lot of trial and error and learning from your mistakes. Here are some tips and tricks that can help you improve your performance and enjoyment of the game:</p>
- <ul>
- <li>Experiment with different classes, skills, and items to find the ones that suit your playstyle and strategy.</li>
- <li>Explore every floor thoroughly and don't miss any secrets or hidden areas.</li>
- <li>Use your skills wisely and don't waste them on weak enemies or unnecessary situations.</li>
- <li>Manage your inventory carefully and don't carry too much junk or useless items.</li>
- <li>Identify your items before using them to avoid unpleasant surprises or negative effects.</li>
- <li>Use potions and scrolls sparingly and save them for emergencies or boss fights.</li>
- <li>Pay attention to your health, mana, hunger, and status effects and take care of them accordingly.</li>
- <li>Avoid unnecessary risks and fights and know when to run away or retreat.</li>
- <li>Learn from your deaths and try again with a different approach or strategy.</li>
- <li>Have fun and don't give up!</li>
- </ul>
- <h2>What is a mod menu apk?</h2>
- <p>A mod menu apk is a modified version of an original game application that allows you to access various cheats, hacks, or features that are not available in the official game. A mod menu apk usually has a menu interface that lets you toggle on or off different options and settings that can alter the gameplay in various ways. For example, you can enable unlimited money, health, ammo, or resources; unlock all levels, items, or characters; activate god mode, speed hack, or wall hack; or customize the graphics, sound, or controls of the game. A mod menu apk can enhance your gaming experience by making it more fun, easy, or interesting.</p>
- <p>caves roguelike mod apk unlimited skills<br />
- caves roguelike mod menu apk download<br />
- caves roguelike mod apk latest version<br />
- caves roguelike mod menu apk free<br />
- caves roguelike mod apk android<br />
- caves roguelike mod menu apk 2023<br />
- caves roguelike mod apk no root<br />
- caves roguelike mod menu apk offline<br />
- caves roguelike mod apk unlimited money<br />
- caves roguelike mod menu apk hack<br />
- caves roguelike mod apk 0.95.2.41<br />
- caves roguelike mod menu apk online<br />
- caves roguelike mod apk unlimited health<br />
- caves roguelike mod menu apk 36dev<br />
- caves roguelike mod apk premium<br />
- caves roguelike mod menu apk cheats<br />
- caves roguelike mod apk unlocked<br />
- caves roguelike mod menu apk update<br />
- caves roguelike mod apk pro<br />
- caves roguelike mod menu apk review<br />
- caves roguelike mod apk full<br />
- caves roguelike mod menu apk cracked<br />
- caves roguelike mod apk paid<br />
- caves roguelike mod menu apk reddit<br />
- caves roguelike mod apk mega<br />
- caves roguelike mod menu apk mediafire<br />
- caves roguelike mod apk vip<br />
- caves roguelike mod menu apk tutorial<br />
- caves roguelike mod apk obb<br />
- caves roguelike mod menu apk gameplay</p>
- <h3>Benefits of using a mod menu apk</h3>
- <p>Using a mod menu apk can have many benefits for players who want to enjoy their games in different ways. Here are some of the advantages that a mod menu apk can offer:</p>
- <ul>
- <li>It can give you more freedom and control over your game.</li>
- <li>It can make your game more exciting and entertaining.</li>
- <li>It can help you overcome difficult or frustrating parts of the game.</li>
- <li>It can let you explore new aspects or features of the game.</li>
- <li>It can save you time and money by unlocking everything for free.</li>
- </ul>
- <h4>Risks of using a mod menu apk</h4>
- <p>However, using a mod menu apk can also have some drawbacks and dangers for players who are not careful or aware of the consequences. Here are some of the risks that a mod menu apk can pose:</p>
- <ul>
- <li>It can damage your device or corrupt your data by containing viruses, malware, or bugs.</li>
- <li>It can cause compatibility issues or errors with your game or device by being outdated, incompatible, or poorly made.</li>
- <li>It can ruin your game balance or challenge by making it too easy or boring.</li>
- <li>It can spoil your game story or content by revealing spoilers or secrets.</li>
- <li>It can get you banned or penalized by the game developers or publishers by violating their terms of service or policies.</li>
- </ul>
- <h2>How to download and install Caves (Roguelike) mod menu apk?</h2>
- <p>If you want to try out Caves (Roguelike) mod menu apk for yourself, you will need to follow some simple steps to download and install it on your device. Here is a step-by-step guide on how to do it:</p>
- <ol>
- <li>Find a reliable and safe source for Caves (Roguelike) mod menu apk. You can use one of the websites listed below in this article.</li>
- <li>Download the mod menu apk file from the website to your device. Make sure you have enough storage space and a stable internet connection.</li>
- <li>Enable the installation of unknown sources on your device. You can do this by going to your device settings, security, and allowing unknown sources.</li>
- <li>Locate the downloaded mod menu apk file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.</li>
- <li>Launch the game from your device and enjoy the mod menu apk features. You can access the mod menu by tapping on the icon on the top left corner of the screen. You can also adjust the mod settings from the game menu.</li>
- </ol>
- <h3>Requirements for Caves (Roguelike) mod menu apk</h3>
- <p>Before you download and install Caves (Roguelike) mod menu apk, you should make sure that your device meets the minimum and recommended specifications for the game. Here are the requirements for Caves (Roguelike) mod menu apk:</p>
- <table>
- <tr>
- <th>Minimum</th>
- <th>Recommended</th>
- </tr>
- <tr>
- <td>Android 4.1 or higher</td>
- <td>Android 6.0 or higher</td>
- </tr>
- <tr>
- <td>1 GB of RAM</td>
- <td>2 GB of RAM or more</td>
- </tr>
- <tr>
- <td>100 MB of free storage space</td>
- <td>200 MB of free storage space or more</td>
- </tr>
- <tr>
- <td>A stable internet connection</td>
- <td>A fast and reliable internet connection</td>
- </tr>
- </table>
- <h4>Sources for Caves (Roguelike) mod menu apk</h4>
- <p>There are many websites that offer Caves (Roguelike) mod menu apk for download, but not all of them are trustworthy or safe. Some of them may contain viruses, malware, or fake files that can harm your device or data. Therefore, you should be careful and selective when choosing a source for Caves (Roguelike) mod menu apk. Here are some of the reliable and safe websites where you can download Caves (Roguelike) mod menu apk:</p>
- <ul>
- <li>[Moddroid.com](^1^): This website provides a variety of modded games and apps for Android devices, including Caves (Roguelike) mod menu apk. The website is easy to use and has a fast download speed. The website also has a rating and review system that allows you to see the feedback and comments from other users.</li>
- <li>[Apkdone.com]: This website offers a large collection of premium and modded games and apps for Android devices, including Caves (Roguelike) mod menu apk. The website is user-friendly and has a simple design. The website also has a search and filter function that helps you find what you are looking for.</li>
- <li>[Apkhome.net]: This website features a wide range of free and modded games and apps for Android devices, including Caves (Roguelike) mod menu apk. The website is well-organized and has a clear layout. The website also has a blog section that provides news, updates, and tips about Android games and apps.</li>
- </ul>
- <h2>What features does Caves (Roguelike) mod menu apk have?</h2>
- <p>Caves (Roguelike) mod menu apk has many features that can enhance your gaming experience and make it more fun, easy, or interesting. Here are some of the features that Caves (Roguelike) mod menu apk provides:</p>
- <h3>Unlimited skills</h3>
- <p>This feature allows you to use any skill without cooldown or cost. This means that you can spam your skills as much as you want without worrying about running out of mana or waiting for them to recharge. This can give you an edge in combat and help you defeat enemies faster and easier.</p>
- <h4>How to activate unlimited skills</h4>
- <p>To activate unlimited skills, you need to go to the mod menu by tapping on the icon on the top left corner of the screen. Then, you need to toggle on the option that says "Unlimited Skills". You will see a green check mark next to it when it is enabled. You can also toggle it off by tapping on it again.</p>
- <h3>God mode</h3>
- <p>This feature makes you invincible and immune to damage. This means that you can survive any attack or trap without losing any health or dying. This can make you unstoppable and fearless in exploring the dungeon and facing any enemy or boss.</p>
- <h4>How to activate god mode</h4>
- <p>To activate god mode, you need to go to the mod menu by tapping on the icon on the top left corner of the screen. Then, you need to toggle on the option that says "God Mode". You will see a green check mark next to it when it is enabled. You can also toggle it off by tapping on it again.</p>
- <h2>Conclusion</h2>
- <p>Caves (Roguelike) is a pixel-art dungeon crawler game that offers a high level of replayability, difficulty, and variety. You can choose from various classes, skills, and items to customize your playstyle and strategy. You can also use a mod menu apk to access various cheats, hacks, or features that can alter the gameplay in various ways. However, you should also be aware of the risks and consequences of using a mod menu apk and use it responsibly and ethically. If you want to download and install Caves (Roguelike) mod menu apk, you can follow the steps and sources provided in this article. Have fun and enjoy the game!</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions and their answers about Caves (Roguelike) and its mod menu apk:</p>
- <ol>
- <li>Q: Is Caves (Roguelike) free to play?</li>
- <li>A: Yes, Caves (Roguelike) is free to play and download from the Google Play Store or other official sources. However, the game may contain ads or in-app purchases that require real money.</li>
- <li>Q: Is Caves (Roguelike) mod menu apk safe to use?</li>
- <li>A: It depends on the source and quality of the mod menu apk. Some mod menu apks may contain viruses, malware, or fake files that can harm your device or data. Therefore, you should only download and install mod menu apks from reliable and safe websites that have positive ratings and reviews from other users.</li>
- <li>Q: Is Caves (Roguelike) mod menu apk legal to use?</li>
- <li>A: It depends on the laws and regulations of your country or region. Some countries or regions may prohibit or restrict the use of mod menu apks or other forms of game modification or cheating. Therefore, you should check the terms of service and policies of the game developers or publishers and the laws and regulations of your country or region before using a mod menu apk.</li>
- <li>Q: How can I update Caves (Roguelike) mod menu apk?</li>
- <li>A: You can update Caves (Roguelike) mod menu apk by downloading and installing the latest version of the mod menu apk from the same source that you used before. However, you should also backup your game data before updating to avoid losing your progress or settings.</li>
- <li>Q: How can I uninstall Caves (Roguelike) mod menu apk?</li>
- <li>A: You can uninstall Caves (Roguelike) mod menu apk by deleting the mod menu apk file from your device or by using an uninstaller app that can remove all traces of the mod menu apk from your device. However, you should also backup your game data before uninstalling to avoid losing your progress or settings.</li>
- </ol></p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Experience the Legendary Stick War with MOD APK Features.md DELETED
@@ -1,93 +0,0 @@
-
- <h1>Stick War: Legacy MOD APK - The Ultimate Strategy Game for Android</h1>
- <p>If you are a fan of strategy games, you might have heard of Stick War: Legacy, one of the most popular and addictive web games ever. Now, you can enjoy this game on your Android device with Stick War: Legacy MOD APK, a modified version that gives you unlimited gems, unlocked skins and weapons, and no ads. In this article, we will tell you everything you need to know about Stick War: Legacy MOD APK, including its features, how to download and install it, and some tips and tricks for playing it.</p>
- <h2>stickman war mod apk</h2><br /><p><b><b>Download Zip</b> &#10084; <a href="https://jinyurl.com/2uNRD2">https://jinyurl.com/2uNRD2</a></b></p><br /><br />
- <h2>Introduction</h2>
- <p>Stick War: Legacy is a strategy game that puts you in charge of an army of stickmen who are fighting against other stickmen nations. You can control each and every character in your army, from miners who gather resources, to swordsmen who slash enemies, to archers who shoot arrows from afar. You can also use spells and special abilities to turn the tide of the battle. Your goal is to conquer all the territories on the map and become the ultimate stickman leader.</p>
- <h3>What is Stick War: Legacy?</h3>
- <p>Stick War: Legacy is the official mobile version of the original web game, Stick War. It was developed by Max Games Studios and released in 2016. It has been downloaded over 100 million times on Google Play Store and has an average rating of 4.5 out of 5 stars. It features several game modes, such as Campaign, Endless Deads, Tournament, and Sandbox. It also has different difficulty levels, from Normal to Insane. You can play Stick War: Legacy for free, but you will have to watch ads and earn gems slowly.</p>
- <h3>What is Stick War: Legacy MOD APK?</h3>
- <p>Stick War: Legacy MOD APK is a modified version of the original game that gives you some advantages that make the game more fun and easy. With Stick War: Legacy MOD APK, you will get unlimited gems, which are the main currency in the game. You can use gems to buy skins and weapons for your units, upgrade your spells and abilities, and unlock new game modes. You will also get all the skins and weapons unlocked from the start, so you can customize your army as you wish. Moreover, you will not see any ads in the game, which can be annoying and distracting.</p>
- <h2>Features of Stick War: Legacy MOD APK</h2>
- <p>Here are some of the main features of Stick War: Legacy MOD APK that make it worth downloading:</p>
- <h3>Unlimited Gems</h3>
- <p>Gems are essential in Stick War: Legacy, as they allow you to buy and upgrade various items in the game. However, earning gems in the original game can be slow and tedious, as you have to complete missions, watch ads, or spend real money. With Stick War: Legacy MOD APK, you will get unlimited gems from the start, so you can buy anything you want without worrying about running out.</p>
- <p>stickman war legacy mod apk unlimited gems<br />
- stickman war 2 mod apk download<br />
- stickman war zombie mod apk<br />
- stickman war legacy mod apk latest version<br />
- stickman war hack mod apk<br />
- stickman war legacy mod apk android 1<br />
- stickman war shadow mod apk<br />
- stickman war legacy mod apk revdl<br />
- stickman war legacy mod apk happymod<br />
- stickman war legacy mod apk unlimited money<br />
- stickman war legacy 2 mod apk<br />
- stickman war draw mod apk<br />
- stickman war legacy mod apk rexdl<br />
- stickman war legacy mod apk 2023.2.85<br />
- stickman war legacy mod apk offline<br />
- stickman war legacy mod apk free shopping<br />
- stickman war world mod apk<br />
- stickman war legacy mod apk no ads<br />
- stickman war legacy mod apk unlimited everything<br />
- stickman war legacy mod apk online<br />
- stickman war empire mod apk<br />
- stickman war legacy mod apk unlimited health<br />
- stickman war of clans mod apk<br />
- stickman war legacy 3d mod apk<br />
- stickman war legacy mod apk all unlocked<br />
- stickman war heroes mod apk<br />
- stickman war strategy mod apk<br />
- stickman war legacy mega mod apk<br />
- stickman war simulator mod apk<br />
- stickman war legacy hack mod apk download<br />
- stickman war z mod apk unlimited money and gems<br />
- stickman war battlegrounds mod apk<br />
- stickman war legacy god mode mod apk<br />
- stickman war castle defense mod apk<br />
- stickman war legacy cheat mod apk<br />
- stickman war super dragon legend mod apk<br />
- stickman war ultimate challenge mod apk<br />
- stickman war legacy infinite gems mod apk<br />
- stickman war army vs zombies mod apk<br />
- stickman war shadow warrior legend mod apk</p>
- <h3>Unlocked Skins and Weapons</h3>
- <p>Skins and weapons are cosmetic items that change the appearance of your units. They do not affect their performance or stats, but they can make your army look more cool and unique. In the original game, you have to buy skins and weapons with gems or unlock them by playing certain game modes. With Stick War: Legacy MOD APK, you will get all the skins and weapons unlocked from the start, so you can choose your favorite ones without spending any gems.</p>
- <h3>No Ads</h3>
- <p>Ads are one of the most annoying things in any game, especially when they interrupt your gameplay or force you to watch them to get rewards. In the original game, you have to watch ads to get extra gems, unlock game modes, or revive your units. With Stick War: Legacy MOD APK, you will not see any ads in the game, which will make your gaming experience more smooth and enjoyable.</p>
- <h2>How to Download and Install Stick War: Legacy MOD APK</h2>
- <p>If you are interested in downloading and installing Stick War: Legacy MOD APK, you can follow these simple steps:</p>
- <h3>Step 1: Enable Unknown Sources</h3>
- <p>Before you can install any APK file on your Android device, you have to enable the option of unknown sources, which allows you to install apps from sources other than Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on.</p>
- <h3>Step 2: Download the APK File</h3>
- <p>Next, you have to download the APK file of Stick War: Legacy MOD APK from a reliable source. You can use the link below to download it directly to your device. The file size is about 100 MB, so make sure you have enough storage space and a stable internet connection.</p>
- <p><a href="">Download Stick War: Legacy MOD APK</a></p>
- <h3>Step 3: Install the APK File</h3>
- <p>Once you have downloaded the APK file, you can install it by tapping on it and following the instructions on the screen. The installation process should take a few seconds, and then you will see the icon of Stick War: Legacy MOD APK on your home screen or app drawer.</p>
- <h3>Step 4: Enjoy the Game</h3>
- <p>Now, you can launch the game and enjoy all the features of Stick War: Legacy MOD APK. You will see that you have unlimited gems, unlocked skins and weapons, and no ads. You can start playing the game mode of your choice and conquer all the stickman nations.</p>
- <h2>Tips and Tricks for Playing Stick War: Legacy MOD APK</h2>
- <p>Stick War: Legacy MOD APK is a fun and easy game to play, but it can also be challenging and strategic at times. Here are some tips and tricks that can help you improve your skills and win more battles:</p>
- <h3>Choose Your Strategy Wisely</h3>
- <p>In Stick War: Legacy MOD APK, you can choose from different strategies to lead your army. You can either be aggressive and attack your enemies head-on, or defensive and build up your defenses and resources. You can also be balanced and mix both approaches. Each strategy has its pros and cons, so you have to consider the situation and the enemy before deciding. For example, if you are facing a strong enemy with powerful units, you might want to be defensive and wait for an opening. On the other hand, if you are facing a weak enemy with few units, you might want to be aggressive and finish them off quickly.</p>
- <h3>Upgrade Your Units and Spells</h3>
- <p>In Stick War: Legacy MOD APK, you can upgrade your units and spells with gems. Upgrading your units will increase their health, damage, speed, and range. Upgrading your spells will increase their power, duration, and cooldown. Upgrading is essential if you want to keep up with the increasing difficulty of the game. You should upgrade your units and spells regularly and evenly, so that they are all effective and useful in different situations.</p>
- <h3>Use Your Special Abilities</h3>
- <p>In Stick War: Legacy MOD APK, you can use special abilities that can give you an edge in battle. These abilities include summoning giants, controlling a single unit, casting spells, or using items. Each ability has a different effect and cost, so you have to use them wisely and sparingly. You should use your abilities when they are most needed or when they can make a big difference in the outcome of the battle. For example, you can use the giant ability to break through enemy defenses or crush their units. You can use the control ability to take over an enemy unit or a powerful unit of your own. You can use the spell ability to heal your units or damage your enemies. You can use the item ability to boost your units or hinder your enemies.</p>
- <h2>Conclusion</h2>
- <p>Stick War: Legacy MOD APK is a great game for anyone who loves strategy games and stickman games. It is fun, addictive, challenging, and rewarding. It has amazing graphics, sound effects, animations, and gameplay. It has various game modes, difficulty levels, skins, weapons, spells, abilities, and items. It has unlimited gems, unlocked skins and weapons, and no ads. It is easy to download and install, and easy to play. It is the ultimate strategy game for Android. If you are looking for a game that will keep you entertained for hours, you should definitely try Stick War: Legacy MOD APK. You will not regret it.</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about Stick War: Legacy MOD APK:</p>
- <h3>Is Stick War: Legacy MOD APK safe to download and install?</h3>
- <p>Yes, Stick War: Legacy MOD APK is safe to download and install, as long as you use a reliable source and follow the instructions carefully. The APK file does not contain any viruses, malware, or spyware that can harm your device or compromise your privacy. However, you should always be careful when downloading and installing any APK file from unknown sources, as they might not be trustworthy or compatible with your device.</p>
- <h3>Is Stick War: Legacy MOD APK compatible with my device?</h3>
- <p>Stick War: Legacy MOD APK is compatible with most Android devices that run on Android 4.4 or higher. It does not require root access or any special permissions to work. However, some devices might not support the game or the mod features due to different specifications or settings. If you encounter any problems or errors while playing the game, you can try to update your device, clear your cache, or reinstall the game.</p>
- <h3>Can I play Stick War: Legacy MOD APK online or offline?</h3>
- <p>Stick War: Legacy MOD APK can be played both online and offline. You can play the game online if you want to access the leaderboards, achievements, or other online features. You can also play the game offline if you do not have an internet connection or if you want to save your data. However, some game modes or features might not be available offline, such as Tournament or Endless Deads.</p>
- <h3>Can I play Stick War: Legacy MOD APK with my friends?</h3>
- <p>Unfortunately, Stick War: Legacy MOD APK does not have a multiplayer mode or a co-op mode that allows you to play with your friends. The game is a single-player game that pits you against AI-controlled enemies. However, you can still compete with your friends by comparing your scores, achievements, or strategies on the leaderboards or social media.</p>
- <h3>Can I update Stick War: Legacy MOD APK?</h3>
- <p>Yes, you can update Stick War: Legacy MOD APK whenever there is a new version available. However, you have to download and install the new version manually from the same source that you used before. You cannot update the game from Google Play Store or any other app store, as they will not recognize the modded version of the game. You should also backup your game data before updating, as you might lose your progress or settings.</p> 197e85843d<br />
- <br />
- <br />
spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_537227KB.py DELETED
@@ -1,123 +0,0 @@
-import torch
-import numpy as np
-from torch import nn
-import torch.nn.functional as F
-
-from . import layers_537238KB as layers
-
-
-class BaseASPPNet(nn.Module):
-    def __init__(self, nin, ch, dilations=(4, 8, 16)):
-        super(BaseASPPNet, self).__init__()
-        self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
-        self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
-        self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
-        self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
-
-        self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
-
-        self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
-        self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
-        self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
-        self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
-
-    def __call__(self, x):
-        h, e1 = self.enc1(x)
-        h, e2 = self.enc2(h)
-        h, e3 = self.enc3(h)
-        h, e4 = self.enc4(h)
-
-        h = self.aspp(h)
-
-        h = self.dec4(h, e4)
-        h = self.dec3(h, e3)
-        h = self.dec2(h, e2)
-        h = self.dec1(h, e1)
-
-        return h
-
-
-class CascadedASPPNet(nn.Module):
-    def __init__(self, n_fft):
-        super(CascadedASPPNet, self).__init__()
-        self.stg1_low_band_net = BaseASPPNet(2, 64)
-        self.stg1_high_band_net = BaseASPPNet(2, 64)
-
-        self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
-        self.stg2_full_band_net = BaseASPPNet(32, 64)
-
-        self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0)
-        self.stg3_full_band_net = BaseASPPNet(64, 128)
-
-        self.out = nn.Conv2d(128, 2, 1, bias=False)
-        self.aux1_out = nn.Conv2d(64, 2, 1, bias=False)
-        self.aux2_out = nn.Conv2d(64, 2, 1, bias=False)
-
-        self.max_bin = n_fft // 2
-        self.output_bin = n_fft // 2 + 1
-
-        self.offset = 128
-
-    def forward(self, x, aggressiveness=None):
-        mix = x.detach()
-        x = x.clone()
-
-        x = x[:, :, : self.max_bin]
-
-        bandw = x.size()[2] // 2
-        aux1 = torch.cat(
-            [
-                self.stg1_low_band_net(x[:, :, :bandw]),
-                self.stg1_high_band_net(x[:, :, bandw:]),
-            ],
-            dim=2,
-        )
-
-        h = torch.cat([x, aux1], dim=1)
-        aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
-
-        h = torch.cat([x, aux1, aux2], dim=1)
-        h = self.stg3_full_band_net(self.stg3_bridge(h))
-
-        mask = torch.sigmoid(self.out(h))
-        mask = F.pad(
-            input=mask,
-            pad=(0, 0, 0, self.output_bin - mask.size()[2]),
-            mode="replicate",
-        )
-
-        if self.training:
-            aux1 = torch.sigmoid(self.aux1_out(aux1))
-            aux1 = F.pad(
-                input=aux1,
-                pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
-                mode="replicate",
-            )
-            aux2 = torch.sigmoid(self.aux2_out(aux2))
-            aux2 = F.pad(
-                input=aux2,
-                pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
-                mode="replicate",
-            )
-            return mask * mix, aux1 * mix, aux2 * mix
-        else:
-            if aggressiveness:
-                mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
-                    mask[:, :, : aggressiveness["split_bin"]],
-                    1 + aggressiveness["value"] / 3,
-                )
-                mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
-                    mask[:, :, aggressiveness["split_bin"] :],
-                    1 + aggressiveness["value"],
-                )
-
-            return mask * mix
-
-    def predict(self, x_mag, aggressiveness=None):
-        h = self.forward(x_mag, aggressiveness)
-
-        if self.offset > 0:
-            h = h[:, :, :, self.offset : -self.offset]
-            assert h.size()[3] > 0
-
-        return h
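
For orientation, a minimal sketch of how the CascadedASPPNet removed above could be exercised. The FFT size, tensor shape, and aggressiveness values are illustrative assumptions, not settings taken from this repository:

# Hypothetical usage sketch (assumed shapes and values):
import torch

net = CascadedASPPNet(n_fft=2048)   # assumed FFT size; max_bin becomes 1024
net.eval()
spec = torch.rand(1, 2, 1024, 512)  # (batch, channels, freq bins, frames)
with torch.no_grad():
    out = net.predict(spec, aggressiveness={"split_bin": 256, "value": 0.1})
# predict() trims net.offset (128) frames from each side of the time axis
print(out.shape)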
 
spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/commons.py DELETED
@@ -1,166 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
-    classname = m.__class__.__name__
-    if classname.find("Conv") != -1:
-        m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
-    return int((kernel_size * dilation - dilation) / 2)
-
-
-def convert_pad_shape(pad_shape):
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
-    """KL(P||Q)"""
-    kl = (logs_q - logs_p) - 0.5
-    kl += (
-        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
-    )
-    return kl
-
-
-def rand_gumbel(shape):
-    """Sample from the Gumbel distribution, protect from overflows."""
-    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-    return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
-    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-    return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
-    ret = torch.zeros_like(x[:, :, :segment_size])
-    for i in range(x.size(0)):
-        idx_str = ids_str[i]
-        idx_end = idx_str + segment_size
-        ret[i] = x[i, :, idx_str:idx_end]
-    return ret
-
-
-def slice_segments2(x, ids_str, segment_size=4):
-    ret = torch.zeros_like(x[:, :segment_size])
-    for i in range(x.size(0)):
-        idx_str = ids_str[i]
-        idx_end = idx_str + segment_size
-        ret[i] = x[i, idx_str:idx_end]
-    return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
-    b, d, t = x.size()
-    if x_lengths is None:
-        x_lengths = t
-    ids_str_max = x_lengths - segment_size + 1
-    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-    ret = slice_segments(x, ids_str, segment_size)
-    return ret, ids_str
-
-
-def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
-    position = torch.arange(length, dtype=torch.float)
-    num_timescales = channels // 2
-    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
-        num_timescales - 1
-    )
-    inv_timescales = min_timescale * torch.exp(
-        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
-    )
-    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-    signal = F.pad(signal, [0, 0, 0, channels % 2])
-    signal = signal.view(1, channels, length)
-    return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-    b, channels, length = x.size()
-    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-    return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-    b, channels, length = x.size()
-    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
-    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-    return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-    n_channels_int = n_channels[0]
-    in_act = input_a + input_b
-    t_act = torch.tanh(in_act[:, :n_channels_int, :])
-    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-    acts = t_act * s_act
-    return acts
-
-
-def convert_pad_shape(pad_shape):
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def shift_1d(x):
-    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-    return x
-
-
-def sequence_mask(length, max_length=None):
-    if max_length is None:
-        max_length = length.max()
-    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-    return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
-    """
-    duration: [b, 1, t_x]
-    mask: [b, 1, t_y, t_x]
-    """
-    device = duration.device
-
-    b, _, t_y, t_x = mask.shape
-    cum_duration = torch.cumsum(duration, -1)
-
-    cum_duration_flat = cum_duration.view(b * t_x)
-    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-    path = path.view(b, t_x, t_y)
-    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-    path = path.unsqueeze(1).transpose(2, 3) * mask
-    return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
-    if isinstance(parameters, torch.Tensor):
-        parameters = [parameters]
-    parameters = list(filter(lambda p: p.grad is not None, parameters))
-    norm_type = float(norm_type)
-    if clip_value is not None:
-        clip_value = float(clip_value)
-
-    total_norm = 0
-    for p in parameters:
-        param_norm = p.grad.data.norm(norm_type)
-        total_norm += param_norm.item() ** norm_type
-        if clip_value is not None:
-            p.grad.data.clamp_(min=-clip_value, max=clip_value)
-    total_norm = total_norm ** (1.0 / norm_type)
-    return total_norm
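
Two of the helpers removed above, sketched in isolation; the tensor sizes are arbitrary assumptions chosen only to show the shapes involved:

# Hypothetical usage sketch (assumed sizes):
import torch

lengths = torch.tensor([3, 5])
mask = sequence_mask(lengths, max_length=6)  # (2, 6) bool, True where t < length

feats = torch.rand(2, 80, 100)               # (batch, channels, time)
seg, ids = rand_slice_segments(feats, segment_size=4)
print(mask.shape, seg.shape, ids.shape)      # (2, 6) (2, 80, 4) (2,)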
 
spaces/AIConsultant/MusicGen/audiocraft/grids/compression/_explorers.py DELETED
@@ -1,55 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import treetable as tt
-
-from .._base_explorers import BaseExplorer
-
-
-class CompressionExplorer(BaseExplorer):
-    eval_metrics = ["sisnr", "visqol"]
-
-    def stages(self):
-        return ["train", "valid", "evaluate"]
-
-    def get_grid_meta(self):
-        """Returns the list of Meta information to display for each XP/job.
-        """
-        return [
-            tt.leaf("index", align=">"),
-            tt.leaf("name", wrap=140),
-            tt.leaf("state"),
-            tt.leaf("sig", align=">"),
-        ]
-
-    def get_grid_metrics(self):
-        """Return the metrics that should be displayed in the tracking table.
-        """
-        return [
-            tt.group(
-                "train",
-                [
-                    tt.leaf("epoch"),
-                    tt.leaf("bandwidth", ".2f"),
-                    tt.leaf("adv", ".4f"),
-                    tt.leaf("d_loss", ".4f"),
-                ],
-                align=">",
-            ),
-            tt.group(
-                "valid",
-                [
-                    tt.leaf("bandwidth", ".2f"),
-                    tt.leaf("adv", ".4f"),
-                    tt.leaf("msspec", ".4f"),
-                    tt.leaf("sisnr", ".2f"),
-                ],
-                align=">",
-            ),
-            tt.group(
-                "evaluate", [tt.leaf(name, ".3f") for name in self.eval_metrics], align=">"
-            ),
-        ]
 
spaces/ASJMO/freegpt/g4f/Provider/Providers/Phind.py DELETED
@@ -1,36 +0,0 @@
-import os
-import json
-import time
-import subprocess
-
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://phind.com'
-model = ['gpt-4']
-supports_stream = True
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
-    path = os.path.dirname(os.path.realpath(__file__))
-    config = json.dumps({
-        'model': model,
-        'messages': messages}, separators=(',', ':'))
-
-    cmd = ['python', f'{path}/helpers/phind.py', config]
-
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
-    for line in iter(p.stdout.readline, b''):
-        if b'<title>Just a moment...</title>' in line:
-            os.system('clear' if os.name == 'posix' else 'cls')
-            yield 'Cloudflare error, please try again...'
-            os._exit(0)
-
-        else:
-            if b'ping - 2023-' in line:
-                continue
-
-            yield line.decode('cp1251')  # [:-1]
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
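
Since _create_completion is a generator, a caller would drain it to receive the streamed chunks. A minimal driver sketch; the message payload below follows the usual role/content shape this kind of provider expects, which is an assumption here, not an example taken from the repository:

# Hypothetical driver sketch (made-up message content):
for chunk in _create_completion(
    model='gpt-4',
    messages=[{'role': 'user', 'content': 'hello'}],
    stream=True,
):
    print(chunk, end='')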
 
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/util.py DELETED
@@ -1,203 +0,0 @@
-import math
-
-import cv2
-import matplotlib
-import numpy as np
-
-
-def padRightDownCorner(img, stride, padValue):
-    h = img.shape[0]
-    w = img.shape[1]
-
-    pad = 4 * [None]
-    pad[0] = 0  # up
-    pad[1] = 0  # left
-    pad[2] = 0 if (h % stride == 0) else stride - (h % stride)  # down
-    pad[3] = 0 if (w % stride == 0) else stride - (w % stride)  # right
-
-    img_padded = img
-    pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
-    img_padded = np.concatenate((pad_up, img_padded), axis=0)
-    pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
-    img_padded = np.concatenate((pad_left, img_padded), axis=1)
-    pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
-    img_padded = np.concatenate((img_padded, pad_down), axis=0)
-    pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
-    img_padded = np.concatenate((img_padded, pad_right), axis=1)
-
-    return img_padded, pad
-
-
-# transfer caffe model to pytorch which will match the layer name
-def transfer(model, model_weights):
-    transfered_model_weights = {}
-    for weights_name in model.state_dict().keys():
-        transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
-    return transfered_model_weights
-
-
-# draw the body keypoints and limbs
-def draw_bodypose(canvas, candidate, subset):
-    stickwidth = 4
-    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
-               [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
-               [1, 16], [16, 18], [3, 17], [6, 18]]
-
-    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
-              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
-              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
-    for i in range(18):
-        for n in range(len(subset)):
-            index = int(subset[n][i])
-            if index == -1:
-                continue
-            x, y = candidate[index][0:2]
-            cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
-    for i in range(17):
-        for n in range(len(subset)):
-            index = subset[n][np.array(limbSeq[i]) - 1]
-            if -1 in index:
-                continue
-            cur_canvas = canvas.copy()
-            Y = candidate[index.astype(int), 0]
-            X = candidate[index.astype(int), 1]
-            mX = np.mean(X)
-            mY = np.mean(Y)
-            length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
-            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
-            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
-            cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
-            canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
-    # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
-    # plt.imshow(canvas[:, :, [2, 1, 0]])
-    return canvas
-
-
-# image drawn by opencv is not good.
-def draw_handpose(canvas, all_hand_peaks, show_number=False):
-    edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
-             [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
-
-    for peaks in all_hand_peaks:
-        for ie, e in enumerate(edges):
-            if np.sum(np.all(peaks[e], axis=1) == 0) == 0:
-                x1, y1 = peaks[e[0]]
-                x2, y2 = peaks[e[1]]
-                cv2.line(
-                    canvas, (x1, y1), (x2, y2),
-                    matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255,
-                    thickness=2)
-
-        for i, keypoint in enumerate(peaks):
-            x, y = keypoint
-            cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
-            if show_number:
-                cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
-    return canvas
-
-
-# detect hands according to body pose keypoints
-# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
-def handDetect(candidate, subset, oriImg):
-    # right hand: wrist 4, elbow 3, shoulder 2
-    # left hand: wrist 7, elbow 6, shoulder 5
-    ratioWristElbow = 0.33
-    detect_result = []
-    image_height, image_width = oriImg.shape[0:2]
-    for person in subset.astype(int):
-        # if any of the three keypoints is not detected, skip that hand
-        has_left = np.sum(person[[5, 6, 7]] == -1) == 0
-        has_right = np.sum(person[[2, 3, 4]] == -1) == 0
-        if not (has_left or has_right):
-            continue
-        hands = []
-        # left hand
-        if has_left:
-            left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
-            x1, y1 = candidate[left_shoulder_index][:2]
-            x2, y2 = candidate[left_elbow_index][:2]
-            x3, y3 = candidate[left_wrist_index][:2]
-            hands.append([x1, y1, x2, y2, x3, y3, True])
-        # right hand
-        if has_right:
-            right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
-            x1, y1 = candidate[right_shoulder_index][:2]
-            x2, y2 = candidate[right_elbow_index][:2]
-            x3, y3 = candidate[right_wrist_index][:2]
-            hands.append([x1, y1, x2, y2, x3, y3, False])
-
-        for x1, y1, x2, y2, x3, y3, is_left in hands:
-            # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox
-            # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
-            # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
-            # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
-            # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
-            # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
-            x = x3 + ratioWristElbow * (x3 - x2)
-            y = y3 + ratioWristElbow * (y3 - y2)
-            distanceWristElbow = math.sqrt((x3 - x2)**2 + (y3 - y2)**2)
-            distanceElbowShoulder = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
-            width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
-            # x-y refers to the center --> offset to topLeft point
-            # handRectangle.x -= handRectangle.width / 2.f;
-            # handRectangle.y -= handRectangle.height / 2.f;
-            x -= width / 2
-            y -= width / 2  # width = height
-            # keep the box from overflowing the image
-            if x < 0: x = 0
-            if y < 0: y = 0
-            width1 = width
-            width2 = width
-            if x + width > image_width: width1 = image_width - x
-            if y + width > image_height: width2 = image_height - y
-            width = min(width1, width2)
-            # discard hand boxes smaller than 20 pixels
-            if width >= 20:
-                detect_result.append([int(x), int(y), int(width), is_left])
-    '''
-    return value: [[x, y, w, True if left hand else False]].
-    width=height since the network requires squared input.
-    x, y is the coordinate of top left
-    '''
-    return detect_result
-
-
-# get max index of 2d array
-def npmax(array):
-    arrayindex = array.argmax(1)
-    arrayvalue = array.max(1)
-    i = arrayvalue.argmax()
-    j = arrayindex[i]
-    return i, j
-
-
-def HWC3(x):
-    assert x.dtype == np.uint8
-    if x.ndim == 2:
-        x = x[:, :, None]
-    assert x.ndim == 3
-    H, W, C = x.shape
-    assert C == 1 or C == 3 or C == 4
-    if C == 3:
-        return x
-    if C == 1:
-        return np.concatenate([x, x, x], axis=2)
-    if C == 4:
-        color = x[:, :, 0:3].astype(np.float32)
-        alpha = x[:, :, 3:4].astype(np.float32) / 255.0
-        y = color * alpha + 255.0 * (1.0 - alpha)
-        y = y.clip(0, 255).astype(np.uint8)
-        return y
-
-
-def resize_image(input_image, resolution):
-    H, W, C = input_image.shape
-    H = float(H)
-    W = float(W)
-    k = float(resolution) / min(H, W)
-    H *= k
-    W *= k
-    H = int(np.round(H / 64.0)) * 64
-    W = int(np.round(W / 64.0)) * 64
-    img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
-    return img
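
The two image helpers at the end of this file compose naturally: HWC3 normalizes any uint8 image to three channels and resize_image snaps both sides to multiples of 64. A small sketch with made-up sizes:

# Hypothetical usage sketch (made-up input size and target resolution):
import numpy as np

img = np.zeros((480, 640, 4), dtype=np.uint8)  # RGBA test image
rgb = HWC3(img)                                # alpha-composited to 3 channels
resized = resize_image(rgb, 512)               # short side -> 512, both sides /64
print(rgb.shape, resized.shape)                # (480, 640, 3) (512, 704, 3)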
 
spaces/Adapter/T2I-Adapter/docs/examples.md DELETED
@@ -1,41 +0,0 @@
-# Demos
-
-## Style Adapter
-
-<p align="center">
-  <img src="https://user-images.githubusercontent.com/17445847/222734169-d47789e8-e83c-48c2-80ef-a896c2bafbb0.png" height=450>
-</p>
-
-## Color Adapter (Spatial Palette)
-
-<p align="center">
-  <img src="https://user-images.githubusercontent.com/17445847/222915829-ccfb0366-13a8-484a-9561-627fabd87d29.png" height=450>
-</p>
-
-## Openpose Adapter
-
-<p align="center">
-  <img src="https://user-images.githubusercontent.com/17445847/222733916-dc26a66e-d786-4407-8889-b81804862b1a.png" height=450>
-</p>
-
-## Canny Adapter (Edge)
-
-<p align="center">
-  <img src="https://user-images.githubusercontent.com/17445847/222915813-c8f264bd-1be6-4496-97ff-aec4f6b53788.png" height=450>
-</p>
-
-## Multi-adapters
-<p align="center">
-  <img src="https://user-images.githubusercontent.com/17445847/220939329-379f88b7-444f-4a3a-9de0-8f90605d1d34.png" height=450>
-</p>
-
-<div align="center">
-
-*T2I adapters naturally support using multiple adapters together.*
-
-</div><br />
-The testing script usage for this example is similar to the command line given below, except that the pretrained SD model is replaced with Anything 4.5 and Kenshi.
-
->python test_composable_adapters.py --prompt "1gril, computer desk, best quality, extremely detailed" --neg_prompt "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality" --depth_cond_path examples/depth/desk_depth.png --depth_cond_weight 1.0 --depth_ckpt models/t2iadapter_depth_sd14v1.pth --depth_type_in depth --pose_cond_path examples/keypose/person_keypose.png --pose_cond_weight 1.5 --ckpt models/anything-v4.0-pruned.ckpt --n_sample 4 --max_resolution 524288
-
-[Image source](https://twitter.com/toyxyz3/status/1628375164781211648)
 
spaces/Adr740/CV_XPLORER_POC/app.py DELETED
@@ -1,38 +0,0 @@
-import gradio as gr
-from functools import partial
-import os
-from get_cv import get_cv
-
-title = "CV Indexing par Intelligence Artificielle"
-desc = "Ceci est un outil qui vous aide à trouver rapidement des CV pertinents en fonction des descriptions de poste. Il suffit de taper simplement ce que vous recherchez dans la zone ci-dessous.\n\n Avec l'aide de l'IA, cet outil est conçu pour simplifier votre recherche de CV en suggérant des résultats qui correspondent le mieux à vos besoins. Vous n'avez qu'à saisir les termes pertinents qui décrivent le poste que vous recherchez et l'outil vous présentera une liste de CV adaptés à vos critères. Cela vous permettra de gagner du temps et de trouver plus facilement les candidats idéaux pour votre entreprise.\n\n"
-# warning = "Warning!"
-disclaimer = "#### Attention! La méthode utilisée est stochastique et par conséquent les résultats peuvent parfois ne pas respecter parfaitement la requête. SI CELA ARRIVE : essayez d'adapter votre demande en reformulant ou en fournissant plus d'informations, cela fonctionne mieux avec des textes plus longs (fiche de poste par exemple)"
-def iter_grid(n_rows, n_cols):
-    for _ in range(n_rows):
-        with gr.Row():
-            for _ in range(n_cols):
-                with gr.Column():
-                    yield
-with gr.Blocks(title=title) as demo:
-    gr.Markdown(f"## {title}")
-    gr.Markdown(desc)
-    gr.Markdown(disclaimer)
-    with gr.Row():
-        with gr.Column(scale=4):
-            text_area = gr.Textbox(placeholder="Écrivez ici", lines=3, label="Décrivez le type de candidat que vous cherchez ou copiez collez une fiche de poste")
-        with gr.Column(scale=1):
-            number_to_display = gr.Number(value=10, label="Nombre de candidats à afficher")
-            submit_button = gr.Button(value="Rechercher des candidats")
-            pass
-
-    fn = partial(get_cv)
-
-    with gr.Accordion("Tous les résultats:"):
-        ll = gr.Markdown("Vide")
-
-
-    submit_button.click(fn=fn, inputs=[text_area, number_to_display], outputs=[ll])
-
-login = os.environ.get("login")
-pwd = os.environ.get("pwd")
-demo.launch(enable_queue=True, max_threads=40)
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/kawaseblurpipeline-plugin.d.ts DELETED
@@ -1,30 +0,0 @@
-// import * as Phaser from 'phaser';
-import KawaseBlurFilterPostFxPipeline from './kawaseblurpipeline';
-
-
-export default KawaseBlurFilterPipelinePlugin;
-
-declare namespace KawaseBlurFilterPipelinePlugin {
-
-    interface IConfig extends KawaseBlurFilterPostFxPipeline.IConfig {
-        name?: string,
-    }
-
-}
-
-declare class KawaseBlurFilterPipelinePlugin extends Phaser.Plugins.BasePlugin {
-    add(
-        gameObject: Phaser.GameObjects.GameObject | Phaser.Cameras.Scene2D.Camera,
-        config?: KawaseBlurFilterPipelinePlugin.IConfig
-    ): KawaseBlurFilterPostFxPipeline;
-
-    remove(
-        gameObject: Phaser.GameObjects.GameObject,
-        name?: string
-    ): this;
-
-    get(
-        gameObject: Phaser.GameObjects.GameObject,
-        name?: string
-    ): KawaseBlurFilterPostFxPipeline | KawaseBlurFilterPostFxPipeline[];
-}
 
spaces/AiiluoChen/webui/README.md DELETED
@@ -1,20 +0,0 @@
----
-title: Stable Diffusion Web UI
-emoji: 🚧
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.9
-app_file: app.py
-pinned: false
-duplicated_from: camenduru/webui
----
-
-## Stable Diffusion Web UI
-[https://github.com/AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
-
-## Documentation
-[https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki)
-
-## Models License
-https://huggingface.co/spaces/CompVis/stable-diffusion-license
 
spaces/AlekseyCalvin/dreambooth-training3/convertosd.py DELETED
@@ -1,302 +0,0 @@
-# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
-# *Only* converts the UNet, VAE, and Text Encoder.
-# Does not convert optimizer state or any other thing.
-
-import argparse
-import os.path as osp
-import re
-
-import torch
-import gc
-
-# =================#
-# UNet Conversion #
-# =================#
-
-unet_conversion_map = [
-    # (stable-diffusion, HF Diffusers)
-    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
-    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
-    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
-    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
-    ("input_blocks.0.0.weight", "conv_in.weight"),
-    ("input_blocks.0.0.bias", "conv_in.bias"),
-    ("out.0.weight", "conv_norm_out.weight"),
-    ("out.0.bias", "conv_norm_out.bias"),
-    ("out.2.weight", "conv_out.weight"),
-    ("out.2.bias", "conv_out.bias"),
-]
-
-unet_conversion_map_resnet = [
-    # (stable-diffusion, HF Diffusers)
-    ("in_layers.0", "norm1"),
-    ("in_layers.2", "conv1"),
-    ("out_layers.0", "norm2"),
-    ("out_layers.3", "conv2"),
-    ("emb_layers.1", "time_emb_proj"),
-    ("skip_connection", "conv_shortcut"),
-]
-
-unet_conversion_map_layer = []
-# hardcoded number of downblocks and resnets/attentions...
-# would need smarter logic for other networks.
-for i in range(4):
-    # loop over downblocks/upblocks
-
-    for j in range(2):
-        # loop over resnets/attentions for downblocks
-        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
-        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
-        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
-
-        if i < 3:
-            # no attention layers in down_blocks.3
-            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
-            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
-            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
-
-    for j in range(3):
-        # loop over resnets/attentions for upblocks
-        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
-        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
-        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
-
-        if i > 0:
-            # no attention layers in up_blocks.0
-            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
-            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
-            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
-
-    if i < 3:
-        # no downsample in down_blocks.3
-        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
-        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
-        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
-
-        # no upsample in up_blocks.3
-        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
-        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
-        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
-
-hf_mid_atn_prefix = "mid_block.attentions.0."
-sd_mid_atn_prefix = "middle_block.1."
-unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
-
-for j in range(2):
-    hf_mid_res_prefix = f"mid_block.resnets.{j}."
-    sd_mid_res_prefix = f"middle_block.{2*j}."
-    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
-
-
-def convert_unet_state_dict(unet_state_dict):
-    # buyer beware: this is a *brittle* function,
-    # and correct output requires that all of these pieces interact in
-    # the exact order in which I have arranged them.
-    mapping = {k: k for k in unet_state_dict.keys()}
-    for sd_name, hf_name in unet_conversion_map:
-        mapping[hf_name] = sd_name
-    for k, v in mapping.items():
-        if "resnets" in k:
-            for sd_part, hf_part in unet_conversion_map_resnet:
-                v = v.replace(hf_part, sd_part)
-            mapping[k] = v
-    for k, v in mapping.items():
-        for sd_part, hf_part in unet_conversion_map_layer:
-            v = v.replace(hf_part, sd_part)
-        mapping[k] = v
-    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
-    return new_state_dict
-
-
-# ================#
-# VAE Conversion #
-# ================#
-
-vae_conversion_map = [
-    # (stable-diffusion, HF Diffusers)
-    ("nin_shortcut", "conv_shortcut"),
-    ("norm_out", "conv_norm_out"),
-    ("mid.attn_1.", "mid_block.attentions.0."),
-]
-
-for i in range(4):
-    # down_blocks have two resnets
-    for j in range(2):
-        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
-        sd_down_prefix = f"encoder.down.{i}.block.{j}."
-        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
-
-    if i < 3:
-        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
-        sd_downsample_prefix = f"down.{i}.downsample."
-        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
-
-        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
-        sd_upsample_prefix = f"up.{3-i}.upsample."
-        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
-
-    # up_blocks have three resnets
-    # also, up blocks in hf are numbered in reverse from sd
-    for j in range(3):
-        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
-        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
-        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
-
-# this part accounts for mid blocks in both the encoder and the decoder
-for i in range(2):
-    hf_mid_res_prefix = f"mid_block.resnets.{i}."
-    sd_mid_res_prefix = f"mid.block_{i+1}."
-    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
-
-
-vae_conversion_map_attn = [
-    # (stable-diffusion, HF Diffusers)
-    ("norm.", "group_norm."),
-    ("q.", "query."),
-    ("k.", "key."),
-    ("v.", "value."),
-    ("proj_out.", "proj_attn."),
-]
-
-
-def reshape_weight_for_sd(w):
-    # convert HF linear weights to SD conv2d weights
-    return w.reshape(*w.shape, 1, 1)
-
-
-def convert_vae_state_dict(vae_state_dict):
-    mapping = {k: k for k in vae_state_dict.keys()}
-    for k, v in mapping.items():
-        for sd_part, hf_part in vae_conversion_map:
-            v = v.replace(hf_part, sd_part)
-        mapping[k] = v
-    for k, v in mapping.items():
-        if "attentions" in k:
-            for sd_part, hf_part in vae_conversion_map_attn:
-                v = v.replace(hf_part, sd_part)
-            mapping[k] = v
-    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
-    weights_to_convert = ["q", "k", "v", "proj_out"]
-    print("Converting to CKPT ...")
-    for k, v in new_state_dict.items():
-        for weight_name in weights_to_convert:
-            if f"mid.attn_1.{weight_name}.weight" in k:
-                print(f"Reshaping {k} for SD format")
-                new_state_dict[k] = reshape_weight_for_sd(v)
-    return new_state_dict
-
-
-# =========================#
-# Text Encoder Conversion #
-# =========================#
-
-
-textenc_conversion_lst = [
-    # (stable-diffusion, HF Diffusers)
-    ("resblocks.", "text_model.encoder.layers."),
-    ("ln_1", "layer_norm1"),
-    ("ln_2", "layer_norm2"),
-    (".c_fc.", ".fc1."),
-    (".c_proj.", ".fc2."),
-    (".attn", ".self_attn"),
-    ("ln_final.", "transformer.text_model.final_layer_norm."),
-    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
-    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
-]
-protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
-textenc_pattern = re.compile("|".join(protected.keys()))
-
-# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
-code2idx = {"q": 0, "k": 1, "v": 2}
-
-
-def convert_text_enc_state_dict_v20(text_enc_dict):
-    new_state_dict = {}
-    capture_qkv_weight = {}
-    capture_qkv_bias = {}
-    for k, v in text_enc_dict.items():
-        if (
-            k.endswith(".self_attn.q_proj.weight")
-            or k.endswith(".self_attn.k_proj.weight")
-            or k.endswith(".self_attn.v_proj.weight")
-        ):
-            k_pre = k[: -len(".q_proj.weight")]
-            k_code = k[-len("q_proj.weight")]
-            if k_pre not in capture_qkv_weight:
-                capture_qkv_weight[k_pre] = [None, None, None]
-            capture_qkv_weight[k_pre][code2idx[k_code]] = v
-            continue
-
-        if (
-            k.endswith(".self_attn.q_proj.bias")
-            or k.endswith(".self_attn.k_proj.bias")
-            or k.endswith(".self_attn.v_proj.bias")
-        ):
-            k_pre = k[: -len(".q_proj.bias")]
-            k_code = k[-len("q_proj.bias")]
-            if k_pre not in capture_qkv_bias:
-                capture_qkv_bias[k_pre] = [None, None, None]
-            capture_qkv_bias[k_pre][code2idx[k_code]] = v
-            continue
-
-        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
-        new_state_dict[relabelled_key] = v
-
-    for k_pre, tensors in capture_qkv_weight.items():
-        if None in tensors:
-            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
-        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
-        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
-
-    for k_pre, tensors in capture_qkv_bias.items():
-        if None in tensors:
-            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
-        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
-        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
-
-    return new_state_dict
-
-
-def convert_text_enc_state_dict(text_enc_dict):
-    return text_enc_dict
-
-
-def convert(model_path, checkpoint_path):
-    unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin")
-    vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin")
-    text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin")
-
-    # Convert the UNet model
-    unet_state_dict = torch.load(unet_path, map_location="cpu")
-    unet_state_dict = convert_unet_state_dict(unet_state_dict)
-    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
-
-    # Convert the VAE model
-    vae_state_dict = torch.load(vae_path, map_location="cpu")
-    vae_state_dict = convert_vae_state_dict(vae_state_dict)
-    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
-
-    # Convert the text encoder model
-    text_enc_dict = torch.load(text_enc_path, map_location="cpu")
-
-    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
-    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
-
-    if is_v20_model:
-        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
-        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
-        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
-        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
-    else:
-        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
-        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
-
-    # Put together new checkpoint
-    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
-    state_dict = {k: v.half() for k, v in state_dict.items()}
-    state_dict = {"state_dict": state_dict}
-    torch.save(state_dict, checkpoint_path)
-    del state_dict, text_enc_dict, vae_state_dict, unet_state_dict
-    torch.cuda.empty_cache()
-    gc.collect()
-
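
A minimal sketch of how this deleted converter would be invoked; both paths are placeholders, not paths from the repository:

# Hypothetical invocation sketch (placeholder paths):
convert(
    model_path="path/to/diffusers_pipeline",      # folder containing unet/, vae/, text_encoder/
    checkpoint_path="path/to/output/model.ckpt",  # fp16 SD-style checkpoint written here
)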
 
spaces/Alfasign/HuggingGPT-Lite/models_server.py DELETED
@@ -1,779 +0,0 @@
1
- import argparse
2
- import logging
3
- import random
4
- import uuid
5
- import numpy as np
6
- from transformers import pipeline
7
- from diffusers import (
8
- DiffusionPipeline,
9
- StableDiffusionControlNetPipeline,
10
- ControlNetModel,
11
- UniPCMultistepScheduler,
12
- )
13
- from diffusers.utils import load_image
14
- from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
15
- from diffusers.utils import export_to_video
16
- from transformers import BlipProcessor, BlipForConditionalGeneration
17
- from transformers import (
18
- TrOCRProcessor,
19
- VisionEncoderDecoderModel,
20
- ViTImageProcessor,
21
- AutoTokenizer,
22
- )
23
- from datasets import load_dataset
24
- from PIL import Image
25
- import io
26
- from torchvision import transforms
27
- import torch
28
- import torchaudio
29
- from speechbrain.pretrained import WaveformEnhancement
30
- import joblib
31
- from huggingface_hub import hf_hub_url, cached_download
32
- from transformers import AutoImageProcessor, TimesformerForVideoClassification
33
- from transformers import (
34
- MaskFormerFeatureExtractor,
35
- MaskFormerForInstanceSegmentation,
36
- AutoFeatureExtractor,
37
- )
38
- from controlnet_aux import (
39
- OpenposeDetector,
40
- MLSDdetector,
41
- HEDdetector,
42
- CannyDetector,
43
- MidasDetector,
44
- )
45
- from controlnet_aux.open_pose.body import Body
46
- from controlnet_aux.mlsd.models.mbv2_mlsd_large import MobileV2_MLSD_Large
47
- from controlnet_aux.hed import Network
48
- from transformers import DPTForDepthEstimation, DPTFeatureExtractor
49
- import warnings
50
- import time
51
- from espnet2.bin.tts_inference import Text2Speech
52
- import soundfile as sf
53
- from asteroid.models import BaseModel
54
- import traceback
55
- import os
56
- import yaml
57
-
58
- warnings.filterwarnings("ignore")
59
-
60
- parser = argparse.ArgumentParser()
61
- parser.add_argument("--config", type=str, default="config.yaml")
62
- args = parser.parse_args()
63
-
64
- if __name__ != "__main__":
65
- args.config = "config.gradio.yaml"
66
-
67
- logger = logging.getLogger(__name__)
68
- logger.setLevel(logging.INFO)
69
- handler = logging.StreamHandler()
70
- handler.setLevel(logging.INFO)
71
- formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
72
- handler.setFormatter(formatter)
73
- logger.addHandler(handler)
74
-
75
- config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
76
-
77
- local_deployment = config["local_deployment"]
78
- if config["inference_mode"] == "huggingface":
79
- local_deployment = "none"
80
-
81
- PROXY = None
82
- if config["proxy"]:
83
- PROXY = {
84
- "https": config["proxy"],
85
- }
86
-
87
- start = time.time()
88
-
89
- # local_models = "models/"
90
- local_models = ""
91
-
92
-
93
- def load_pipes(local_deployment):
94
- other_pipes = {}
95
- standard_pipes = {}
96
- controlnet_sd_pipes = {}
97
- if local_deployment in ["full"]:
98
- other_pipes = {
99
- # "Salesforce/blip-image-captioning-large": {
100
- # "model": BlipForConditionalGeneration.from_pretrained(f"Salesforce/blip-image-captioning-large"),
101
- # "processor": BlipProcessor.from_pretrained(f"Salesforce/blip-image-captioning-large"),
102
- # "device": "cpu"
103
- # },
104
- # "damo-vilab/text-to-video-ms-1.7b": {
105
- # "model": DiffusionPipeline.from_pretrained(
106
- # f"{local_models}damo-vilab/text-to-video-ms-1.7b",
107
- # torch_dtype=torch.float16,
108
- # variant="fp16",
109
- # ),
110
- # "device": "cpu",
111
- # },
112
- # "facebook/maskformer-swin-large-ade": {
113
- # "model": MaskFormerForInstanceSegmentation.from_pretrained(f"facebook/maskformer-swin-large-ade"),
114
- # "feature_extractor" : AutoFeatureExtractor.from_pretrained("facebook/maskformer-swin-large-ade"),
115
- # "device": "cpu"
116
- # },
117
- # "microsoft/trocr-base-printed": {
118
- # "processor": TrOCRProcessor.from_pretrained(f"microsoft/trocr-base-printed"),
119
- # "model": VisionEncoderDecoderModel.from_pretrained(f"microsoft/trocr-base-printed"),
120
- # "device": "cpu"
121
- # },
122
- # "microsoft/trocr-base-handwritten": {
123
- # "processor": TrOCRProcessor.from_pretrained(f"microsoft/trocr-base-handwritten"),
124
- # "model": VisionEncoderDecoderModel.from_pretrained(f"microsoft/trocr-base-handwritten"),
125
- # "device": "cpu"
126
- # },
127
- # "JorisCos/DCCRNet_Libri1Mix_enhsingle_16k": {
128
- # "model": BaseModel.from_pretrained(
129
- # "JorisCos/DCCRNet_Libri1Mix_enhsingle_16k"
130
- # ),
131
- # "device": "cpu",
132
- # },
133
- # "CompVis/stable-diffusion-v1-4": {
134
- # "model": DiffusionPipeline.from_pretrained(f"CompVis/stable-diffusion-v1-4"),
135
- # "device": "cpu"
136
- # },
137
- # "stabilityai/stable-diffusion-2-1": {
138
- # "model": DiffusionPipeline.from_pretrained(f"stabilityai/stable-diffusion-2-1"),
139
- # "device": "cpu"
140
- # },
141
- # "microsoft/speecht5_tts":{
142
- # "processor": SpeechT5Processor.from_pretrained(f"microsoft/speecht5_tts"),
143
- # "model": SpeechT5ForTextToSpeech.from_pretrained(f"microsoft/speecht5_tts"),
144
- # "vocoder": SpeechT5HifiGan.from_pretrained(f"microsoft/speecht5_hifigan"),
145
- # "embeddings_dataset": load_dataset(f"Matthijs/cmu-arctic-xvectors", split="validation"),
146
- # "device": "cpu"
147
- # },
148
- # "speechbrain/mtl-mimic-voicebank": {
149
- # "model": WaveformEnhancement.from_hparams(source="speechbrain/mtl-mimic-voicebank", savedir="models/mtl-mimic-voicebank"),
150
- # "device": "cpu"
151
- # },
152
- # "microsoft/speecht5_vc": {
153
- # "processor": SpeechT5Processor.from_pretrained(
154
- # f"{local_models}microsoft/speecht5_vc"
155
- # ),
156
- # "model": SpeechT5ForSpeechToSpeech.from_pretrained(
157
- # f"{local_models}microsoft/speecht5_vc"
158
- # ),
159
- # "vocoder": SpeechT5HifiGan.from_pretrained(
160
- # f"{local_models}microsoft/speecht5_hifigan"
161
- # ),
162
- # "embeddings_dataset": load_dataset(
163
- # f"{local_models}Matthijs/cmu-arctic-xvectors", split="validation"
164
- # ),
165
- # "device": "cpu",
166
- # },
167
- # "julien-c/wine-quality": {
168
- # "model": joblib.load(cached_download(hf_hub_url("julien-c/wine-quality", "sklearn_model.joblib")))
169
- # },
170
- # "facebook/timesformer-base-finetuned-k400": {
171
- # "processor": AutoImageProcessor.from_pretrained(f"facebook/timesformer-base-finetuned-k400"),
172
- # "model": TimesformerForVideoClassification.from_pretrained(f"facebook/timesformer-base-finetuned-k400"),
173
- # "device": "cpu"
174
- # },
175
- "facebook/maskformer-swin-base-coco": {
176
- "feature_extractor": MaskFormerFeatureExtractor.from_pretrained(
177
- f"{local_models}facebook/maskformer-swin-base-coco"
178
- ),
179
- "model": MaskFormerForInstanceSegmentation.from_pretrained(
180
- f"{local_models}facebook/maskformer-swin-base-coco"
181
- ),
182
- "device": "cpu",
183
- },
184
- # "Intel/dpt-hybrid-midas": {
185
- # "model": DPTForDepthEstimation.from_pretrained(
186
- # f"{local_models}Intel/dpt-hybrid-midas", low_cpu_mem_usage=True
187
- # ),
188
- # "feature_extractor": DPTFeatureExtractor.from_pretrained(
189
- # f"{local_models}Intel/dpt-hybrid-midas"
190
- # ),
191
- # "device": "cpu",
192
- # },
193
- }
194
-
195
- if local_deployment in ["full", "standard"]:
196
- standard_pipes = {
197
- # "nlpconnect/vit-gpt2-image-captioning":{
198
- # "model": VisionEncoderDecoderModel.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
199
- # "feature_extractor": ViTImageProcessor.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
200
- # "tokenizer": AutoTokenizer.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
201
- # "device": "cpu"
202
- # },
203
- # "espnet/kan-bayashi_ljspeech_vits": {
204
- # "model": Text2Speech.from_pretrained(
205
- # "espnet/kan-bayashi_ljspeech_vits"
206
- # ),
207
- # "device": "cpu",
208
- # },
209
- # "lambdalabs/sd-image-variations-diffusers": {
210
- # "model": DiffusionPipeline.from_pretrained(f"{local_models}lambdalabs/sd-image-variations-diffusers"), #torch_dtype=torch.float16
211
- # "device": "cpu"
212
- # },
213
- # "runwayml/stable-diffusion-v1-5": {
214
- # "model": DiffusionPipeline.from_pretrained(
215
- # f"{local_models}runwayml/stable-diffusion-v1-5"
216
- # ),
217
- # "device": "cpu",
218
- # },
219
- # "superb/wav2vec2-base-superb-ks": {
220
- # "model": pipeline(task="audio-classification", model=f"superb/wav2vec2-base-superb-ks"),
221
- # "device": "cpu"
222
- # },
223
- # "openai/whisper-base": {
224
- # "model": pipeline(
225
- # task="automatic-speech-recognition",
226
- # model=f"{local_models}openai/whisper-base",
227
- # ),
228
- # "device": "cpu",
229
- # },
230
- # "microsoft/speecht5_asr": {
231
- # "model": pipeline(task="automatic-speech-recognition", model=f"{local_models}microsoft/speecht5_asr"),
232
- # "device": "cpu"
233
- # },
234
- "Intel/dpt-large": {
235
- "model": pipeline(
236
- task="depth-estimation", model=f"{local_models}Intel/dpt-large"
237
- ),
238
- "device": "cpu",
239
- },
240
- # "microsoft/beit-base-patch16-224-pt22k-ft22k": {
241
- # "model": pipeline(task="image-classification", model=f"microsoft/beit-base-patch16-224-pt22k-ft22k"),
242
- # "device": "cpu"
243
- # },
244
- "facebook/detr-resnet-50-panoptic": {
245
- "model": pipeline(
246
- task="image-segmentation",
247
- model=f"{local_models}facebook/detr-resnet-50-panoptic",
248
- ),
249
- "device": "cpu",
250
- },
251
- "facebook/detr-resnet-101": {
252
- "model": pipeline(
253
- task="object-detection",
254
- model=f"{local_models}facebook/detr-resnet-101",
255
- ),
256
- "device": "cpu",
257
- },
258
- # "openai/clip-vit-large-patch14": {
259
- # "model": pipeline(task="zero-shot-image-classification", model=f"openai/clip-vit-large-patch14"),
260
- # "device": "cpu"
261
- # },
262
- # "google/owlvit-base-patch32": {
263
- # "model": pipeline(task="zero-shot-object-detection", model=f"{local_models}google/owlvit-base-patch32"),
264
- # "device": "cpu"
265
- # },
266
- # "microsoft/DialoGPT-medium": {
267
- # "model": pipeline(task="conversational", model=f"microsoft/DialoGPT-medium"),
268
- # "device": "cpu"
269
- # },
270
- # "bert-base-uncased": {
271
- # "model": pipeline(task="fill-mask", model=f"bert-base-uncased"),
272
- # "device": "cpu"
273
- # },
274
- # "deepset/roberta-base-squad2": {
275
- # "model": pipeline(task = "question-answering", model=f"deepset/roberta-base-squad2"),
276
- # "device": "cpu"
277
- # },
278
- # "facebook/bart-large-cnn": {
279
- # "model": pipeline(task="summarization", model=f"facebook/bart-large-cnn"),
280
- # "device": "cpu"
281
- # },
282
- # "google/tapas-base-finetuned-wtq": {
283
- # "model": pipeline(task="table-question-answering", model=f"google/tapas-base-finetuned-wtq"),
284
- # "device": "cpu"
285
- # },
286
- # "distilbert-base-uncased-finetuned-sst-2-english": {
287
- # "model": pipeline(task="text-classification", model=f"distilbert-base-uncased-finetuned-sst-2-english"),
288
- # "device": "cpu"
289
- # },
290
- # "gpt2": {
291
- # "model": pipeline(task="text-generation", model="gpt2"),
292
- # "device": "cpu"
293
- # },
294
- # "mrm8488/t5-base-finetuned-question-generation-ap": {
295
- # "model": pipeline(task="text2text-generation", model=f"mrm8488/t5-base-finetuned-question-generation-ap"),
296
- # "device": "cpu"
297
- # },
298
- # "Jean-Baptiste/camembert-ner": {
299
- # "model": pipeline(task="token-classification", model=f"Jean-Baptiste/camembert-ner", aggregation_strategy="simple"),
300
- # "device": "cpu"
301
- # },
302
- # "t5-base": {
303
- # "model": pipeline(task="translation", model=f"t5-base"),
304
- # "device": "cpu"
305
- # },
306
- # "impira/layoutlm-document-qa": {
307
- # "model": pipeline(task="document-question-answering", model=f"{local_models}impira/layoutlm-document-qa"),
308
- # "device": "cpu"
309
- # },
310
- "ydshieh/vit-gpt2-coco-en": {
311
- "model": pipeline(
312
- task="image-to-text",
313
- model=f"{local_models}ydshieh/vit-gpt2-coco-en",
314
- ),
315
- "device": "cpu",
316
- },
317
- # "dandelin/vilt-b32-finetuned-vqa": {
318
- # "model": pipeline(
319
- # task="visual-question-answering",
320
- # model=f"{local_models}dandelin/vilt-b32-finetuned-vqa",
321
- # ),
322
- # "device": "cpu",
323
- # },
324
- }
325
-
326
- if local_deployment in ["full", "standard", "minimal"]:
327
- controlnet = ControlNetModel.from_pretrained(
328
- f"{local_models}lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
329
- )
330
- controlnetpipe = StableDiffusionControlNetPipeline.from_pretrained(
331
- f"{local_models}runwayml/stable-diffusion-v1-5",
332
- controlnet=controlnet,
333
- torch_dtype=torch.float16,
334
- )
335
-
336
- hed_network = HEDdetector.from_pretrained("lllyasviel/ControlNet")
337
-
338
- pipes = {**standard_pipes, **other_pipes}
339
- return pipes
340
-
341
-
342
- pipes = load_pipes(local_deployment)
343
-
344
- end = time.time()
345
- during = end - start
346
-
347
- print(f"[ ready ] {during}s")
348
-
349
-
350
- def running():
351
- return {"running": True}
352
-
353
-
354
- def status(model_id):
355
- disabled_models = [
356
- "microsoft/trocr-base-printed",
357
- "microsoft/trocr-base-handwritten",
358
- ]
359
- if model_id in pipes.keys() and model_id not in disabled_models:
360
- print(f"[ check {model_id} ] success")
361
- return {"loaded": True}
362
- else:
363
- print(f"[ check {model_id} ] failed")
364
- return {"loaded": False}
365
-
366
-
367
- def models(model_id, data):
368
- while "using" in pipes[model_id] and pipes[model_id]["using"]:
369
- print(f"[ inference {model_id} ] waiting")
370
- time.sleep(0.1)
371
- pipes[model_id]["using"] = True
372
- print(f"[ inference {model_id} ] start")
373
-
374
- start = time.time()
375
-
376
- pipe = pipes[model_id]["model"]
377
-
378
- if "device" in pipes[model_id]:
379
- try:
380
- pipe.to(pipes[model_id]["device"])
381
- except:
382
- pipe.device = torch.device(pipes[model_id]["device"])
383
- pipe.model.to(pipes[model_id]["device"])
384
-
385
- result = None
386
- try:
387
- # text to video
388
- if model_id == "damo-vilab/text-to-video-ms-1.7b":
389
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(
390
- pipe.scheduler.config
391
- )
392
- # pipe.enable_model_cpu_offload()
393
- prompt = data["text"]
394
- video_frames = pipe(prompt, num_inference_steps=50, num_frames=40).frames
395
- file_name = str(uuid.uuid4())[:4]
396
- video_path = export_to_video(video_frames, f"public/videos/{file_name}.mp4")
397
-
398
- new_file_name = str(uuid.uuid4())[:4]
399
- os.system(
400
- f"ffmpeg -i {video_path} -vcodec libx264 public/videos/{new_file_name}.mp4"
401
- )
402
-
403
- if os.path.exists(f"public/videos/{new_file_name}.mp4"):
404
- result = {"path": f"/videos/{new_file_name}.mp4"}
405
- else:
406
- result = {"path": f"/videos/{file_name}.mp4"}
407
-
408
- # controlnet
409
- if model_id.startswith("lllyasviel/sd-controlnet-"):
410
- pipe.controlnet.to("cpu")
411
- pipe.controlnet = pipes[model_id]["control"].to(pipes[model_id]["device"])
412
- pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
413
- control_image = load_image(data["img_url"])
414
- # generator = torch.manual_seed(66)
415
- out_image: Image = pipe(
416
- data["text"], num_inference_steps=20, image=control_image
417
- ).images[0]
418
- file_name = str(uuid.uuid4())[:4]
419
- out_image.save(f"public/images/{file_name}.png")
420
- result = {"path": f"/images/{file_name}.png"}
421
-
422
- if model_id.endswith("-control"):
423
- image = load_image(data["img_url"])
424
- if "scribble" in model_id:
425
- control = pipe(image, scribble=True)
426
- elif "canny" in model_id:
427
- control = pipe(image, low_threshold=100, high_threshold=200)
428
- else:
429
- control = pipe(image)
430
- file_name = str(uuid.uuid4())[:4]
431
- control.save(f"public/images/{file_name}.png")
432
- result = {"path": f"/images/{file_name}.png"}
433
-
434
- # image to image
435
- if model_id == "lambdalabs/sd-image-variations-diffusers":
436
- im = load_image(data["img_url"])
437
- file_name = str(uuid.uuid4())[:4]
438
- with open(f"public/images/{file_name}.png", "wb") as f:
439
- f.write(data)
440
- tform = transforms.Compose(
441
- [
442
- transforms.ToTensor(),
443
- transforms.Resize(
444
- (224, 224),
445
- interpolation=transforms.InterpolationMode.BICUBIC,
446
- antialias=False,
447
- ),
448
- transforms.Normalize(
449
- [0.48145466, 0.4578275, 0.40821073],
450
-                         [0.26862954, 0.26130258, 0.27577711],
-                     ),
-                 ]
-             )
-             inp = tform(im).to(pipes[model_id]["device"]).unsqueeze(0)
-             out = pipe(inp, guidance_scale=3)
-             out["images"][0].save(f"public/images/{file_name}.jpg")
-             result = {"path": f"/images/{file_name}.jpg"}
-
-         # image to text
-         if model_id == "Salesforce/blip-image-captioning-large":
-             raw_image = load_image(data["img_url"]).convert("RGB")
-             text = data["text"]
-             inputs = pipes[model_id]["processor"](raw_image, return_tensors="pt").to(
-                 pipes[model_id]["device"]
-             )
-             out = pipe.generate(**inputs)
-             caption = pipes[model_id]["processor"].decode(
-                 out[0], skip_special_tokens=True
-             )
-             result = {"generated text": caption}
-         if model_id == "ydshieh/vit-gpt2-coco-en":
-             img_url = data["img_url"]
-             generated_text = pipe(img_url)[0]["generated_text"]
-             result = {"generated text": generated_text}
-         if model_id == "nlpconnect/vit-gpt2-image-captioning":
-             image = load_image(data["img_url"]).convert("RGB")
-             pixel_values = pipes[model_id]["feature_extractor"](
-                 images=image, return_tensors="pt"
-             ).pixel_values
-             pixel_values = pixel_values.to(pipes[model_id]["device"])
-             generated_ids = pipe.generate(
-                 pixel_values, **{"max_length": 200, "num_beams": 1}
-             )
-             generated_text = pipes[model_id]["tokenizer"].batch_decode(
-                 generated_ids, skip_special_tokens=True
-             )[0]
-             result = {"generated text": generated_text}
-         # image to text: OCR
-         if (
-             model_id == "microsoft/trocr-base-printed"
-             or model_id == "microsoft/trocr-base-handwritten"
-         ):
-             image = load_image(data["img_url"]).convert("RGB")
-             pixel_values = pipes[model_id]["processor"](
-                 image, return_tensors="pt"
-             ).pixel_values
-             pixel_values = pixel_values.to(pipes[model_id]["device"])
-             generated_ids = pipe.generate(pixel_values)
-             generated_text = pipes[model_id]["processor"].batch_decode(
-                 generated_ids, skip_special_tokens=True
-             )[0]
-             result = {"generated text": generated_text}
-
-         # text to image
-         if model_id == "runwayml/stable-diffusion-v1-5":
-             file_name = str(uuid.uuid4())[:4]
-             text = data["text"]
-             out = pipe(prompt=text)
-             out["images"][0].save(f"public/images/{file_name}.jpg")
-             result = {"path": f"/images/{file_name}.jpg"}
-
-         # object detection
-         if (
-             model_id == "google/owlvit-base-patch32"
-             or model_id == "facebook/detr-resnet-101"
-         ):
-             img_url = data["img_url"]
-             open_types = [
-                 "cat",
-                 "couch",
-                 "person",
-                 "car",
-                 "dog",
-                 "horse",
-                 "sheep",
-                 "cow",
-                 "elephant",
-                 "bear",
-                 "zebra",
-                 "giraffe",
-                 "backpack",
-                 "umbrella",
-                 "handbag",
-                 "tie",
-                 "suitcase",
-                 "frisbee",
-                 "skis",
-                 "snowboard",
-                 "sports ball",
-                 "kite",
-                 "baseball bat",
-                 "baseball glove",
-                 "skateboard",
-                 "surfboard",
-                 "tennis racket",
-                 "bottle",
-                 "wine glass",
-                 "cup",
-                 "fork",
-                 "knife",
-                 "spoon",
-                 "bowl",
-                 "banana",
-                 "apple",
-                 "sandwich",
-                 "orange",
-                 "broccoli",
-                 "carrot",
-                 "hot dog",
-                 "pizza",
-                 "donut",
-                 "cake",
-                 "chair",
-                 "couch",
-                 "potted plant",
-                 "bed",
-                 "dining table",
-                 "toilet",
-                 "tv",
-                 "laptop",
-                 "mouse",
-                 "remote",
-                 "keyboard",
-                 "cell phone",
-                 "microwave",
-                 "oven",
-                 "toaster",
-                 "sink",
-                 "refrigerator",
-                 "book",
-                 "clock",
-                 "vase",
-                 "scissors",
-                 "teddy bear",
-                 "hair drier",
-                 "toothbrush",
-                 "traffic light",
-                 "fire hydrant",
-                 "stop sign",
-                 "parking meter",
-                 "bench",
-                 "bird",
-             ]
-             result = pipe(img_url, candidate_labels=open_types)
-
-         # VQA
-         if model_id == "dandelin/vilt-b32-finetuned-vqa":
-             question = data["text"]
-             img_url = data["img_url"]
-             result = pipe(question=question, image=img_url)
-
-         # DQA
-         if model_id == "impira/layoutlm-document-qa":
-             question = data["text"]
-             img_url = data["img_url"]
-             result = pipe(img_url, question)
-
-         # depth-estimation
-         if model_id == "Intel/dpt-large":
-             output = pipe(data["img_url"])
-             image = output["depth"]
-             name = str(uuid.uuid4())[:4]
-             image.save(f"public/images/{name}.jpg")
-             result = {"path": f"/images/{name}.jpg"}
-
-         if model_id == "Intel/dpt-hybrid-midas":
-             image = load_image(data["img_url"])
-             inputs = pipes[model_id]["feature_extractor"](
-                 images=image, return_tensors="pt"
-             )
-             with torch.no_grad():
-                 outputs = pipe(**inputs)
-                 predicted_depth = outputs.predicted_depth
-             prediction = torch.nn.functional.interpolate(
-                 predicted_depth.unsqueeze(1),
-                 size=image.size[::-1],
-                 mode="bicubic",
-                 align_corners=False,
-             )
-             output = prediction.squeeze().cpu().numpy()
-             formatted = (output * 255 / np.max(output)).astype("uint8")
-             image = Image.fromarray(formatted)
-             name = str(uuid.uuid4())[:4]
-             image.save(f"public/images/{name}.jpg")
-             result = {"path": f"/images/{name}.jpg"}
-
-         # TTS
-         if model_id == "espnet/kan-bayashi_ljspeech_vits":
-             text = data["text"]
-             wav = pipe(text)["wav"]
-             name = str(uuid.uuid4())[:4]
-             sf.write(f"public/audios/{name}.wav", wav.cpu().numpy(), pipe.fs, "PCM_16")
-             result = {"path": f"/audios/{name}.wav"}
-
-         if model_id == "microsoft/speecht5_tts":
-             text = data["text"]
-             inputs = pipes[model_id]["processor"](text=text, return_tensors="pt")
-             embeddings_dataset = pipes[model_id]["embeddings_dataset"]
-             speaker_embeddings = (
-                 torch.tensor(embeddings_dataset[7306]["xvector"])
-                 .unsqueeze(0)
-                 .to(pipes[model_id]["device"])
-             )
-             pipes[model_id]["vocoder"].to(pipes[model_id]["device"])
-             speech = pipe.generate_speech(
-                 inputs["input_ids"].to(pipes[model_id]["device"]),
-                 speaker_embeddings,
-                 vocoder=pipes[model_id]["vocoder"],
-             )
-             name = str(uuid.uuid4())[:4]
-             sf.write(
-                 f"public/audios/{name}.wav", speech.cpu().numpy(), samplerate=16000
-             )
-             result = {"path": f"/audios/{name}.wav"}
-
-         # ASR
-         if model_id == "openai/whisper-base" or model_id == "microsoft/speecht5_asr":
-             audio_url = data["audio_url"]
-             result = {"text": pipe(audio_url)["text"]}
-
-         # audio to audio
-         if model_id == "JorisCos/DCCRNet_Libri1Mix_enhsingle_16k":
-             audio_url = data["audio_url"]
-             wav, sr = torchaudio.load(audio_url)
-             with torch.no_grad():
-                 result_wav = pipe(wav.to(pipes[model_id]["device"]))
-             name = str(uuid.uuid4())[:4]
-             sf.write(
-                 f"public/audios/{name}.wav", result_wav.cpu().squeeze().numpy(), sr
-             )
-             result = {"path": f"/audios/{name}.wav"}
-
-         if model_id == "microsoft/speecht5_vc":
-             audio_url = data["audio_url"]
-             wav, sr = torchaudio.load(audio_url)
-             inputs = pipes[model_id]["processor"](
-                 audio=wav, sampling_rate=sr, return_tensors="pt"
-             )
-             embeddings_dataset = pipes[model_id]["embeddings_dataset"]
-             speaker_embeddings = torch.tensor(
-                 embeddings_dataset[7306]["xvector"]
-             ).unsqueeze(0)
-             pipes[model_id]["vocoder"].to(pipes[model_id]["device"])
-             speech = pipe.generate_speech(
-                 inputs["input_ids"].to(pipes[model_id]["device"]),
-                 speaker_embeddings,
-                 vocoder=pipes[model_id]["vocoder"],
-             )
-             name = str(uuid.uuid4())[:4]
-             sf.write(
-                 f"public/audios/{name}.wav", speech.cpu().numpy(), samplerate=16000
-             )
-             result = {"path": f"/audios/{name}.wav"}
-
-         # segmentation
-         if model_id == "facebook/detr-resnet-50-panoptic":
-             result = []
-             segments = pipe(data["img_url"])
-             image = load_image(data["img_url"])
-
-             colors = []
-             for i in range(len(segments)):
-                 colors.append(
-                     (
-                         random.randint(100, 255),
-                         random.randint(100, 255),
-                         random.randint(100, 255),
-                         50,
-                     )
-                 )
-
-             for i, segment in enumerate(segments):
-                 mask = segment["mask"]
-                 mask = mask.convert("L")
-                 layer = Image.new("RGBA", mask.size, colors[i])
-                 image.paste(layer, (0, 0), mask)
-             name = str(uuid.uuid4())[:4]
-             image.save(f"public/images/{name}.jpg")
-             result = {"path": f"/images/{name}.jpg"}
-
-         if (
-             model_id == "facebook/maskformer-swin-base-coco"
-             or model_id == "facebook/maskformer-swin-large-ade"
-         ):
-             image = load_image(data["img_url"])
-             inputs = pipes[model_id]["feature_extractor"](
-                 images=image, return_tensors="pt"
-             ).to(pipes[model_id]["device"])
-             outputs = pipe(**inputs)
-             result = pipes[model_id][
-                 "feature_extractor"
-             ].post_process_panoptic_segmentation(
-                 outputs, target_sizes=[image.size[::-1]]
-             )[0]
-             predicted_panoptic_map = result["segmentation"].cpu().numpy()
-             predicted_panoptic_map = Image.fromarray(
-                 predicted_panoptic_map.astype(np.uint8)
-             )
-             name = str(uuid.uuid4())[:4]
-             predicted_panoptic_map.save(f"public/images/{name}.jpg")
-             result = {"path": f"/images/{name}.jpg"}
-
-     except Exception as e:
-         print(e)
-         traceback.print_exc()
-         result = {"error": {"message": "Error when running the model inference."}}
-
-     if "device" in pipes[model_id]:
-         try:
-             pipe.to("cpu")
-             # torch.cuda.empty_cache()
-         except Exception:
-             pipe.device = torch.device("cpu")
-             pipe.model.to("cpu")
-             # torch.cuda.empty_cache()
-
-     pipes[model_id]["using"] = False
-
-     if result is None:
-         result = {"error": {"message": "model not found"}}
-
-     end = time.time()
-     during = end - start
-     print(f"[ complete {model_id} ] {during}s")
-     print(f"[ result {model_id} ] {result}")
-
-     return result
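
The handler above is one large dispatch on `model_id`: each branch reads only the request fields it needs (`text`, `img_url`, `audio_url`), runs the pre-loaded pipe, and normalizes the output into a small JSON-friendly dict. A minimal client sketch, assuming the handler is exposed behind an HTTP route; the route, port, and payload shape here are guesses based on the handler code, not a documented API:

```python
# Hypothetical client for the dispatcher above. Endpoint URL and payload
# keys are assumptions; adjust them to however the server wires this up.
import requests

resp = requests.post(
    "http://localhost:8004/models",  # assumed route
    json={
        "model_id": "dandelin/vilt-b32-finetuned-vqa",  # selects the VQA branch
        "data": {
            "text": "What animal is in the picture?",
            "img_url": "https://example.com/cat.jpg",
        },
    },
    timeout=300,
)
print(resp.json())  # either the model result or {"error": {...}}
```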
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/facial_recognition/__init__.py DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/TRANSLATING.md DELETED
@@ -1,57 +0,0 @@
- ### Translating the Diffusers documentation into your language
-
- As part of our mission to democratize machine learning, we'd love to make the Diffusers library available in many more languages! Follow the steps below if you want to help translate the documentation into your language 🙏.
-
- **🗞️ Open an issue**
-
- To get started, navigate to the [Issues](https://github.com/huggingface/diffusers/issues) page of this repo and check if anyone else has opened an issue for your language. If not, open a new issue by selecting the "Translation template" from the "New issue" button.
-
- Once an issue exists, post a comment to indicate which chapters you'd like to work on, and we'll add your name to the list.
-
-
- **🍴 Fork the repository**
-
- First, you'll need to [fork the Diffusers repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). You can do this by clicking on the **Fork** button on the top-right corner of this repo's page.
-
- Once you've forked the repo, you'll want to get the files on your local machine for editing. You can do that by cloning the fork with Git as follows:
-
- ```bash
- git clone https://github.com/YOUR-USERNAME/diffusers.git
- ```
-
- **📋 Copy-paste the English version with a new language code**
-
- The documentation files are in one leading directory:
-
- - [`docs/source`](https://github.com/huggingface/diffusers/tree/main/docs/source): All the documentation materials are organized here by language.
-
- You'll only need to copy the files in the [`docs/source/en`](https://github.com/huggingface/diffusers/tree/main/docs/source/en) directory, so first navigate to your fork of the repo and run the following:
-
- ```bash
- cd ~/path/to/diffusers/docs
- cp -r source/en source/LANG-ID
- ```
-
- Here, `LANG-ID` should be one of the ISO 639-1 or ISO 639-2 language codes -- see [here](https://www.loc.gov/standards/iso639-2/php/code_list.php) for a handy table.
-
- **✍️ Start translating**
-
- Now comes the fun part: translating the text!
-
- The first thing we recommend is translating the part of the `_toctree.yml` file that corresponds to your doc chapter. This file is used to render the table of contents on the website.
-
- > 🙋 If the `_toctree.yml` file doesn't yet exist for your language, you can create one by copy-pasting from the English version and deleting the sections unrelated to your chapter. Just make sure it exists in the `docs/source/LANG-ID/` directory!
-
- The fields you should add are `local` (with the name of the file containing the translation; e.g. `autoclass_tutorial`), and `title` (with the title of the doc in your language; e.g. `Load pretrained instances with an AutoClass`) -- as a reference, here is the `_toctree.yml` for [English](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml):
-
- ```yaml
- - sections:
-     - local: pipeline_tutorial # Do not change this! Use the same name for your .md file
-       title: Pipelines for inference # Translate this!
-     ...
-   title: Tutorials # Translate this!
- ```
-
- Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter.
-
- > 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/diffusers/issues) and tag @patrickvonplaten.
spaces/Andy1621/uniformer_image_detection/mmdet/core/utils/misc.py DELETED
@@ -1,61 +0,0 @@
- from functools import partial
-
- import numpy as np
- import torch
- from six.moves import map, zip
-
- from ..mask.structures import BitmapMasks, PolygonMasks
-
-
- def multi_apply(func, *args, **kwargs):
-     """Apply function to a list of arguments.
-
-     Note:
-         This function applies ``func`` to multiple inputs and
-         maps the multiple outputs of ``func`` into different
-         lists. Each list contains the same type of outputs corresponding
-         to different inputs.
-
-     Args:
-         func (Function): A function that will be applied to a list of
-             arguments
-
-     Returns:
-         tuple(list): A tuple containing multiple lists, each list contains \
-             a kind of returned results by the function
-     """
-     pfunc = partial(func, **kwargs) if kwargs else func
-     map_results = map(pfunc, *args)
-     return tuple(map(list, zip(*map_results)))
-
-
- def unmap(data, count, inds, fill=0):
-     """Unmap a subset of items (data) back to the original set of items (of
-     size count)."""
-     if data.dim() == 1:
-         ret = data.new_full((count, ), fill)
-         ret[inds.type(torch.bool)] = data
-     else:
-         new_size = (count, ) + data.size()[1:]
-         ret = data.new_full(new_size, fill)
-         ret[inds.type(torch.bool), :] = data
-     return ret
-
-
- def mask2ndarray(mask):
-     """Convert Mask to ndarray.
-
-     Args:
-         mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
-             torch.Tensor or np.ndarray): The mask to be converted.
-
-     Returns:
-         np.ndarray: Ndarray mask of shape (n, h, w) that has been converted
-     """
-     if isinstance(mask, (BitmapMasks, PolygonMasks)):
-         mask = mask.to_ndarray()
-     elif isinstance(mask, torch.Tensor):
-         mask = mask.detach().cpu().numpy()
-     elif not isinstance(mask, np.ndarray):
-         raise TypeError(f'Unsupported {type(mask)} data type')
-     return mask
spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/accuracy.py DELETED
@@ -1,78 +0,0 @@
- import mmcv
- import torch.nn as nn
-
-
- @mmcv.jit(coderize=True)
- def accuracy(pred, target, topk=1, thresh=None):
-     """Calculate accuracy according to the prediction and target.
-
-     Args:
-         pred (torch.Tensor): The model prediction, shape (N, num_class)
-         target (torch.Tensor): The target of each prediction, shape (N, )
-         topk (int | tuple[int], optional): If the predictions in ``topk``
-             match the target, the predictions will be regarded as
-             correct ones. Defaults to 1.
-         thresh (float, optional): If not None, predictions with scores under
-             this threshold are considered incorrect. Default to None.
-
-     Returns:
-         float | tuple[float]: If the input ``topk`` is a single integer,
-             the function will return a single float as accuracy. If
-             ``topk`` is a tuple containing multiple integers, the
-             function will return a tuple containing accuracies of
-             each ``topk`` number.
-     """
-     assert isinstance(topk, (int, tuple))
-     if isinstance(topk, int):
-         topk = (topk, )
-         return_single = True
-     else:
-         return_single = False
-
-     maxk = max(topk)
-     if pred.size(0) == 0:
-         accu = [pred.new_tensor(0.) for i in range(len(topk))]
-         return accu[0] if return_single else accu
-     assert pred.ndim == 2 and target.ndim == 1
-     assert pred.size(0) == target.size(0)
-     assert maxk <= pred.size(1), \
-         f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
-     pred_value, pred_label = pred.topk(maxk, dim=1)
-     pred_label = pred_label.t()  # transpose to shape (maxk, N)
-     correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
-     if thresh is not None:
-         # Only prediction values larger than thresh are counted as correct
-         correct = correct & (pred_value > thresh).t()
-     res = []
-     for k in topk:
-         correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
-         res.append(correct_k.mul_(100.0 / pred.size(0)))
-     return res[0] if return_single else res
-
-
- class Accuracy(nn.Module):
-
-     def __init__(self, topk=(1, ), thresh=None):
-         """Module to calculate the accuracy.
-
-         Args:
-             topk (tuple, optional): The criterion used to calculate the
-                 accuracy. Defaults to (1,).
-             thresh (float, optional): If not None, predictions with scores
-                 under this threshold are considered incorrect. Default to None.
-         """
-         super().__init__()
-         self.topk = topk
-         self.thresh = thresh
-
-     def forward(self, pred, target):
-         """Forward function to calculate accuracy.
-
-         Args:
-             pred (torch.Tensor): Prediction of models.
-             target (torch.Tensor): Target for each prediction.
-
-         Returns:
-             tuple[float]: The accuracies under different topk criterions.
-         """
-         return accuracy(pred, target, self.topk, self.thresh)
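
`accuracy` accepts either a single `topk` or a tuple of them; a quick sketch with made-up logits:

```python
# Illustrative values only; shapes follow the docstring above.
import torch

pred = torch.tensor([[0.1, 0.7, 0.2],   # top-1 prediction: class 1 (correct)
                     [0.5, 0.1, 0.4]])  # top-1 prediction: class 0 (wrong)
target = torch.tensor([1, 2])

top1 = accuracy(pred, target, topk=1)            # tensor([50.])
top1, top2 = accuracy(pred, target, topk=(1, 2)) # class 2 is inside the
                                                 # second sample's top-2,
                                                 # so top2 == tensor([100.])
```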
spaces/Andy1621/uniformer_image_detection/tools/deployment/pytorch2onnx.py DELETED
@@ -1,244 +0,0 @@
- import argparse
- import os.path as osp
- import warnings
-
- import numpy as np
- import onnx
- import onnxruntime as rt
- import torch
- from mmcv import DictAction
-
- from mmdet.core import (build_model_from_cfg, generate_inputs_and_wrap_model,
-                         preprocess_example_input)
-
-
- def pytorch2onnx(config_path,
-                  checkpoint_path,
-                  input_img,
-                  input_shape,
-                  opset_version=11,
-                  show=False,
-                  output_file='tmp.onnx',
-                  verify=False,
-                  normalize_cfg=None,
-                  dataset='coco',
-                  test_img=None,
-                  do_simplify=False,
-                  cfg_options=None):
-
-     input_config = {
-         'input_shape': input_shape,
-         'input_path': input_img,
-         'normalize_cfg': normalize_cfg
-     }
-
-     # prepare original model and meta for verifying the onnx model
-     orig_model = build_model_from_cfg(
-         config_path, checkpoint_path, cfg_options=cfg_options)
-     one_img, one_meta = preprocess_example_input(input_config)
-     model, tensor_data = generate_inputs_and_wrap_model(
-         config_path, checkpoint_path, input_config, cfg_options=cfg_options)
-     output_names = ['boxes']
-     if model.with_bbox:
-         output_names.append('labels')
-     if model.with_mask:
-         output_names.append('masks')
-
-     torch.onnx.export(
-         model,
-         tensor_data,
-         output_file,
-         input_names=['input'],
-         output_names=output_names,
-         export_params=True,
-         keep_initializers_as_inputs=True,
-         do_constant_folding=True,
-         verbose=show,
-         opset_version=opset_version)
-
-     model.forward = orig_model.forward
-
-     # simplify onnx model
-     if do_simplify:
-         from mmdet import digit_version
-         import mmcv
-
-         min_required_version = '1.2.5'
-         assert digit_version(mmcv.__version__) >= digit_version(
-             min_required_version
-         ), f'Requires to install mmcv>={min_required_version}'
-         from mmcv.onnx.simplify import simplify
-
-         input_dic = {'input': one_img.detach().cpu().numpy()}
-         _ = simplify(output_file, [input_dic], output_file)
-     print(f'Successfully exported ONNX model: {output_file}')
-     if verify:
-         from mmdet.core import get_classes, bbox2result
-         from mmdet.apis import show_result_pyplot
-
-         ort_custom_op_path = ''
-         try:
-             from mmcv.ops import get_onnxruntime_op_path
-             ort_custom_op_path = get_onnxruntime_op_path()
-         except (ImportError, ModuleNotFoundError):
-             warnings.warn('If input model has custom op from mmcv, \
-                 you may have to build mmcv with ONNXRuntime from source.')
-         model.CLASSES = get_classes(dataset)
-         num_classes = len(model.CLASSES)
-         # check by onnx
-         onnx_model = onnx.load(output_file)
-         onnx.checker.check_model(onnx_model)
-         if test_img is not None:
-             input_config['input_path'] = test_img
-             one_img, one_meta = preprocess_example_input(input_config)
-             tensor_data = [one_img]
-         # check the numerical value
-         # get pytorch output
-         pytorch_results = model(tensor_data, [[one_meta]], return_loss=False)
-         pytorch_results = pytorch_results[0]
-         # get onnx output
-         input_all = [node.name for node in onnx_model.graph.input]
-         input_initializer = [
-             node.name for node in onnx_model.graph.initializer
-         ]
-         net_feed_input = list(set(input_all) - set(input_initializer))
-         assert (len(net_feed_input) == 1)
-         session_options = rt.SessionOptions()
-         # register custom op for onnxruntime
-         if osp.exists(ort_custom_op_path):
-             session_options.register_custom_ops_library(ort_custom_op_path)
-         sess = rt.InferenceSession(output_file, session_options)
-         onnx_outputs = sess.run(None,
-                                 {net_feed_input[0]: one_img.detach().numpy()})
-         output_names = [_.name for _ in sess.get_outputs()]
-         output_shapes = [_.shape for _ in onnx_outputs]
-         print(f'onnxruntime output names: {output_names}, \
-             output shapes: {output_shapes}')
-         nrof_out = len(onnx_outputs)
-         assert nrof_out > 0, 'Must have output'
-         with_mask = nrof_out == 3
-         if nrof_out == 1:
-             onnx_results = onnx_outputs[0]
-         else:
-             det_bboxes, det_labels = onnx_outputs[:2]
-             onnx_results = bbox2result(det_bboxes, det_labels, num_classes)
-             if with_mask:
-                 segm_results = onnx_outputs[2].squeeze(1)
-                 cls_segms = [[] for _ in range(num_classes)]
-                 for i in range(det_bboxes.shape[0]):
-                     cls_segms[det_labels[i]].append(segm_results[i])
-                 onnx_results = (onnx_results, cls_segms)
-         # visualize predictions
-
-         if show:
-             show_result_pyplot(
-                 model, one_meta['show_img'], pytorch_results, title='Pytorch')
-             show_result_pyplot(
-                 model, one_meta['show_img'], onnx_results, title='ONNX')
-
-         # compare a part of result
-
-         if with_mask:
-             compare_pairs = list(zip(onnx_results, pytorch_results))
-         else:
-             compare_pairs = [(onnx_results, pytorch_results)]
-         for onnx_res, pytorch_res in compare_pairs:
-             for o_res, p_res in zip(onnx_res, pytorch_res):
-                 np.testing.assert_allclose(
-                     o_res,
-                     p_res,
-                     rtol=1e-03,
-                     atol=1e-05,
-                 )
-         print('The numerical values are the same between Pytorch and ONNX')
-
-
- def parse_args():
-     parser = argparse.ArgumentParser(
-         description='Convert MMDetection models to ONNX')
-     parser.add_argument('config', help='test config file path')
-     parser.add_argument('checkpoint', help='checkpoint file')
-     parser.add_argument('--input-img', type=str, help='Images for input')
-     parser.add_argument('--show', action='store_true', help='show onnx graph')
-     parser.add_argument('--output-file', type=str, default='tmp.onnx')
-     parser.add_argument('--opset-version', type=int, default=11)
-     parser.add_argument(
-         '--test-img', type=str, default=None, help='Images for test')
-     parser.add_argument(
-         '--dataset', type=str, default='coco', help='Dataset name')
-     parser.add_argument(
-         '--verify',
-         action='store_true',
-         help='verify the onnx model output against pytorch output')
-     parser.add_argument(
-         '--simplify',
-         action='store_true',
-         help='Whether to simplify onnx model.')
-     parser.add_argument(
-         '--shape',
-         type=int,
-         nargs='+',
-         default=[800, 1216],
-         help='input image size')
-     parser.add_argument(
-         '--mean',
-         type=float,
-         nargs='+',
-         default=[123.675, 116.28, 103.53],
-         help='mean value used for preprocess input data')
-     parser.add_argument(
-         '--std',
-         type=float,
-         nargs='+',
-         default=[58.395, 57.12, 57.375],
-         help='variance value used for preprocess input data')
-     parser.add_argument(
-         '--cfg-options',
-         nargs='+',
-         action=DictAction,
-         help='override some settings in the used config, the key-value pair '
-         'in xxx=yyy format will be merged into config file. If the value to '
-         'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
-         'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
-         'Note that the quotation marks are necessary and that no white space '
-         'is allowed.')
-     args = parser.parse_args()
-     return args
-
-
- if __name__ == '__main__':
-     args = parse_args()
-
-     assert args.opset_version == 11, 'MMDet only support opset 11 now'
-
-     if not args.input_img:
-         args.input_img = osp.join(
-             osp.dirname(__file__), '../../tests/data/color.jpg')
-
-     if len(args.shape) == 1:
-         input_shape = (1, 3, args.shape[0], args.shape[0])
-     elif len(args.shape) == 2:
-         input_shape = (1, 3) + tuple(args.shape)
-     else:
-         raise ValueError('invalid input shape')
-
-     assert len(args.mean) == 3
-     assert len(args.std) == 3
-
-     normalize_cfg = {'mean': args.mean, 'std': args.std}
-
-     # convert model to onnx file
-     pytorch2onnx(
-         args.config,
-         args.checkpoint,
-         args.input_img,
-         input_shape,
-         opset_version=args.opset_version,
-         show=args.show,
-         output_file=args.output_file,
-         verify=args.verify,
-         normalize_cfg=normalize_cfg,
-         dataset=args.dataset,
-         test_img=args.test_img,
-         do_simplify=args.simplify,
-         cfg_options=args.cfg_options)
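
For reference, a programmatic sketch of invoking the exporter directly; the config and checkpoint paths below are placeholders, and in practice the script is run from the CLI with the flags defined in `parse_args`:

```python
# Hypothetical invocation; config/checkpoint paths are placeholders.
pytorch2onnx(
    'configs/retinanet/retinanet_r50_fpn_1x_coco.py',
    'checkpoints/retinanet_r50_fpn_1x_coco.pth',
    input_img='tests/data/color.jpg',
    input_shape=(1, 3, 800, 1216),       # matches the --shape default
    opset_version=11,                    # the __main__ block asserts 11
    output_file='retinanet.onnx',
    verify=True,                         # cross-checks ONNX vs PyTorch outputs
    normalize_cfg={'mean': [123.675, 116.28, 103.53],
                   'std': [58.395, 57.12, 57.375]},
)
```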
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py DELETED
@@ -1,4 +0,0 @@
- _base_ = [
-     '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py',
-     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
- ]
spaces/Andy1621/uniformer_image_segmentation/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './fpn_r50_512x1024_80k_cityscapes.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
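
Both configs are thin overlays on `_base_` fragments that mmcv merges at load time. A short sketch of inspecting the merged result, assuming the working directory is the repository root:

```python
# Sketch: loading a config that uses `_base_` inheritance (mmcv API).
from mmcv import Config

cfg = Config.fromfile('configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py')
# Only `pretrained` and `backbone.depth` are overridden here; the rest of
# the model, dataset, and schedule come from the inherited base files.
print(cfg.model.backbone.depth)  # 101
print(cfg.model.pretrained)      # open-mmlab://resnet101_v1c
```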
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/cmd_wsl.bat DELETED
@@ -1,11 +0,0 @@
- @echo off
-
- cd /D "%~dp0"
-
- set PATH=%PATH%;%SystemRoot%\system32
-
- @rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script
- call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh cmd"
-
- :end
- pause
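
The `sed` call strips trailing carriage returns so bash inside WSL can source the script even if Git checked it out with Windows line endings. A Python equivalent of that one step, for reference:

```python
# Equivalent of `sed -i 's/\x0D$//' ./wsl.sh`: drop CRs at line ends.
from pathlib import Path

script = Path('wsl.sh')
script.write_bytes(script.read_bytes().replace(b'\r\n', b'\n'))
```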
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/silero_tts/tts_preprocessor.py DELETED
@@ -1,200 +0,0 @@
- import re
-
- from num2words import num2words
-
- punctuation = r'[\s,.?!/)\'\]>]'
- alphabet_map = {
-     "A": " Ei ",
-     "B": " Bee ",
-     "C": " See ",
-     "D": " Dee ",
-     "E": " Eee ",
-     "F": " Eff ",
-     "G": " Jee ",
-     "H": " Eich ",
-     "I": " Eye ",
-     "J": " Jay ",
-     "K": " Kay ",
-     "L": " El ",
-     "M": " Emm ",
-     "N": " Enn ",
-     "O": " Ohh ",
-     "P": " Pee ",
-     "Q": " Queue ",
-     "R": " Are ",
-     "S": " Ess ",
-     "T": " Tee ",
-     "U": " You ",
-     "V": " Vee ",
-     "W": " Double You ",
-     "X": " Ex ",
-     "Y": " Why ",
-     "Z": " Zed "  # Zed is weird, as I (da3dsoul) am American, but most of the voice models sound British, so it matches
- }
-
-
- def preprocess(string):
-     # the order of some of these matters
-     # For example, you need to remove the commas in numbers before expanding them
-     string = remove_surrounded_chars(string)
-     string = string.replace('"', '')
-     string = string.replace('\u201D', '').replace('\u201C', '')  # right and left quote
-     string = string.replace('\u201F', '')  # italic looking quote
-     string = string.replace('\n', ' ')
-     string = convert_num_locale(string)
-     string = replace_negative(string)
-     string = replace_roman(string)
-     string = hyphen_range_to(string)
-     string = num_to_words(string)
-
-     # TODO Try to use a ML predictor to expand abbreviations. It's hard, dependent on context, and whether to actually
-     # try to say the abbreviation or spell it out as I've done below is not agreed upon
-
-     # For now, expand abbreviations to pronunciations
-     # replace_abbreviations adds a lot of unnecessary whitespace to ensure separation
-     string = replace_abbreviations(string)
-     string = replace_lowercase_abbreviations(string)
-
-     # cleanup whitespaces
-     # remove whitespace before punctuation
-     string = re.sub(rf'\s+({punctuation})', r'\1', string)
-     string = string.strip()
-     # compact whitespace
-     string = ' '.join(string.split())
-
-     return string
-
-
- def remove_surrounded_chars(string):
-     # first this expression checks if there is a string nested exclusively between an alt=
-     # and a style= string. This would correspond to only the alt text of an embedded image.
-     # If it matches, only that part is kept as the string and sent on for further processing.
-     # Afterwards this expression matches 'as few symbols as possible (0 upwards) between any
-     # asterisks' OR 'as few symbols as possible (0 upwards) between an asterisk and the end of the string'
-     if re.search(r'(?<=alt=)(.*)(?=style=)', string, re.DOTALL):
-         m = re.search(r'(?<=alt=)(.*)(?=style=)', string, re.DOTALL)
-         string = m.group(0)
-     return re.sub(r'\*[^*]*?(\*|$)', '', string)
-
-
- def convert_num_locale(text):
-     # This detects locale and converts it to American without comma separators
-     pattern = re.compile(r'(?:\s|^)\d{1,3}(?:\.\d{3})+(,\d+)(?:\s|$)')
-     result = text
-     while True:
-         match = pattern.search(result)
-         if match is None:
-             break
-
-         start = match.start()
-         end = match.end()
-         result = result[0:start] + result[start:end].replace('.', '').replace(',', '.') + result[end:len(result)]
-
-     # removes comma separators from existing American numbers
-     pattern = re.compile(r'(\d),(\d)')
-     result = pattern.sub(r'\1\2', result)
-
-     return result
-
-
- def replace_negative(string):
-     # handles situations like -5. -5 would become negative 5, which would then be expanded to negative five
-     return re.sub(rf'(\s)(-)(\d+)({punctuation})', r'\1negative \3\4', string)
-
-
- def replace_roman(string):
-     # find a string of roman numerals.
-     # Only 2 or more, to avoid capturing I and single character abbreviations, like names
-     pattern = re.compile(rf'\s[IVXLCDM]{{2,}}{punctuation}')
-     result = string
-     while True:
-         match = pattern.search(result)
-         if match is None:
-             break
-
-         start = match.start()
-         end = match.end()
-         result = result[0:start + 1] + str(roman_to_int(result[start + 1:end - 1])) + result[end - 1:len(result)]
-
-     return result
-
-
- def roman_to_int(s):
-     rom_val = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
-     int_val = 0
-     for i in range(len(s)):
-         if i > 0 and rom_val[s[i]] > rom_val[s[i - 1]]:
-             int_val += rom_val[s[i]] - 2 * rom_val[s[i - 1]]
-         else:
-             int_val += rom_val[s[i]]
-     return int_val
-
-
- def hyphen_range_to(text):
-     pattern = re.compile(r'(\d+)[-–](\d+)')
-     result = pattern.sub(lambda x: x.group(1) + ' to ' + x.group(2), text)
-     return result
-
-
- def num_to_words(text):
-     # 1000 or 10.23
-     pattern = re.compile(r'\d+\.\d+|\d+')
-     result = pattern.sub(lambda x: num2words(float(x.group())), text)
-     return result
-
-
- def replace_abbreviations(string):
-     # abbreviations 1 to 4 characters long. It will get things like A and I, but those are pronounced with their letter
-     pattern = re.compile(rf'(^|[\s(.\'\[<])([A-Z]{{1,4}})({punctuation}|$)')
-     result = string
-     while True:
-         match = pattern.search(result)
-         if match is None:
-             break
-
-         start = match.start()
-         end = match.end()
-         result = result[0:start] + replace_abbreviation(result[start:end]) + result[end:len(result)]
-
-     return result
-
-
- def replace_lowercase_abbreviations(string):
-     # abbreviations 1 to 4 characters long, separated by dots i.e. e.g.
-     pattern = re.compile(rf'(^|[\s(.\'\[<])(([a-z]\.){{1,4}})({punctuation}|$)')
-     result = string
-     while True:
-         match = pattern.search(result)
-         if match is None:
-             break
-
-         start = match.start()
-         end = match.end()
-         result = result[0:start] + replace_abbreviation(result[start:end].upper()) + result[end:len(result)]
-
-     return result
-
-
- def replace_abbreviation(string):
-     result = ""
-     for char in string:
-         result += match_mapping(char)
-
-     return result
-
-
- def match_mapping(char):
-     for mapping in alphabet_map.keys():
-         if char == mapping:
-             return alphabet_map[char]
-
-     return char
-
-
- def __main__(args):
-     print(preprocess(args[1]))
-
-
- if __name__ == "__main__":
-     import sys
-     __main__(sys.argv)
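
A quick end-to-end sketch of `preprocess`; the exact wording of the number expansion depends on the installed num2words version, so the output is shown approximately:

```python
# Sample run of the normalization pipeline defined above.
sample = 'Chapter XII covers pages 10-15 and costs 1.234,56 euros.'
print(preprocess(sample))
# -> roughly: 'Chapter twelve covers pages ten to fifteen and costs
#    one thousand, two hundred and thirty-four point five six euros.'
```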
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/memory.py DELETED
@@ -1,25 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- import torch
-
- from .hook import HOOKS, Hook
-
-
- @HOOKS.register_module()
- class EmptyCacheHook(Hook):
-
-     def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
-         self._before_epoch = before_epoch
-         self._after_epoch = after_epoch
-         self._after_iter = after_iter
-
-     def after_iter(self, runner):
-         if self._after_iter:
-             torch.cuda.empty_cache()
-
-     def before_epoch(self, runner):
-         if self._before_epoch:
-             torch.cuda.empty_cache()
-
-     def after_epoch(self, runner):
-         if self._after_epoch:
-             torch.cuda.empty_cache()
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/appdirs.py DELETED
@@ -1,608 +0,0 @@
- #!/usr/bin/env python
- # -*- coding: utf-8 -*-
- # Copyright (c) 2005-2010 ActiveState Software Inc.
- # Copyright (c) 2013 Eddy Petrișor
-
- """Utilities for determining application-specific dirs.
-
- See <http://github.com/ActiveState/appdirs> for details and usage.
- """
- # Dev Notes:
- # - MSDN on where to store app data files:
- #   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
- # - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
- # - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-
- __version_info__ = (1, 4, 3)
- __version__ = '.'.join(map(str, __version_info__))
-
-
- import sys
- import os
-
- PY3 = sys.version_info[0] == 3
-
- if PY3:
-     unicode = str
-
- if sys.platform.startswith('java'):
-     import platform
-     os_name = platform.java_ver()[3][0]
-     if os_name.startswith('Windows'):  # "Windows XP", "Windows 7", etc.
-         system = 'win32'
-     elif os_name.startswith('Mac'):  # "Mac OS X", etc.
-         system = 'darwin'
-     else:  # "Linux", "SunOS", "FreeBSD", etc.
-         # Setting this to "linux2" is not ideal, but only Windows or Mac
-         # are actually checked for and the rest of the module expects
-         # *sys.platform* style strings.
-         system = 'linux2'
- else:
-     system = sys.platform
-
-
- def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
-     r"""Return full path to the user-specific data dir for this application.
-
-     "appname" is the name of application.
-         If None, just the system directory is returned.
-     "appauthor" (only used on Windows) is the name of the
-         appauthor or distributing body for this application. Typically
-         it is the owning company name. This falls back to appname. You may
-         pass False to disable it.
-     "version" is an optional version path element to append to the
-         path. You might want to use this if you want multiple versions
-         of your app to be able to run independently. If used, this
-         would typically be "<major>.<minor>".
-         Only applied when appname is present.
-     "roaming" (boolean, default False) can be set True to use the Windows
-         roaming appdata directory. That means that for users on a Windows
-         network setup for roaming profiles, this user data will be
-         sync'd on login. See
-         <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-         for a discussion of issues.
-
-     Typical user data directories are:
-         Mac OS X:             ~/Library/Application Support/<AppName>
-         Unix:                 ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
-         Win XP (not roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
-         Win XP (roaming):     C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
-         Win 7 (not roaming):  C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
-         Win 7 (roaming):      C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
-
-     For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
-     That means, by default "~/.local/share/<AppName>".
-     """
-     if system == "win32":
-         if appauthor is None:
-             appauthor = appname
-         const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
-         path = os.path.normpath(_get_win_folder(const))
-         if appname:
-             if appauthor is not False:
-                 path = os.path.join(path, appauthor, appname)
-             else:
-                 path = os.path.join(path, appname)
-     elif system == 'darwin':
-         path = os.path.expanduser('~/Library/Application Support/')
-         if appname:
-             path = os.path.join(path, appname)
-     else:
-         path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
-         if appname:
-             path = os.path.join(path, appname)
-     if appname and version:
-         path = os.path.join(path, version)
-     return path
-
-
- def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
-     r"""Return full path to the user-shared data dir for this application.
-
-     "appname" is the name of application.
-         If None, just the system directory is returned.
-     "appauthor" (only used on Windows) is the name of the
-         appauthor or distributing body for this application. Typically
-         it is the owning company name. This falls back to appname. You may
-         pass False to disable it.
-     "version" is an optional version path element to append to the
-         path. You might want to use this if you want multiple versions
-         of your app to be able to run independently. If used, this
-         would typically be "<major>.<minor>".
-         Only applied when appname is present.
-     "multipath" is an optional parameter only applicable to *nix
-         which indicates that the entire list of data dirs should be
-         returned. By default, the first item from XDG_DATA_DIRS is
-         returned, or '/usr/local/share/<AppName>',
-         if XDG_DATA_DIRS is not set
-
-     Typical site data directories are:
-         Mac OS X: /Library/Application Support/<AppName>
-         Unix:     /usr/local/share/<AppName> or /usr/share/<AppName>
-         Win XP:   C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
-         Vista:    (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-         Win 7:    C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.
-
-     For Unix, this is using the $XDG_DATA_DIRS[0] default.
-
-     WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
-     """
-     if system == "win32":
-         if appauthor is None:
-             appauthor = appname
-         path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
-         if appname:
-             if appauthor is not False:
-                 path = os.path.join(path, appauthor, appname)
-             else:
-                 path = os.path.join(path, appname)
-     elif system == 'darwin':
-         path = os.path.expanduser('/Library/Application Support')
-         if appname:
-             path = os.path.join(path, appname)
-     else:
-         # XDG default for $XDG_DATA_DIRS
-         # only first, if multipath is False
-         path = os.getenv('XDG_DATA_DIRS',
-                          os.pathsep.join(['/usr/local/share', '/usr/share']))
-         pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
-         if appname:
-             if version:
-                 appname = os.path.join(appname, version)
-             pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
-         if multipath:
-             path = os.pathsep.join(pathlist)
-         else:
-             path = pathlist[0]
-         return path
-
-     if appname and version:
-         path = os.path.join(path, version)
-     return path
-
-
- def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
-     r"""Return full path to the user-specific config dir for this application.
-
-     "appname" is the name of application.
-         If None, just the system directory is returned.
-     "appauthor" (only used on Windows) is the name of the
-         appauthor or distributing body for this application. Typically
-         it is the owning company name. This falls back to appname. You may
-         pass False to disable it.
-     "version" is an optional version path element to append to the
-         path. You might want to use this if you want multiple versions
-         of your app to be able to run independently. If used, this
-         would typically be "<major>.<minor>".
-         Only applied when appname is present.
-     "roaming" (boolean, default False) can be set True to use the Windows
-         roaming appdata directory. That means that for users on a Windows
-         network setup for roaming profiles, this user data will be
-         sync'd on login. See
-         <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-         for a discussion of issues.
-
-     Typical user config directories are:
-         Mac OS X: same as user_data_dir
-         Unix:     ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
-         Win *:    same as user_data_dir
-
-     For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
-     That means, by default "~/.config/<AppName>".
-     """
-     if system in ["win32", "darwin"]:
-         path = user_data_dir(appname, appauthor, None, roaming)
-     else:
-         path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
-         if appname:
-             path = os.path.join(path, appname)
-     if appname and version:
-         path = os.path.join(path, version)
-     return path
-
-
- def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
-     r"""Return full path to the user-shared config dir for this application.
-
-     "appname" is the name of application.
-         If None, just the system directory is returned.
-     "appauthor" (only used on Windows) is the name of the
-         appauthor or distributing body for this application. Typically
-         it is the owning company name. This falls back to appname. You may
-         pass False to disable it.
-     "version" is an optional version path element to append to the
-         path. You might want to use this if you want multiple versions
-         of your app to be able to run independently. If used, this
-         would typically be "<major>.<minor>".
-         Only applied when appname is present.
-     "multipath" is an optional parameter only applicable to *nix
-         which indicates that the entire list of config dirs should be
-         returned. By default, the first item from XDG_CONFIG_DIRS is
-         returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
-
-     Typical site config directories are:
-         Mac OS X: same as site_data_dir
-         Unix:     /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
-                   $XDG_CONFIG_DIRS
-         Win *:    same as site_data_dir
-         Vista:    (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-
-     For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
-
-     WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
-     """
-     if system in ["win32", "darwin"]:
-         path = site_data_dir(appname, appauthor)
-         if appname and version:
-             path = os.path.join(path, version)
-     else:
-         # XDG default for $XDG_CONFIG_DIRS
-         # only first, if multipath is False
-         path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
-         pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
-         if appname:
-             if version:
-                 appname = os.path.join(appname, version)
-             pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
-         if multipath:
-             path = os.pathsep.join(pathlist)
-         else:
-             path = pathlist[0]
-     return path
-
-
- def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
-     r"""Return full path to the user-specific cache dir for this application.
-
-     "appname" is the name of application.
-         If None, just the system directory is returned.
-     "appauthor" (only used on Windows) is the name of the
-         appauthor or distributing body for this application. Typically
-         it is the owning company name. This falls back to appname. You may
-         pass False to disable it.
-     "version" is an optional version path element to append to the
-         path. You might want to use this if you want multiple versions
-         of your app to be able to run independently. If used, this
-         would typically be "<major>.<minor>".
-         Only applied when appname is present.
-     "opinion" (boolean) can be False to disable the appending of
-         "Cache" to the base app data dir for Windows. See
-         discussion below.
-
-     Typical user cache directories are:
-         Mac OS X: ~/Library/Caches/<AppName>
-         Unix:     ~/.cache/<AppName> (XDG default)
-         Win XP:   C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
-         Vista:    C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
-
-     On Windows the only suggestion in the MSDN docs is that local settings go in
-     the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
-     app data dir (the default returned by `user_data_dir` above). Apps typically
-     put cache data somewhere *under* the given dir here. Some examples:
-         ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
-         ...\Acme\SuperApp\Cache\1.0
-     OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
-     This can be disabled with the `opinion=False` option.
-     """
-     if system == "win32":
-         if appauthor is None:
-             appauthor = appname
-         path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
-         if appname:
-             if appauthor is not False:
-                 path = os.path.join(path, appauthor, appname)
-             else:
-                 path = os.path.join(path, appname)
-             if opinion:
-                 path = os.path.join(path, "Cache")
-     elif system == 'darwin':
-         path = os.path.expanduser('~/Library/Caches')
-         if appname:
-             path = os.path.join(path, appname)
-     else:
-         path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
-         if appname:
-             path = os.path.join(path, appname)
-     if appname and version:
-         path = os.path.join(path, version)
-     return path
-
-
- def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
-     r"""Return full path to the user-specific state dir for this application.
-
-     "appname" is the name of application.
-         If None, just the system directory is returned.
-     "appauthor" (only used on Windows) is the name of the
-         appauthor or distributing body for this application. Typically
-         it is the owning company name. This falls back to appname. You may
-         pass False to disable it.
-     "version" is an optional version path element to append to the
-         path. You might want to use this if you want multiple versions
-         of your app to be able to run independently. If used, this
-         would typically be "<major>.<minor>".
-         Only applied when appname is present.
-     "roaming" (boolean, default False) can be set True to use the Windows
-         roaming appdata directory. That means that for users on a Windows
-         network setup for roaming profiles, this user data will be
-         sync'd on login. See
-         <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-         for a discussion of issues.
-
-     Typical user state directories are:
-         Mac OS X: same as user_data_dir
-         Unix:     ~/.local/state/<AppName>    # or in $XDG_STATE_HOME, if defined
-         Win *:    same as user_data_dir
-
-     For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
-     to extend the XDG spec and support $XDG_STATE_HOME.
-
-     That means, by default "~/.local/state/<AppName>".
-     """
-     if system in ["win32", "darwin"]:
-         path = user_data_dir(appname, appauthor, None, roaming)
-     else:
-         path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
-         if appname:
-             path = os.path.join(path, appname)
-     if appname and version:
-         path = os.path.join(path, version)
-     return path
-
-
- def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
-     r"""Return full path to the user-specific log dir for this application.
-
-     "appname" is the name of application.
-         If None, just the system directory is returned.
-     "appauthor" (only used on Windows) is the name of the
-         appauthor or distributing body for this application. Typically
-         it is the owning company name. This falls back to appname. You may
-         pass False to disable it.
-     "version" is an optional version path element to append to the
-         path. You might want to use this if you want multiple versions
-         of your app to be able to run independently. If used, this
-         would typically be "<major>.<minor>".
-         Only applied when appname is present.
-     "opinion" (boolean) can be False to disable the appending of
-         "Logs" to the base app data dir for Windows, and "log" to the
-         base cache dir for Unix. See discussion below.
-
-     Typical user log directories are:
-         Mac OS X: ~/Library/Logs/<AppName>
-         Unix:     ~/.cache/<AppName>/log    # or under $XDG_CACHE_HOME if defined
-         Win XP:   C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
-         Vista:    C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
-
-     On Windows the only suggestion in the MSDN docs is that local settings
-     go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
-     examples of what some windows apps use for a logs dir.)
-
-     OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
-     value for Windows and appends "log" to the user cache dir for Unix.
-     This can be disabled with the `opinion=False` option.
-     """
-     if system == "darwin":
-         path = os.path.join(
-             os.path.expanduser('~/Library/Logs'),
-             appname)
-     elif system == "win32":
-         path = user_data_dir(appname, appauthor, version)
-         version = False
-         if opinion:
-             path = os.path.join(path, "Logs")
-     else:
-         path = user_cache_dir(appname, appauthor, version)
-         version = False
-         if opinion:
-             path = os.path.join(path, "log")
-     if appname and version:
-         path = os.path.join(path, version)
-     return path
-
-
- class AppDirs(object):
-     """Convenience wrapper for getting application dirs."""
-     def __init__(self, appname=None, appauthor=None, version=None,
-                  roaming=False, multipath=False):
-         self.appname = appname
-         self.appauthor = appauthor
-         self.version = version
-         self.roaming = roaming
-         self.multipath = multipath
-
-     @property
-     def user_data_dir(self):
-         return user_data_dir(self.appname, self.appauthor,
-                              version=self.version, roaming=self.roaming)
-
-     @property
-     def site_data_dir(self):
-         return site_data_dir(self.appname, self.appauthor,
-                              version=self.version, multipath=self.multipath)
-
-     @property
-     def user_config_dir(self):
-         return user_config_dir(self.appname, self.appauthor,
-                                version=self.version, roaming=self.roaming)
-
-     @property
-     def site_config_dir(self):
-         return site_config_dir(self.appname, self.appauthor,
-                                version=self.version, multipath=self.multipath)
-
-     @property
-     def user_cache_dir(self):
-         return user_cache_dir(self.appname, self.appauthor,
-                               version=self.version)
-
-     @property
-     def user_state_dir(self):
-         return user_state_dir(self.appname, self.appauthor,
-                               version=self.version)
-
-     @property
-     def user_log_dir(self):
-         return user_log_dir(self.appname, self.appauthor,
-                             version=self.version)
-
-
- #---- internal support stuff
-
- def _get_win_folder_from_registry(csidl_name):
-     """This is a fallback technique at best. I'm not sure if using the
-     registry for this guarantees us the correct answer for all CSIDL_*
-     names.
-     """
-     if PY3:
-         import winreg as _winreg
-     else:
-         import _winreg
-
-     shell_folder_name = {
-         "CSIDL_APPDATA": "AppData",
-         "CSIDL_COMMON_APPDATA": "Common AppData",
-         "CSIDL_LOCAL_APPDATA": "Local AppData",
-     }[csidl_name]
-
-     key = _winreg.OpenKey(
-         _winreg.HKEY_CURRENT_USER,
-         r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
-     )
-     dir, type = _winreg.QueryValueEx(key, shell_folder_name)
-     return dir
-
-
- def _get_win_folder_with_pywin32(csidl_name):
-     from win32com.shell import shellcon, shell
-     dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
-     # Try to make this a unicode path because SHGetFolderPath does
-     # not return unicode strings when there is unicode data in the
-     # path.
-     try:
-         dir = unicode(dir)
-
-         # Downgrade to short path name if have highbit chars. See
-         # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-         has_high_char = False
-         for c in dir:
-             if ord(c) > 255:
-                 has_high_char = True
-                 break
-         if has_high_char:
-             try:
-                 import win32api
-                 dir = win32api.GetShortPathName(dir)
-             except ImportError:
-                 pass
-     except UnicodeError:
-         pass
-     return dir
-
-
- def _get_win_folder_with_ctypes(csidl_name):
-     import ctypes
-
-     csidl_const = {
-         "CSIDL_APPDATA": 26,
-         "CSIDL_COMMON_APPDATA": 35,
-         "CSIDL_LOCAL_APPDATA": 28,
-     }[csidl_name]
-
-     buf = ctypes.create_unicode_buffer(1024)
-     ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-
-     # Downgrade to short path name if have highbit chars. See
-     # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-     has_high_char = False
-     for c in buf:
-         if ord(c) > 255:
-             has_high_char = True
-             break
-     if has_high_char:
-         buf2 = ctypes.create_unicode_buffer(1024)
-         if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
-             buf = buf2
-
-     return buf.value
-
-
- def _get_win_folder_with_jna(csidl_name):
-     import array
-     from com.sun import jna
-     from com.sun.jna.platform import win32
-
-     buf_size = win32.WinDef.MAX_PATH * 2
-     buf = array.zeros('c', buf_size)
-     shell = win32.Shell32.INSTANCE
-     shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
-     dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
-     # Downgrade to short path name if have highbit chars. See
-     # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-     has_high_char = False
-     for c in dir:
-         if ord(c) > 255:
-             has_high_char = True
-             break
-     if has_high_char:
-         buf = array.zeros('c', buf_size)
-         kernel = win32.Kernel32.INSTANCE
-         if kernel.GetShortPathName(dir, buf, buf_size):
-             dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
-     return dir
-
-
- if system == "win32":
-     try:
-         import win32com.shell
-         _get_win_folder = _get_win_folder_with_pywin32
-     except ImportError:
-         try:
-             from ctypes import windll
-             _get_win_folder = _get_win_folder_with_ctypes
-         except ImportError:
-             try:
-                 import com.sun.jna
-                 _get_win_folder = _get_win_folder_with_jna
-             except ImportError:
-                 _get_win_folder = _get_win_folder_from_registry
-
-
- #---- self test code
-
- if __name__ == "__main__":
-     appname = "MyApp"
-     appauthor = "MyCompany"
-
-     props = ("user_data_dir",
-              "user_config_dir",
-              "user_cache_dir",
-              "user_state_dir",
-              "user_log_dir",
-              "site_data_dir",
-              "site_config_dir")
-
-     print("-- app dirs %s --" % __version__)
-
-     print("-- app dirs (with optional 'version')")
-     dirs = AppDirs(appname, appauthor, version="1.0")
-     for prop in props:
-         print("%s: %s" % (prop, getattr(dirs, prop)))
-
-     print("\n-- app dirs (without optional 'version')")
-     dirs = AppDirs(appname, appauthor)
-     for prop in props:
-         print("%s: %s" % (prop, getattr(dirs, prop)))
-
-     print("\n-- app dirs (without optional 'appauthor')")
-     dirs = AppDirs(appname)
-     for prop in props:
-         print("%s: %s" % (prop, getattr(dirs, prop)))
-
-     print("\n-- app dirs (with disabled 'appauthor')")
-     dirs = AppDirs(appname, appauthor=False)
-     for prop in props:
-         print("%s: %s" % (prop, getattr(dirs, prop)))
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/py34compat.py DELETED
@@ -1,13 +0,0 @@
- import importlib
-
- try:
-     import importlib.util
- except ImportError:
-     pass
-
-
- try:
-     module_from_spec = importlib.util.module_from_spec
- except AttributeError:
-     def module_from_spec(spec):
-         return spec.loader.load_module(spec.name)
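For context, this shim backfills `module_from_spec`, which only appeared in Python 3.5, so setuptools can still build a module from a spec on Python 3.4. A minimal usage sketch follows; the module name and file path are hypothetical placeholders, not from the source:

# Minimal usage sketch for a module_from_spec-style loader
# ("demo_mod" and the /tmp path are placeholders; the file must exist).
import importlib.util

spec = importlib.util.spec_from_file_location("demo_mod", "/tmp/demo_mod.py")
mod = importlib.util.module_from_spec(spec)  # on 3.4 the shim's fallback would
spec.loader.exec_module(mod)                 # call spec.loader.load_module() instead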
spaces/Bakar31/MLOps_Practice_Repo_1/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- license: cc
- title: News Summarizer
- sdk: gradio
- emoji: 📚
- colorFrom: indigo
- colorTo: blue
- ---
-
- # MLOps-Practice-Repo-1
-
- source ~/.venv/bin/activate
spaces/Banbri/zcvzcv/src/lib/useImageDimension.ts DELETED
@@ -1,20 +0,0 @@
- import { useEffect, useState } from "react"
-
- import { ImageDimension, getImageDimension } from "./getImageDimension"
-
- export function useImageDimension(src: string) {
-   const [dimension, setDimension] = useState<ImageDimension>({
-     width: 0,
-     height: 0,
-   })
-
-   useEffect(() => {
-     const compute = async () => {
-       const newDimension = await getImageDimension(src)
-       setDimension(newDimension)
-     }
-     compute()
-   }, [src])
-
-   return dimension
- }
spaces/BartPoint/VoiceChange/infer_pack/commons.py DELETED
@@ -1,166 +0,0 @@
- import math
- import numpy as np
- import torch
- from torch import nn
- from torch.nn import functional as F
-
-
- def init_weights(m, mean=0.0, std=0.01):
-     classname = m.__class__.__name__
-     if classname.find("Conv") != -1:
-         m.weight.data.normal_(mean, std)
-
-
- def get_padding(kernel_size, dilation=1):
-     return int((kernel_size * dilation - dilation) / 2)
-
-
- def convert_pad_shape(pad_shape):
-     l = pad_shape[::-1]
-     pad_shape = [item for sublist in l for item in sublist]
-     return pad_shape
-
-
- def kl_divergence(m_p, logs_p, m_q, logs_q):
-     """KL(P||Q)"""
-     kl = (logs_q - logs_p) - 0.5
-     kl += (
-         0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
-     )
-     return kl
-
-
- def rand_gumbel(shape):
-     """Sample from the Gumbel distribution, protect from overflows."""
-     uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-     return -torch.log(-torch.log(uniform_samples))
-
-
- def rand_gumbel_like(x):
-     g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-     return g
-
-
- def slice_segments(x, ids_str, segment_size=4):
-     ret = torch.zeros_like(x[:, :, :segment_size])
-     for i in range(x.size(0)):
-         idx_str = ids_str[i]
-         idx_end = idx_str + segment_size
-         ret[i] = x[i, :, idx_str:idx_end]
-     return ret
-
-
- def slice_segments2(x, ids_str, segment_size=4):
-     ret = torch.zeros_like(x[:, :segment_size])
-     for i in range(x.size(0)):
-         idx_str = ids_str[i]
-         idx_end = idx_str + segment_size
-         ret[i] = x[i, idx_str:idx_end]
-     return ret
-
-
- def rand_slice_segments(x, x_lengths=None, segment_size=4):
-     b, d, t = x.size()
-     if x_lengths is None:
-         x_lengths = t
-     ids_str_max = x_lengths - segment_size + 1
-     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-     ret = slice_segments(x, ids_str, segment_size)
-     return ret, ids_str
-
-
- def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
-     position = torch.arange(length, dtype=torch.float)
-     num_timescales = channels // 2
-     log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
-         num_timescales - 1
-     )
-     inv_timescales = min_timescale * torch.exp(
-         torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
-     )
-     scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-     signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-     signal = F.pad(signal, [0, 0, 0, channels % 2])
-     signal = signal.view(1, channels, length)
-     return signal
-
-
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-     b, channels, length = x.size()
-     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-     return x + signal.to(dtype=x.dtype, device=x.device)
-
-
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-     b, channels, length = x.size()
-     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-     return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
- def subsequent_mask(length):
-     mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-     return mask
-
-
- @torch.jit.script
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-     n_channels_int = n_channels[0]
-     in_act = input_a + input_b
-     t_act = torch.tanh(in_act[:, :n_channels_int, :])
-     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-     acts = t_act * s_act
-     return acts
-
-
- def convert_pad_shape(pad_shape):
-     l = pad_shape[::-1]
-     pad_shape = [item for sublist in l for item in sublist]
-     return pad_shape
-
-
- def shift_1d(x):
-     x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-     return x
-
-
- def sequence_mask(length, max_length=None):
-     if max_length is None:
-         max_length = length.max()
-     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-     return x.unsqueeze(0) < length.unsqueeze(1)
-
-
- def generate_path(duration, mask):
-     """
-     duration: [b, 1, t_x]
-     mask: [b, 1, t_y, t_x]
-     """
-     device = duration.device
-
-     b, _, t_y, t_x = mask.shape
-     cum_duration = torch.cumsum(duration, -1)
-
-     cum_duration_flat = cum_duration.view(b * t_x)
-     path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-     path = path.view(b, t_x, t_y)
-     path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-     path = path.unsqueeze(1).transpose(2, 3) * mask
-     return path
-
-
- def clip_grad_value_(parameters, clip_value, norm_type=2):
-     if isinstance(parameters, torch.Tensor):
-         parameters = [parameters]
-     parameters = list(filter(lambda p: p.grad is not None, parameters))
-     norm_type = float(norm_type)
-     if clip_value is not None:
-         clip_value = float(clip_value)
-
-     total_norm = 0
-     for p in parameters:
-         param_norm = p.grad.data.norm(norm_type)
-         total_norm += param_norm.item() ** norm_type
-         if clip_value is not None:
-             p.grad.data.clamp_(min=-clip_value, max=clip_value)
-     total_norm = total_norm ** (1.0 / norm_type)
-     return total_norm
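Since this file is removed from the tree, a quick standalone sketch of what its `sequence_mask` helper computes may help anyone porting it elsewhere; the lengths below are made-up demo values:

# Demo of the sequence_mask semantics from the deleted file (made-up lengths).
import torch

length = torch.tensor([2, 4])                 # valid length of each sequence
x = torch.arange(length.max())                # [0, 1, 2, 3]
mask = x.unsqueeze(0) < length.unsqueeze(1)   # broadcast compare, shape [2, 4]
print(mask)
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True]])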
spaces/Benson/text-generation/Examples/Car Drift Game Download Apkpure.md DELETED
@@ -1,58 +0,0 @@
- <br />
- <h1>Car Drift Game Download Apkpure: How to Enjoy Realistic Drifting on Your Android Device</h1>
- <p>If you are a fan of racing games and want to experience the thrill of drifting on your Android device, you should try Car Drift Game. It is a realistic and fun drift simulator that lets you burn rubber on the asphalt and pull off amazing stunts. In this article, we will tell you what Car Drift Game is, how to download it from Apkpure, and how to play it on your Android device.</p>
- <h2>What is Car Drift Game?</h2>
- <p>Car Drift Game is a popular racing game focused on drifting, a driving technique in which the driver intentionally oversteers the car so that it slides sideways. Drifting is often used in motorsport and street racing, as it can create spectacular effects and showcase the driver's skill.</p>
- <h2>car drift game download apkpure</h2><br /><p><b><b>DOWNLOAD</b> &rArr; <a href="https://bltlly.com/2v6Jlx">https://bltlly.com/2v6Jlx</a></b></p><br /><br />
- <h3>The features of Car Drift Game</h3>
- <p>Car Drift Game has many features that make it one of the best drift games for Android devices. Some of them are:</p>
- <ul>
- <li>Realistic physics and graphics that simulate the behavior and appearance of real cars and tracks.</li>
- <li>A variety of cars to choose from, each with different characteristics and customization options.</li>
- <li>A selection of drift tracks, ranging from city streets to mountain roads.</li>
- <li>A dynamic weather system that affects driving conditions and visibility.</li>
- <li>A replay mode that lets you watch your drifts from different angles and share them with your friends.</li>
- </ul>
- <h3>The benefits of Car Drift Game</h3>
- <p>Car Drift Game is not only fun and exciting, but also beneficial. Some of the benefits of playing it are:</p>
- <ul>
- <li>It improves your hand-eye coordination and reflexes, as you have to control the car and react to a changing environment.</li>
- <li>It strengthens your creativity and problem-solving skills, as you have to find the best way to drift and overcome obstacles.</li>
-
- <li>It reduces stress and boredom, as you can immerse yourself in the game and forget your worries.</li>
- </ul>
- <h2>How to download Car Drift Game from Apkpure?</h2>
- <p>If you want to download Car Drift Game on your Android device, one of the best sources is Apkpure. Apkpure is a website that provides free and safe APK files for Android apps and games. APK files are the installation files for Android apps, which can be downloaded and installed manually without using the Google Play Store.</p>
- <h3>The steps to download Car Drift Game from Apkpure</h3>
- <p>To download Car Drift Game from Apkpure, follow these steps:</p>
- <ol>
- <li>Go to <a href="( 1 )">https://apkpure.com/carx-drift-racing/com.CarXTech.CarXDriftRacingFull</a>, which is the official Car Drift Game page on Apkpure.</li>
- <li>Click the "Download APK" button, which will start downloading the Car Drift Game APK file to your device.</li>
- <li>Once the download is complete, locate the APK file on your device and tap it to install it. You may need to enable "Unknown sources" in your device settings to allow the installation.</li>
- <li>After the installation, you can start the game on your Android device.</li>
- <h3>The controls of Car Drift Game</h3>
- <p>Car Drift Game has simple, intuitive controls that let you drift with ease. You can choose between two control modes: tilt or touch. In tilt mode, you steer the car by tilting your device left or right. In touch mode, you steer by tapping the left or right side of the screen. You can also adjust the sensitivity and angle of the tilt or touch in the settings menu.</p>
- <p>To change the camera view, tap the camera icon in the top-right corner of the screen. You can choose between four camera views: cockpit, hood, bumper, and chase. Each view has its own advantages and drawbacks, depending on your preference and situation.</p>
- <h3>The modes of Car Drift Game</h3>
- <p>Car Drift Game has three modes to choose from: career, single player, and multiplayer. Each mode has its own challenges and rewards.</p>
- <ul>
- <li>Career mode: In this mode, you progress through various levels and events, where you have to complete different objectives and earn stars. The more stars you earn, the more cars and tracks you unlock. You can also upgrade your cars and customize their appearance in this mode.</li>
- <li>Single player mode: In this mode, you can practice your drifting skills and set your own records on any track you like. You can also adjust the difficulty and the number of opponents in this mode.</li>
- <li>Multiplayer mode: In this mode, you can compete with other players online in real-time races and tournaments. You can also chat with other players and join clubs in this mode.</li>
- </ul>
- <h3>Tips and tricks for Car Drift Game</h3>
- <p>Car Drift Game takes skill and practice to master. Here are some tips and tricks that can help you improve your performance and score:</p>
- <ul>
- <li>Choose a car that suits your style and preference. Different cars have different attributes, such as speed, acceleration, handling, weight, and drift ability. You can also tune your car to optimize its drifting performance.</li>
- <li>Learn to use the handbrake effectively. The handbrake is essential for drifting, as it helps you initiate and control drifts. You can use it to enter corners at high speed, adjust your angle and direction mid-drift, and exit corners smoothly.</li>
-
- <li>Watch your drift angle and speed. The drift angle is the angle between your car's heading and its direction of travel; the speed is how fast the car is moving. Both factors affect your drift score, which is calculated by multiplying the drift angle by the speed, so aim for a high drift angle at high speed to get a high drift score.</li>
- <li>Practice on different tracks and conditions. Car Drift Game offers a variety of tracks and conditions to challenge your drifting skills. Practice on different tracks and conditions to learn their layouts, features, and hazards, and adapt to weather effects such as rain, snow, fog, and night driving.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Car Drift Game is a realistic and fun drift simulator that lets you burn rubber on the asphalt and perform amazing stunts on your Android device. You can download it from Apkpure, which offers a 2 )">https://carx-tech.com/</a>, or by sending them an email at <a href="">[email protected]</a>.</p>
- </ol></p>
- <br />
- <br />
spaces/Benson/text-generation/Examples/Descargar Camin Simulador ltimo Para Ventanas 10.md DELETED
@@ -1,97 +0,0 @@
-
- <h1>How to Download Truck Simulator Ultimate for Windows 10</h1>
- <p>Do you love driving trucks and exploring different countries? Do you want to experience the thrill of running your own transport company and managing your fleet? If so, you should try <strong>Truck Simulator Ultimate</strong>, a realistic and immersive truck simulation game that lets you travel the world in your truck and complete various missions.</p>
- <h2>download truck simulator ultimate for windows 10</h2><br /><p><b><b>Download</b> &rarr;&rarr;&rarr; <a href="https://bltlly.com/2v6KYw">https://bltlly.com/2v6KYw</a></b></p><br /><br />
- <p>In this article, we will show you what Truck Simulator Ultimate is, what its features and benefits are, and how to download it for Windows 10. We will also share some tips and tricks for playing the game on your PC. Let's get started!</p>
- <h2>What is Truck Simulator Ultimate?</h2>
- <p>Truck Simulator Ultimate is a simulation game developed by Zuuks Games, the creators of Bus Simulator : Ultimate. The game features officially licensed Mercedes-Benz trucks and lets you haul a wide variety of cargo across more than 100 cities around the world. You can also take part in multiplayer seasons, where you can carry joint cargo or compete in races with other players.</p>
- <p>The game also has a tycoon element, where you can establish your own transport company, hire employees, expand your fleet, design your offices, and become the largest logistics company in the world. You can operate in different countries such as the United States, China, Canada, Russia, Germany, Italy, France, Spain, the Netherlands, Turkey, South Korea, Japan, Brazil, Azerbaijan, and more.</p>
- <h3>Features of Truck Simulator Ultimate</h3>
- <p>Some of the features of Truck Simulator Ultimate are:</p>
- <ul>
- <li><strong>DLC mods system</strong>: You can customize your trucks with various accessories such as lamps, bumpers, horns, cabin lights, and more.</li>
- <li><strong>Detailed cabins</strong>: You can enjoy realistic driving physics and controls from inside your truck's cabin.</li>
-
- <li><strong>More than 25 supported languages</strong>: You can play the game in your preferred language.</li>
- <li><strong>More than 250 radio stations</strong>: You can listen to your favorite music while you drive.</li>
- <li><strong>Toll roads</strong>: You can pay tolls to use faster, safer highways.</li>
- <li><strong>Realistic weather forecast</strong>: You can experience different weather conditions such as rain, snow, fog, etc.</li>
- <li><strong>Village, city, and highway roads</strong>: You can drive on different types of roads with varying traffic and scenery.</li>
- </ul>
- <h3>Benefits of playing Truck Simulator Ultimate on PC</h3>
- <p>While Truck Simulator Ultimate is available for Android and iOS devices, playing it on PC has some advantages. Here are a few:</p>
- <ul>
- <li><strong>Better graphics and performance</strong>: You can enjoy the game's stunning graphics and smooth gameplay on a bigger screen at a higher resolution.</li>
- <li><strong>Easier controls</strong>: You can use the keyboard and mouse to control your truck with more comfort and precision.</li>
- <li><strong>More storage space</strong>: You do not have to worry about running out of space on your mobile device, since you can store the game files on your PC's hard drive.</li>
- <li><strong>No battery drain or overheating</strong>: You do not have to worry about your mobile device's battery running down or getting too hot while playing for long hours.</li>
- </ul>
- <h2>How to download Truck Simulator Ultimate for Windows 10</h2>
- <p>If you want to play Truck Simulator Ultimate on your Windows 10 PC, you will first need to meet some system requirements. Then, you can choose between two download methods: using the Google Play Store or using the BlueStacks App Player.</p>
- <h3>System requirements for Windows 10</h3>
- <p>Before downloading Truck Simulator Ultimate for Windows 10, make sure your PC meets the following minimum system requirements:</p>
- <table>
- <tr>
- <th>OS</th>
- <th>Processor</th>
- <th>Memory</th>
- <th>Graphics</th>
- <th>Storage</th>
- </tr>
- <tr>
- <td>Windows 10 (64-bit)</td>
- <td>Intel Core i3-2100 or AMD FX-6300</td>
- <td>4 GB RAM</td>
- <td>NVIDIA GeForce GTX 750 Ti or AMD Radeon HD 7870</td>
- <td>5 GB available space</td>
- </tr>
- </table>
- <h3>Steps to download Truck Simulator Ultimate for Windows 10</h3>
- <p>There are two ways to download Truck Simulator Ultimate for Windows 10: using the Google Play Store or using the BlueStacks App Player. Here are the steps for each method:</p>
- <h4>Using the Google Play Store</h4>
- <ol>
- <li>Open your web browser and go to <a href="">https://play.google.com/store/apps/apps/detailss?id=com.zuuks.truck.simulator.ultimate&hl=en_US&gl=US</a>.</li>
- <li>Click the <strong>Install</strong> button and sign in with your Google account.</li>
- <li>The game will start downloading and installing on your PC.</li>
- <li>Once the installation is complete, you can launch the game from the Google Play Store app or from your desktop shortcut.</li>
- </ol>
- <h4>Using the BlueStacks App Player</h4>
- <ol>
- <li>Download and install BlueStacks App Player from <a href="">https://www.bluestacks.com/</a>.</li>
- <li>Launch BlueStacks and sign in with your Google account.</li>
- <li>Go to the <strong>My Apps</strong> tab and click the <strong>Google Play Store</strong> icon.</li>
- <li>Search for <strong>Truck Simulator Ultimate</strong> and click the <strong>Install</strong> button.</li>
- <li>The game will start downloading and installing on your PC.</li>
- <li>Once the installation is complete, you can launch the game from the BlueStacks home screen or from your desktop shortcut.</li>
- </ol>
- <h2>Tips and tricks for playing Truck Simulator Ultimate on PC</h2>
- <p>To get the most out of your hauling experience, here are some tips and tricks for playing Truck Simulator Ultimate on PC:</p>
- <ul>
-
- <li><strong>Fuel and repair your truck regularly</strong>: You do not want to run out of gas or break down in the middle of the road. Be sure to check your fuel gauge and damage indicator, and stop at gas stations and repair shops when needed.</li>
- <li><strong>Follow traffic rules and speed limits</strong>: You do not want to get fined or cause accidents. Obey traffic lights, signs, signals, and speed limits, and avoid crashing into other vehicles, pedestrians, or objects.</li>
- <li><strong>Earn money and XP by completing missions and challenges</strong>: You can earn money and XP by hauling cargo, taking part in multiplayer seasons, and completing daily tasks, achievements, and events. You can use the money to buy new trucks, upgrade your fleet, hire employees, and expand your company. You can use the XP to level up and unlock new features and rewards.</li>
- <li><strong>Create a realistic profile and customize your truck</strong>: You can create a realistic profile by choosing your name, country, flag, logo, license plate, etc. You can also customize your truck with various DLC mods such as lamps, bumpers, horns, cabin lights, etc.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In conclusion, Truck Simulator Ultimate is a fun and realistic truck simulation game that lets you travel the world in your truck and run your own transport company. You can download it for Windows 10 using the Google Play Store or the BlueStacks App Player, and follow the tips and tricks above to improve your game. We hope you enjoy playing Truck Simulator Ultimate on PC!</p>
- <h2>FAQs</h2> <p>Here are some frequently asked questions about Truck Simulator Ultimate:</p>
- <ol>
- <li><strong>How can I play Truck Simulator Ultimate with my friends?</strong></li>
-
- <li><strong>How can I change the camera view in Truck Simulator Ultimate?</strong></li>
- <p>You can change the camera view in Truck Simulator Ultimate by pressing the C key on your keyboard. You can choose between different camera angles, such as cabin, front, rear, side, top, etc.</p>
- <li><strong>How can I save my progress in Truck Simulator Ultimate?</strong></li>
- <p>You can save your progress in Truck Simulator Ultimate by signing in with your Google account. The game will automatically sync your data with the cloud. You can also back up your data manually by going to the settings menu and clicking the backup button.</p>
- <li><strong>How can I update Truck Simulator Ultimate on PC?</strong></li>
- <p>You can update Truck Simulator Ultimate on PC by following these steps:</p>
- <ul>
- <li>If you downloaded the game from the Google Play Store, you can check for updates by opening the Google Play Store app and clicking the menu icon. Then, go to <strong>My apps & games</strong> and find Truck Simulator Ultimate. If an update is available, click the <strong>Update</strong> button.</li>
- <li>If you downloaded the game from the BlueStacks App Player, you can check for updates by opening the BlueStacks app and clicking the menu icon. Then, go to <strong>App Center</strong> and find Truck Simulator Ultimate. If an update is available, click the <strong>Update</strong> button.</li>
- </ul>
- <li><strong>How can I contact the developers of Truck Simulator Ultimate?</strong></li>
- <p>You can contact the developers of Truck Simulator Ultimate by sending an email to <a href="mailto:[email protected]">[email protected]</a>. You can also follow them on their social media accounts such as Facebook, Twitter, Instagram, and YouTube.</p>
- </ol></p>
- <br />
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/core.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Bilalst/Gradio_Youtube_Transcript_v2/app.py DELETED
@@ -1,116 +0,0 @@
- import gradio as gr
- import requests
- from sentence_transformers import SentenceTransformer
- from youtube_transcript_api import YouTubeTranscriptApi
- import numpy as np
- import huggingface_hub
- import os
- import faiss
-
- # Set up SentenceTransformer
- model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
-
-
- playlist_id = 'PLD4EAA8F8C9148A1B'
- api_key = 'AIzaSyBGuTvXcnliEh6yhTxugrAVM5YzcG9qr9U'
-
- # Make a request to the YouTube Data API to retrieve the playlist items
- url = f'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId={playlist_id}&key={api_key}'
- video_ids = []
-
- while True:
-     response = requests.get(url)
-     data = response.json()
-
-     # Extract the video IDs from the response
-     for item in data['items']:
-         video_ids.append(item['snippet']['resourceId']['videoId'])
-
-     # Check if there are more pages of results
-     if 'nextPageToken' in data:
-         next_page_token = data['nextPageToken']
-         url = f'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId={playlist_id}&key={api_key}&pageToken={next_page_token}'
-     else:
-         break
-
- # Empty lists to store transcripts and video IDs
- transcripts = []
- ids = []
-
- for video_id in video_ids:
-     try:
-         transcript = YouTubeTranscriptApi.get_transcript(video_id)
-         transcript_text = ' '.join([t['text'] for t in transcript])
-         transcripts.append(transcript_text)
-         ids.append(video_id)
-
-     except Exception as e:
-         print(f"Error retrieving transcript for video {video_id}: {e}")
-         continue
-
- # create sentence embeddings
- sentence_embeddings = model.encode(transcripts)
-
- # Set up FAISS
- index = faiss.IndexFlatL2(384)
- # Convert list of embeddings to NumPy array
- sentence_embeddings = np.array(sentence_embeddings)
-
- # Add sentence embeddings to FAISS index
- index.add(sentence_embeddings)
-
-
- #---------------------------------------------
-
- def get_video_links(input_text):
-     # Encode input text using SentenceTransformer
-     input_embedding = model.encode([input_text])[0]
-
-     # Perform nearest neighbor search in FAISS index
-     k = 15  # Number of nearest neighbors to retrieve
-     _, T = index.search(np.array([input_embedding]), k)  # search
-
-     # Return the list of video links with thumbnails and titles as an HTML string
-     video_links = []
-     visited_ids = set()
-     for i in T[0]:
-         video_id = ids[i]
-         if video_id in visited_ids:
-             continue  # Skip if the video_id has already been visited
-         visited_ids.add(video_id)
-
-         # Retrieve video details using YouTube Data API
-         video_info_url = f"https://www.googleapis.com/youtube/v3/videos?part=snippet&id={video_id}&key={api_key}"
-         response = requests.get(video_info_url)
-         data = response.json()
-         video_title = data['items'][0]['snippet']['title']
-         video_thumbnail = data['items'][0]['snippet']['thumbnails']['default']['url']
-
-         # Generate HTML code for the video link with thumbnail and title
-         video_link = f"https://www.youtube.com/watch?v={video_id}"
-         video_html = f'<a href="{video_link}" target="_blank"><img src="{video_thumbnail}"><br>{video_title}</a><br>'
-         video_links.append(video_html)
-
-     return ''.join(video_links)
-
- # Create Gradio interface with "html" output type
- iface = gr.Interface(fn=get_video_links, inputs=[gr.inputs.Textbox(label="Add what you are looking to find in Dr. Joe's testimonials!")], outputs="html", title="Dr. Joe Dispenza testimonials Search")
-
-
- # Launch the Gradio interface on Hugging Face Spaces
- if __name__ == '__main__':
-     iface.launch()
spaces/Bonosa2/dall-e_image-generation/app.py DELETED
@@ -1,43 +0,0 @@
- import gradio as gr
- import openai
- import urllib.request
- from PIL import Image
- import os
- import nltk
- #nltk.download('punkt')
-
- def generate_image(api_key, prompt, resolution):
-     if not api_key:
-         print("Error: API Key is required.")
-         return
-     openai.api_key = api_key
-     response = openai.Image.create(
-         prompt=prompt,
-         n=1,
-         size=resolution
-     )
-
-     image_url = response['data'][0]['url']
-
-     # Download the image at the returned URL and open it as a PIL image
-     with urllib.request.urlopen(image_url) as url:
-         with open('temp.jpg', 'wb') as f:
-             f.write(url.read())
-     img = Image.open('temp.jpg')
-
-     return img
-
- iface = gr.Interface(
-     fn=generate_image,
-     inputs=[
-         gr.inputs.Textbox(lines=1, label="API Key", type="password"),
-         gr.inputs.Textbox(lines=1, label="Prompt"),
-         gr.inputs.Radio(choices=["256x256", "512x512", "1024x1024"], label="Resolution")
-     ],
-     outputs=gr.outputs.Image(type="pil"),
-     title="DALL-E Image Generator",
-     description="Enter your API key, a prompt, and choose a resolution to generate an image from DALL-E."
- )
-
-
- iface.launch()
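A quick local-usage sketch for the generate_image function above; note it targets the legacy pre-1.0 openai client (which exposed openai.Image.create), and the key below is a placeholder:

# Hypothetical local call to generate_image (placeholder key; assumes openai<1.0).
img = generate_image(
    api_key="sk-...",                            # placeholder; supply a real key
    prompt="a lighthouse at dusk, oil painting",
    resolution="512x512",
)
if img is not None:
    img.save("lighthouse.jpg")                   # the function returns a PIL image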
spaces/CVPR/LIVE/thrust/thrust/iterator/transform_output_iterator.h DELETED
@@ -1,163 +0,0 @@
- /*
-  * Copyright 2008-2018 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- /*! \file thrust/iterator/transform_output_iterator.h
-  *  \brief An output iterator which adapts another output iterator by applying a
-  *         function to the result of its dereference before writing it.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/iterator/detail/transform_output_iterator.inl>
-
- namespace thrust
- {
-
- /*! \addtogroup iterators
-  *  \{
-  */
-
- /*! \addtogroup fancyiterator Fancy Iterators
-  *  \ingroup iterators
-  *  \{
-  */
-
- /*! \p transform_output_iterator is a special kind of output iterator which
-  *  transforms a value written upon dereference. This iterator is useful
-  *  for transforming an output from algorithms without explicitly storing the
-  *  intermediate result in the memory and applying subsequent transformation,
-  *  thereby avoiding wasting memory capacity and bandwidth.
-  *  Using \p transform_iterator facilitates kernel fusion by deferring execution
-  *  of transformation until the value is written while saving both memory
-  *  capacity and bandwidth.
-  *
-  *  The following code snippet demonstrates how to create a
-  *  \p transform_output_iterator which applies \c sqrtf to the assigned value.
-  *
-  *  \code
-  *  #include <thrust/iterator/transform_output_iterator.h>
-  *  #include <thrust/device_vector.h>
-  *
-  *  // note: functor inherits from unary_function
-  *  struct square_root : public thrust::unary_function<float,float>
-  *  {
-  *    __host__ __device__
-  *    float operator()(float x) const
-  *    {
-  *      return sqrtf(x);
-  *    }
-  *  };
-  *
-  *  int main()
-  *  {
-  *    thrust::device_vector<float> v(4);
-  *
-  *    typedef thrust::device_vector<float>::iterator FloatIterator;
-  *    thrust::transform_output_iterator<square_root, FloatIterator> iter(v.begin(), square_root());
-  *
-  *    iter[0] =  1.0f;    // stores sqrtf( 1.0f)
-  *    iter[1] =  4.0f;    // stores sqrtf( 4.0f)
-  *    iter[2] =  9.0f;    // stores sqrtf( 9.0f)
-  *    iter[3] = 16.0f;    // stores sqrtf(16.0f)
-  *    // iter[4] is an out-of-bounds error
-  *
-  *    v[0]; // returns 1.0f;
-  *    v[1]; // returns 2.0f;
-  *    v[2]; // returns 3.0f;
-  *    v[3]; // returns 4.0f;
-  *
-  *  }
-  *  \endcode
-  *
-  *  \see make_transform_output_iterator
-  */
-
- template <typename UnaryFunction, typename OutputIterator>
-   class transform_output_iterator
-     : public detail::transform_output_iterator_base<UnaryFunction, OutputIterator>::type
- {
-
-   /*! \cond
-    */
-
-   public:
-
-     typedef typename
-     detail::transform_output_iterator_base<UnaryFunction, OutputIterator>::type
-     super_t;
-
-     friend class thrust::iterator_core_access;
-   /*! \endcond
-    */
-
-   /*! This constructor takes as argument an \c OutputIterator and an \c
-    *  UnaryFunction and copies them to a new \p transform_output_iterator
-    *
-    *  \param out An \c OutputIterator pointing to the output range whereto the result of
-    *             \p transform_output_iterator's \c UnaryFunction will be written.
-    *  \param fun An \c UnaryFunction used to transform the objects assigned to
-    *             this \p transform_output_iterator.
-    */
-   __host__ __device__
-   transform_output_iterator(OutputIterator const& out, UnaryFunction fun) : super_t(out), fun(fun)
-   {
-   }
-
-   /*! \cond
-    */
-   private:
-
-     __host__ __device__
-     typename super_t::reference dereference() const
-     {
-       return detail::transform_output_iterator_proxy<
-         UnaryFunction, OutputIterator
-       >(this->base_reference(), fun);
-     }
-
-     UnaryFunction fun;
-
-   /*! \endcond
-    */
- }; // end transform_output_iterator
-
- /*! \p make_transform_output_iterator creates a \p transform_output_iterator from
-  *  an \c OutputIterator and \c UnaryFunction.
-  *
-  *  \param out The \c OutputIterator pointing to the output range of the newly
-  *             created \p transform_output_iterator
-  *  \param fun The \c UnaryFunction transform the object before assigning it to
-  *             \c out by the newly created \p transform_output_iterator
-  *  \see transform_output_iterator
-  */
- template <typename UnaryFunction, typename OutputIterator>
- transform_output_iterator<UnaryFunction, OutputIterator>
- __host__ __device__
- make_transform_output_iterator(OutputIterator out, UnaryFunction fun)
- {
-     return transform_output_iterator<UnaryFunction, OutputIterator>(out, fun);
- } // end make_transform_output_iterator
-
- /*! \} // end fancyiterators
-  */
-
- /*! \} // end iterators
-  */
-
- } // end thrust
-
spaces/CVPR/LIVE/thrust/thrust/iterator/zip_iterator.h DELETED
@@ -1,245 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
-
- /*! \file thrust/iterator/zip_iterator.h
-  *  \brief An iterator which returns a tuple of the result of dereferencing
-  *         a tuple of iterators when dereferenced
-  */
-
- /*
-  * Copyright David Abrahams and Thomas Becker 2000-2006.
-  *
-  * Distributed under the Boost Software License, Version 1.0.
-  * (See accompanying NOTICE file for the complete license)
-  *
-  * For more information, see http://www.boost.org
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/iterator/detail/zip_iterator_base.h>
- #include <thrust/iterator/iterator_facade.h>
- #include <thrust/detail/type_traits.h>
-
- namespace thrust
- {
-
- /*! \addtogroup iterators
-  *  \{
-  */
-
- /*! \addtogroup fancyiterator Fancy Iterators
-  *  \ingroup iterators
-  *  \{
-  */
-
- /*! \p zip_iterator is an iterator which represents a pointer into a range
-  *  of \p tuples whose elements are themselves taken from a \p tuple of input
-  *  iterators. This iterator is useful for creating a virtual array of structures
-  *  while achieving the same performance and bandwidth as the structure of arrays
-  *  idiom. \p zip_iterator also facilitates kernel fusion by providing a convenient
-  *  means of amortizing the execution of the same operation over multiple ranges.
-  *
-  *  The following code snippet demonstrates how to create a \p zip_iterator
-  *  which represents the result of "zipping" multiple ranges together.
-  *
-  *  \code
-  *  #include <thrust/iterator/zip_iterator.h>
-  *  #include <thrust/tuple.h>
-  *  #include <thrust/device_vector.h>
-  *  ...
-  *  thrust::device_vector<int> int_v(3);
-  *  int_v[0] = 0; int_v[1] = 1; int_v[2] = 2;
-  *
-  *  thrust::device_vector<float> float_v(3);
-  *  float_v[0] = 0.0f; float_v[1] = 1.0f; float_v[2] = 2.0f;
-  *
-  *  thrust::device_vector<char> char_v(3);
-  *  char_v[0] = 'a'; char_v[1] = 'b'; char_v[2] = 'c';
-  *
-  *  // typedef these iterators for shorthand
-  *  typedef thrust::device_vector<int>::iterator   IntIterator;
-  *  typedef thrust::device_vector<float>::iterator FloatIterator;
-  *  typedef thrust::device_vector<char>::iterator  CharIterator;
-  *
-  *  // typedef a tuple of these iterators
-  *  typedef thrust::tuple<IntIterator, FloatIterator, CharIterator> IteratorTuple;
-  *
-  *  // typedef the zip_iterator of this tuple
-  *  typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
-  *
-  *  // finally, create the zip_iterator
-  *  ZipIterator iter(thrust::make_tuple(int_v.begin(), float_v.begin(), char_v.begin()));
-  *
-  *  *iter;   // returns (0, 0.0f, 'a')
-  *  iter[0]; // returns (0, 0.0f, 'a')
-  *  iter[1]; // returns (1, 1.0f, 'b')
-  *  iter[2]; // returns (2, 2.0f, 'c')
-  *
-  *  thrust::get<0>(iter[2]); // returns 2
-  *  thrust::get<1>(iter[0]); // returns 0.0f
-  *  thrust::get<2>(iter[1]); // returns 'b'
-  *
-  *  // iter[3] is an out-of-bounds error
-  *  \endcode
-  *
-  *  Defining the type of a \p zip_iterator can be complex. The next code example demonstrates
-  *  how to use the \p make_zip_iterator function with the \p make_tuple function to avoid
-  *  explicitly specifying the type of the \p zip_iterator. This example shows how to use
-  *  \p zip_iterator to copy multiple ranges with a single call to \p thrust::copy.
-  *
-  *  \code
-  *  #include <thrust/zip_iterator.h>
-  *  #include <thrust/tuple.h>
-  *  #include <thrust/device_vector.h>
-  *
-  *  int main()
-  *  {
-  *    thrust::device_vector<int> int_in(3), int_out(3);
-  *    int_in[0] = 0;
-  *    int_in[1] = 1;
-  *    int_in[2] = 2;
-  *
-  *    thrust::device_vector<float> float_in(3), float_out(3);
-  *    float_in[0] =  0.0f;
-  *    float_in[1] = 10.0f;
-  *    float_in[2] = 20.0f;
-  *
-  *    thrust::copy(thrust::make_zip_iterator(thrust::make_tuple(int_in.begin(), float_in.begin())),
-  *                 thrust::make_zip_iterator(thrust::make_tuple(int_in.end(),   float_in.end())),
-  *                 thrust::make_zip_iterator(thrust::make_tuple(int_out.begin(),float_out.begin())));
-  *
-  *    // int_out is now [0, 1, 2]
-  *    // float_out is now [0.0f, 10.0f, 20.0f]
-  *
-  *    return 0;
-  *  }
-  *  \endcode
-  *
-  *  \see make_zip_iterator
-  *  \see make_tuple
-  *  \see tuple
-  *  \see get
-  */
- template <typename IteratorTuple>
-   class zip_iterator
-     : public detail::zip_iterator_base<IteratorTuple>::type
- {
-   public:
-     /*! Null constructor does nothing.
-      */
-     inline __host__ __device__
-     zip_iterator();
-
-     /*! This constructor creates a new \p zip_iterator from a
-      *  \p tuple of iterators.
-      *
-      *  \param iterator_tuple The \p tuple of iterators to copy from.
-      */
-     inline __host__ __device__
-     zip_iterator(IteratorTuple iterator_tuple);
-
-     /*! This copy constructor creates a new \p zip_iterator from another
-      *  \p zip_iterator.
-      *
-      *  \param other The \p zip_iterator to copy.
-      */
-     template<typename OtherIteratorTuple>
-     inline __host__ __device__
-     zip_iterator(const zip_iterator<OtherIteratorTuple> &other,
-                  typename thrust::detail::enable_if_convertible<
-                    OtherIteratorTuple,
-                    IteratorTuple
-                  >::type * = 0);
-
-     /*! This method returns a \c const reference to this \p zip_iterator's
-      *  \p tuple of iterators.
-      *
-      *  \return A \c const reference to this \p zip_iterator's \p tuple
-      *          of iterators.
-      */
-     inline __host__ __device__
-     const IteratorTuple &get_iterator_tuple() const;
-
-   /*! \cond
-    */
-   private:
-     typedef typename
-     detail::zip_iterator_base<IteratorTuple>::type super_t;
-
-     friend class thrust::iterator_core_access;
-
-     // Dereferencing returns a tuple built from the dereferenced
-     // iterators in the iterator tuple.
-     __host__ __device__
-     typename super_t::reference dereference() const;
-
-     // Two zip_iterators are equal if the two first iterators of the
-     // tuple are equal. Note this differs from Boost's implementation, which
-     // considers the entire tuple.
-     template<typename OtherIteratorTuple>
-     inline __host__ __device__
-     bool equal(const zip_iterator<OtherIteratorTuple> &other) const;
-
-     // Advancing a zip_iterator means to advance all iterators in the tuple
-     inline __host__ __device__
-     void advance(typename super_t::difference_type n);
-
-     // Incrementing a zip iterator means to increment all iterators in the tuple
-     inline __host__ __device__
-     void increment();
-
-     // Decrementing a zip iterator means to decrement all iterators in the tuple
-     inline __host__ __device__
-     void decrement();
-
-     // Distance is calculated using the first iterator in the tuple.
-     template<typename OtherIteratorTuple>
-     inline __host__ __device__
-     typename super_t::difference_type
-     distance_to(const zip_iterator<OtherIteratorTuple> &other) const;
-
-     // The iterator tuple.
-     IteratorTuple m_iterator_tuple;
-
-   /*! \endcond
-    */
- }; // end zip_iterator
-
- /*! \p make_zip_iterator creates a \p zip_iterator from a \p tuple
-  *  of iterators.
-  *
-  *  \param t The \p tuple of iterators to copy.
-  *  \return A newly created \p zip_iterator which zips the iterators encapsulated in \p t.
-  *
-  *  \see zip_iterator
-  */
- template<typename IteratorTuple>
- inline __host__ __device__
- zip_iterator<IteratorTuple> make_zip_iterator(IteratorTuple t);
-
- /*! \} // end fancyiterators
-  */
-
- /*! \} // end iterators
-  */
-
- } // end thrust
-
- #include <thrust/iterator/detail/zip_iterator.inl>
-
spaces/CVPR/WALT/mmdet/models/dense_heads/free_anchor_retina_head.py DELETED
@@ -1,270 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
-
4
- from mmdet.core import bbox_overlaps
5
- from ..builder import HEADS
6
- from .retina_head import RetinaHead
7
-
8
- EPS = 1e-12
9
-
10
-
11
- @HEADS.register_module()
12
- class FreeAnchorRetinaHead(RetinaHead):
13
- """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466.
14
-
15
- Args:
16
- num_classes (int): Number of categories excluding the background
17
- category.
18
- in_channels (int): Number of channels in the input feature map.
19
- stacked_convs (int): Number of conv layers in cls and reg tower.
20
- Default: 4.
21
- conv_cfg (dict): dictionary to construct and config conv layer.
22
- Default: None.
23
- norm_cfg (dict): dictionary to construct and config norm layer.
24
- Default: norm_cfg=dict(type='GN', num_groups=32,
25
- requires_grad=True).
26
- pre_anchor_topk (int): Number of boxes that be token in each bag.
27
- bbox_thr (float): The threshold of the saturated linear function. It is
28
- usually the same with the IoU threshold used in NMS.
29
- gamma (float): Gamma parameter in focal loss.
30
- alpha (float): Alpha parameter in focal loss.
31
- """ # noqa: W605
32
-
33
- def __init__(self,
34
- num_classes,
35
- in_channels,
36
- stacked_convs=4,
37
- conv_cfg=None,
38
- norm_cfg=None,
39
- pre_anchor_topk=50,
40
- bbox_thr=0.6,
41
- gamma=2.0,
42
- alpha=0.5,
43
- **kwargs):
44
- super(FreeAnchorRetinaHead,
45
- self).__init__(num_classes, in_channels, stacked_convs, conv_cfg,
46
- norm_cfg, **kwargs)
47
-
48
- self.pre_anchor_topk = pre_anchor_topk
49
- self.bbox_thr = bbox_thr
50
- self.gamma = gamma
51
- self.alpha = alpha
52
-
53
- def loss(self,
54
- cls_scores,
55
- bbox_preds,
56
- gt_bboxes,
57
- gt_labels,
58
- img_metas,
59
- gt_bboxes_ignore=None):
60
- """Compute losses of the head.
61
-
62
- Args:
63
- cls_scores (list[Tensor]): Box scores for each scale level
64
- Has shape (N, num_anchors * num_classes, H, W)
65
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
66
- level with shape (N, num_anchors * 4, H, W)
67
- gt_bboxes (list[Tensor]): each item are the truth boxes for each
68
- image in [tl_x, tl_y, br_x, br_y] format.
69
- gt_labels (list[Tensor]): class indices corresponding to each box
70
- img_metas (list[dict]): Meta information of each image, e.g.,
71
- image size, scaling factor, etc.
72
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
73
- boxes can be ignored when computing the loss.
74
-
75
- Returns:
76
- dict[str, Tensor]: A dictionary of loss components.
77
- """
78
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
79
- assert len(featmap_sizes) == len(self.anchor_generator.base_anchors)
80
-
81
- anchor_list, _ = self.get_anchors(featmap_sizes, img_metas)
82
- anchors = [torch.cat(anchor) for anchor in anchor_list]
83
-
84
- # concatenate each level
85
- cls_scores = [
86
- cls.permute(0, 2, 3,
87
-                         1).reshape(cls.size(0), -1, self.cls_out_channels)
-             for cls in cls_scores
-         ]
-         bbox_preds = [
-             bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4)
-             for bbox_pred in bbox_preds
-         ]
-         cls_scores = torch.cat(cls_scores, dim=1)
-         bbox_preds = torch.cat(bbox_preds, dim=1)
-
-         cls_prob = torch.sigmoid(cls_scores)
-         box_prob = []
-         num_pos = 0
-         positive_losses = []
-         for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_,
-                 bbox_preds_) in enumerate(
-                     zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)):
-
-             with torch.no_grad():
-                 if len(gt_bboxes_) == 0:
-                     image_box_prob = torch.zeros(
-                         anchors_.size(0),
-                         self.cls_out_channels).type_as(bbox_preds_)
-                 else:
-                     # box_localization: a_{j}^{loc}, shape: [j, 4]
-                     pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_)
-
-                     # object_box_iou: IoU_{ij}^{loc}, shape: [i, j]
-                     object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes)
-
-                     # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j]
-                     t1 = self.bbox_thr
-                     t2 = object_box_iou.max(
-                         dim=1, keepdim=True).values.clamp(min=t1 + 1e-12)
-                     object_box_prob = ((object_box_iou - t1) /
-                                        (t2 - t1)).clamp(
-                                            min=0, max=1)
-
-                     # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j]
-                     num_obj = gt_labels_.size(0)
-                     indices = torch.stack([
-                         torch.arange(num_obj).type_as(gt_labels_), gt_labels_
-                     ],
-                                           dim=0)
-                     object_cls_box_prob = torch.sparse_coo_tensor(
-                         indices, object_box_prob)
-
-                     # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j]
-                     """
-                     from "start" to "end" implement:
-                     image_box_iou = torch.sparse.max(object_cls_box_prob,
-                                                      dim=0).t()
-
-                     """
-                     # start
-                     box_cls_prob = torch.sparse.sum(
-                         object_cls_box_prob, dim=0).to_dense()
-
-                     indices = torch.nonzero(box_cls_prob, as_tuple=False).t_()
-                     if indices.numel() == 0:
-                         image_box_prob = torch.zeros(
-                             anchors_.size(0),
-                             self.cls_out_channels).type_as(object_box_prob)
-                     else:
-                         nonzero_box_prob = torch.where(
-                             (gt_labels_.unsqueeze(dim=-1) == indices[0]),
-                             object_box_prob[:, indices[1]],
-                             torch.tensor([
-                                 0
-                             ]).type_as(object_box_prob)).max(dim=0).values
-
-                         # map up to shape [j, c]
-                         image_box_prob = torch.sparse_coo_tensor(
-                             indices.flip([0]),
-                             nonzero_box_prob,
-                             size=(anchors_.size(0),
-                                   self.cls_out_channels)).to_dense()
-                     # end
-
-                 box_prob.append(image_box_prob)
-
-             # construct bags for objects
-             match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_)
-             _, matched = torch.topk(
-                 match_quality_matrix,
-                 self.pre_anchor_topk,
-                 dim=1,
-                 sorted=False)
-             del match_quality_matrix
-
-             # matched_cls_prob: P_{ij}^{cls}
-             matched_cls_prob = torch.gather(
-                 cls_prob_[matched], 2,
-                 gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk,
-                                                  1)).squeeze(2)
-
-             # matched_box_prob: P_{ij}^{loc}
-             matched_anchors = anchors_[matched]
-             matched_object_targets = self.bbox_coder.encode(
-                 matched_anchors,
-                 gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors))
-             loss_bbox = self.loss_bbox(
-                 bbox_preds_[matched],
-                 matched_object_targets,
-                 reduction_override='none').sum(-1)
-             matched_box_prob = torch.exp(-loss_bbox)
-
-             # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )}
-             num_pos += len(gt_bboxes_)
-             positive_losses.append(
-                 self.positive_bag_loss(matched_cls_prob, matched_box_prob))
-         positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos)
-
-         # box_prob: P{a_{j} \in A_{+}}
-         box_prob = torch.stack(box_prob, dim=0)
-
-         # negative_loss:
-         # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B||
-         negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max(
-             1, num_pos * self.pre_anchor_topk)
-
-         # avoid the absence of gradients in regression subnet
-         # when no ground-truth in a batch
-         if num_pos == 0:
-             positive_loss = bbox_preds.sum() * 0
-
-         losses = {
-             'positive_bag_loss': positive_loss,
-             'negative_bag_loss': negative_loss
-         }
-         return losses
-
-     def positive_bag_loss(self, matched_cls_prob, matched_box_prob):
-         """Compute positive bag loss.
-
-         :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`.
-
-         :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples.
-
-         :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples.
-
-         Args:
-             matched_cls_prob (Tensor): Classification probability of matched
-                 samples in shape (num_gt, pre_anchor_topk).
-             matched_box_prob (Tensor): BBox probability of matched samples,
-                 in shape (num_gt, pre_anchor_topk).
-
-         Returns:
-             Tensor: Positive bag loss in shape (num_gt,).
-         """  # noqa: E501, W605
-         # bag_prob = Mean-max(matched_prob)
-         matched_prob = matched_cls_prob * matched_box_prob
-         weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)
-         weight /= weight.sum(dim=1).unsqueeze(dim=-1)
-         bag_prob = (weight * matched_prob).sum(dim=1)
-         # positive_bag_loss = -self.alpha * log(bag_prob)
-         return self.alpha * F.binary_cross_entropy(
-             bag_prob, torch.ones_like(bag_prob), reduction='none')
-
-     def negative_bag_loss(self, cls_prob, box_prob):
-         """Compute negative bag loss.
-
-         :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`.
-
-         :math:`P_{a_{j} \in A_{+}}`: box probability of matched samples.
-
-         :math:`P_{j}^{bg}`: Classification probability of negative samples.
-
-         Args:
-             cls_prob (Tensor): Classification probability, in shape
-                 (num_img, num_anchors, num_classes).
-             box_prob (Tensor): Box probability, in shape
-                 (num_img, num_anchors, num_classes).
-
-         Returns:
-             Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
-         """  # noqa: E501, W605
-         prob = cls_prob * (1 - box_prob)
-         # There are some cases when neg_prob = 0.
-         # This will cause neg_prob.log() to be inf without the clamp.
-         prob = prob.clamp(min=EPS, max=1 - EPS)
-         negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
-             prob, torch.zeros_like(prob), reduction='none')
-         return (1 - self.alpha) * negative_bag_loss
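For reference, a minimal standalone sketch of the "Mean-max" bag probability computed by `positive_bag_loss` above; the tensor values are made up, and `alpha` is fixed to 0.5 purely for illustration:

```python
import torch
import torch.nn.functional as F

# One bag of pre_anchor_topk = 3 matched anchors for a single ground truth.
matched_prob = torch.tensor([[0.9, 0.6, 0.1]])         # P_ij^cls * P_ij^loc
weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)
weight = weight / weight.sum(dim=1, keepdim=True)      # normalize within the bag
bag_prob = (weight * matched_prob).sum(dim=1)          # soft "Mean-max" over the bag
loss = 0.5 * F.binary_cross_entropy(
    bag_prob, torch.ones_like(bag_prob), reduction='none')
print(bag_prob, loss)  # bag_prob is pulled toward the best anchor (~0.78 here)
```

The weighting approaches a hard max as the best anchor's probability approaches 1, but stays differentiable across the whole bag.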
 
spaces/CVPR/WALT/walt/datasets/walt_3d.py DELETED
@@ -1,535 +0,0 @@
- import itertools
- import logging
- import os.path as osp
- import tempfile
- from collections import OrderedDict
-
- import mmcv
- import numpy as np
- import pycocotools
- from mmcv.utils import print_log
- from pycocotools.coco import COCO
- from pycocotools.cocoeval import COCOeval
- from terminaltables import AsciiTable
-
- from mmdet.core import eval_recalls
- from .builder import DATASETS
- from .custom import CustomDatasetLocal
-
-
- def bounding_box(points):
-     """Return a list [x1, y1, x2, y2] containing the bottom-left and the
-     top-right corners of the given points.
-     Here, we traverse the collection of points only once,
-     to find the min and max for x and y.
-     """
-     bot_left_x, bot_left_y = float('inf'), float('inf')
-     top_right_x, top_right_y = float('-inf'), float('-inf')
-     for point in points:
-         x = point[0]
-         y = point[1]
-         if x < 0 or y < 0:
-             continue
-         bot_left_x = min(bot_left_x, x)
-         bot_left_y = min(bot_left_y, y)
-         top_right_x = max(top_right_x, x)
-         top_right_y = max(top_right_y, y)
-
-     return [bot_left_x, bot_left_y, top_right_x, top_right_y]
-
-
- lines = [[0, 1], [1, 3], [0, 2], [3, 2], [0, 4], [1, 5], [2, 6], [3, 7],
-          [4, 5], [5, 7], [4, 6], [7, 6]]
-
-
- def get_boundingbox2d3d(cameraname, gt_data, extrinsics_path):
-     f = open(extrinsics_path, "r")
-     while True:
-         a = f.readline()
-         print(cameraname, a.split('\n')[0].split(' ')[0])
-         if cameraname in a.split('\n')[0].split(' ')[0]:
-             a = a.split('\n')[0].split(' ')
-             break
-
-     K = np.reshape(np.array(a[1:10]), [3, 3]).astype(float)
-     R = np.reshape(a[10:19], [3, 3])
-     T = np.array([[a[19]], [a[20]], [a[21]]])
-     RT = np.hstack((R, T)).astype(float)
-     KRT = np.matmul(K, RT)
-     bb_3d_connected = []
-     bb_3d_all = []
-     bb_2d_all = []
-     bb_3d_proj_all = []
-
-     for indice, keypoints_3d in enumerate(gt_data['arr_0'][1]):
-         parking_space = gt_data['arr_0'][0][indice][0]
-
-         if gt_data['arr_0'][0][indice][1] == 0:
-             continue
-         points2d_all = []
-         parking_space = np.vstack([parking_space, parking_space + [0, 0, 2]])
-         parking_space_transformed = []
-         for point in parking_space:
-             point = [point[0], point[1], point[2], 1]
-             point = np.matmul(RT, point)
-             parking_space_transformed.append(list(point))
-             point2d = np.matmul(K, point)
-             if point2d[2] < 0:
-                 points2d_all.append([-100, -100, 1])
-                 continue
-             point2d = point2d / point2d[2]
-             if point2d[0] < 0 or point2d[0] > 2048:
-                 points2d_all.append([-100, -100, 1])
-                 continue
-             if point2d[1] < 0 or point2d[1] > 2048:
-                 points2d_all.append([-100, -100, 1])
-                 continue
-
-             points2d_all.append(point2d)
-
-         bb_3d_proj_all.append(points2d_all)
-         bbox = bounding_box(points2d_all)
-         if float('inf') in bbox:
-             continue
-         bb_2d_all.append(bbox)
-         bb_3d_all.append(parking_space)
-         # for line in lines:
-         #     bb_3d_connected.append(parking_space[line[0]])
-         #     bb_3d_connected.append(parking_space[line[1]])
-     return bb_3d_all, bb_2d_all, bb_3d_proj_all
-
-
- @DATASETS.register_module()
- class Walt3DDataset(CustomDatasetLocal):
-
-     CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
-                'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
-                'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
-                'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
-                'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
-                'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
-                'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
-                'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
-                'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-                'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
-                'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
-                'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
-                'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
-                'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
-
-     def load_annotations(self, ann_file):
-         import glob
-         count = 0
-         data_infos = []
-         self.data_annotations = []
-         for i in glob.glob(ann_file + '*'):
-             gt_data = np.load(i, allow_pickle=True)
-             for img_folder in glob.glob(
-                     ann_file.replace('GT_data', 'images') + '/*'):
-                 cam_name = img_folder.split('/')[-1]
-                 img_name = i.split('/')[-1].replace('.npz', '.png')
-                 info = dict(
-                     license=3,
-                     height=2048,
-                     width=2048,
-                     file_name=cam_name + '/' + img_name,
-                     date_captured=i.split('/')[-1].split('.')[0],
-                     id=count,
-                     filename=cam_name + '/' + img_name)
-                 # info = dict(license=3, height=2048, width=2048, file_name=i.split('/')[-1].replace('.npz', '.png'), date_captured=i.split('/')[-1].split('.')[0], id=count, filename=i.split('/')[-1].replace('.npz', '.png'))
-                 count = count + 1
-                 data_infos.append(info)
-                 bb_3d_all, bb_2d_all, bb_3d_proj_all = get_boundingbox2d3d(
-                     cam_name, gt_data,
-                     ann_file.replace('GT_data', 'Extrinsics') +
-                     '/frame_par.txt')
-                 self.data_annotations.append(
-                     [bb_3d_all, bb_2d_all, bb_3d_proj_all])
-                 break
-         return data_infos
-
-     def get_ann_info(self, idx):
-         data = self.data_annotations[idx]
-         gt_bboxes = np.array(data[1])
-         gt_bboxes_3d = np.array(data[0])
-         gt_bboxes_3d_proj = np.array(data[2])
-
-         ann = dict(
-             bboxes=gt_bboxes,
-             bboxes_3d=gt_bboxes_3d,
-             bboxes_3d_proj=gt_bboxes_3d_proj,
-             labels=(np.zeros(len(gt_bboxes)) + 2).astype(int),
-             bboxes_ignore=np.zeros((0, 4), dtype=np.float32),
-             # masks=np.array([]),
-             seg_map=np.array([]))
-         return ann
-
-     def get_cat_ids(self, idx):
-         data = self.data_annotations[idx]
-         gt_bboxes = np.array(data[1])
-         return (np.zeros(len(gt_bboxes)) + 2).astype(int)
-
-     def _filter_imgs(self, min_size=32):
-         """Filter out images that are too small or have no ground truths."""
-         valid_inds = []
-         for data_info in self.data_infos:
-             valid_inds.append(data_info['id'])
-         print(valid_inds)
-
-         return valid_inds
-
-     def xyxy2xywh(self, bbox):
-         """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
-         evaluation.
-
-         Args:
-             bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
-                 ``xyxy`` order.
-
-         Returns:
-             list[float]: The converted bounding boxes, in ``xywh`` order.
-         """
-
-         _bbox = bbox.tolist()
-         return [
-             _bbox[0],
-             _bbox[1],
-             _bbox[2] - _bbox[0],
-             _bbox[3] - _bbox[1],
-         ]
-
-     def _proposal2json(self, results):
-         """Convert proposal results to COCO json style."""
-         json_results = []
-         for idx in range(len(self)):
-             img_id = self.img_ids[idx]
-             bboxes = results[idx]
-             for i in range(bboxes.shape[0]):
-                 data = dict()
-                 data['image_id'] = img_id
-                 data['bbox'] = self.xyxy2xywh(bboxes[i])
-                 data['score'] = float(bboxes[i][4])
-                 data['category_id'] = 1
-                 json_results.append(data)
-         return json_results
-
-     def _det2json(self, results):
-         """Convert detection results to COCO json style."""
-         json_results = []
-         for idx in range(len(self)):
-             img_id = self.img_ids[idx]
-             result = results[idx]
-             for label in range(len(result)):
-                 bboxes = result[label]
-                 for i in range(bboxes.shape[0]):
-                     data = dict()
-                     data['image_id'] = img_id
-                     data['bbox'] = self.xyxy2xywh(bboxes[i])
-                     data['score'] = float(bboxes[i][4])
-                     data['category_id'] = self.cat_ids[label]
-                     json_results.append(data)
-         return json_results
-
-     def _segm2json(self, results):
-         """Convert instance segmentation results to COCO json style."""
-         bbox_json_results = []
-         segm_json_results = []
-         for idx in range(len(self)):
-             img_id = self.img_ids[idx]
-             det, seg = results[idx]
-             for label in range(len(det)):
-                 # bbox results
-                 bboxes = det[label]
-                 for i in range(bboxes.shape[0]):
-                     data = dict()
-                     data['image_id'] = img_id
-                     data['bbox'] = self.xyxy2xywh(bboxes[i])
-                     data['score'] = float(bboxes[i][4])
-                     data['category_id'] = self.cat_ids[label]
-                     bbox_json_results.append(data)
-
-                 # segm results
-                 # some detectors use different scores for bbox and mask
-                 if isinstance(seg, tuple):
-                     segms = seg[0][label]
-                     mask_score = seg[1][label]
-                 else:
-                     segms = seg[label]
-                     mask_score = [bbox[4] for bbox in bboxes]
-                 for i in range(bboxes.shape[0]):
-                     data = dict()
-                     data['image_id'] = img_id
-                     data['bbox'] = self.xyxy2xywh(bboxes[i])
-                     data['score'] = float(mask_score[i])
-                     data['category_id'] = self.cat_ids[label]
-                     if isinstance(segms[i]['counts'], bytes):
-                         segms[i]['counts'] = segms[i]['counts'].decode()
-                     data['segmentation'] = segms[i]
-                     segm_json_results.append(data)
-         return bbox_json_results, segm_json_results
-
-     def results2json(self, results, outfile_prefix):
-         """Dump the detection results to a COCO style json file.
-
-         There are 3 types of results: proposals, bbox predictions, mask
-         predictions, and they have different data types. This method will
-         automatically recognize the type, and dump them to json files.
-
-         Args:
-             results (list[list | tuple | ndarray]): Testing results of the
-                 dataset.
-             outfile_prefix (str): The filename prefix of the json files. If the
-                 prefix is "somepath/xxx", the json files will be named
-                 "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
-                 "somepath/xxx.proposal.json".
-
-         Returns:
-             dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
-                 values are corresponding filenames.
-         """
-         result_files = dict()
-         if isinstance(results[0], list):
-             json_results = self._det2json(results)
-             result_files['bbox'] = f'{outfile_prefix}.bbox.json'
-             result_files['proposal'] = f'{outfile_prefix}.bbox.json'
-             mmcv.dump(json_results, result_files['bbox'])
-         elif isinstance(results[0], tuple):
-             json_results = self._segm2json(results)
-             result_files['bbox'] = f'{outfile_prefix}.bbox.json'
-             result_files['proposal'] = f'{outfile_prefix}.bbox.json'
-             result_files['segm'] = f'{outfile_prefix}.segm.json'
-             mmcv.dump(json_results[0], result_files['bbox'])
-             mmcv.dump(json_results[1], result_files['segm'])
-         elif isinstance(results[0], np.ndarray):
-             json_results = self._proposal2json(results)
-             result_files['proposal'] = f'{outfile_prefix}.proposal.json'
-             mmcv.dump(json_results, result_files['proposal'])
-         else:
-             raise TypeError('invalid type of results')
-         return result_files
-
-     def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
-         gt_bboxes = []
-         for i in range(len(self.img_ids)):
-             ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
-             ann_info = self.coco.load_anns(ann_ids)
-             if len(ann_info) == 0:
-                 gt_bboxes.append(np.zeros((0, 4)))
-                 continue
-             bboxes = []
-             for ann in ann_info:
-                 if ann.get('ignore', False) or ann['iscrowd']:
-                     continue
-                 x1, y1, w, h = ann['bbox']
-                 bboxes.append([x1, y1, x1 + w, y1 + h])
-             bboxes = np.array(bboxes, dtype=np.float32)
-             if bboxes.shape[0] == 0:
-                 bboxes = np.zeros((0, 4))
-             gt_bboxes.append(bboxes)
-
-         recalls = eval_recalls(
-             gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
-         ar = recalls.mean(axis=1)
-         return ar
-
-     def format_results(self, results, jsonfile_prefix=None, **kwargs):
-         """Format the results to json (standard format for COCO evaluation).
-
-         Args:
-             results (list[tuple | numpy.ndarray]): Testing results of the
-                 dataset.
-             jsonfile_prefix (str | None): The prefix of json files. It includes
-                 the file path and the prefix of filename, e.g., "a/b/prefix".
-                 If not specified, a temp file will be created. Default: None.
-
-         Returns:
-             tuple: (result_files, tmp_dir), result_files is a dict containing \
-                 the json filepaths, tmp_dir is the temporary directory created \
-                 for saving json files when jsonfile_prefix is not specified.
-         """
-         assert isinstance(results, list), 'results must be a list'
-         assert len(results) == len(self), (
-             'The length of results is not equal to the dataset len: {} != {}'.
-             format(len(results), len(self)))
-
-         if jsonfile_prefix is None:
-             tmp_dir = tempfile.TemporaryDirectory()
-             jsonfile_prefix = osp.join(tmp_dir.name, 'results')
-         else:
-             tmp_dir = None
-         result_files = self.results2json(results, jsonfile_prefix)
-         return result_files, tmp_dir
-
-     def evaluate(self,
-                  results,
-                  metric='bbox',
-                  logger=None,
-                  jsonfile_prefix=None,
-                  classwise=False,
-                  proposal_nums=(100, 300, 1000),
-                  iou_thrs=None,
-                  metric_items=None):
-         """Evaluation in COCO protocol.
-
-         Args:
-             results (list[list | tuple]): Testing results of the dataset.
-             metric (str | list[str]): Metrics to be evaluated. Options are
-                 'bbox', 'segm', 'proposal', 'proposal_fast'.
-             logger (logging.Logger | str | None): Logger used for printing
-                 related information during evaluation. Default: None.
-             jsonfile_prefix (str | None): The prefix of json files. It includes
-                 the file path and the prefix of filename, e.g., "a/b/prefix".
-                 If not specified, a temp file will be created. Default: None.
-             classwise (bool): Whether to evaluate the AP for each class.
-             proposal_nums (Sequence[int]): Proposal number used for evaluating
-                 recalls, such as recall@100, recall@1000.
-                 Default: (100, 300, 1000).
-             iou_thrs (Sequence[float], optional): IoU threshold used for
-                 evaluating recalls/mAPs. If set to a list, the average of all
-                 IoUs will also be computed. If not specified, [0.50, 0.55,
-                 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
-                 Default: None.
-             metric_items (list[str] | str, optional): Metric items that will
-                 be returned. If not specified, ``['AR@100', 'AR@300',
-                 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
-                 used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
-                 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
-                 ``metric=='bbox' or metric=='segm'``.
-
-         Returns:
-             dict[str, float]: COCO style evaluation metric.
-         """
-
-         metrics = metric if isinstance(metric, list) else [metric]
-         allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
-         for metric in metrics:
-             if metric not in allowed_metrics:
-                 raise KeyError(f'metric {metric} is not supported')
-         if iou_thrs is None:
-             iou_thrs = np.linspace(
-                 .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
-         if metric_items is not None:
-             if not isinstance(metric_items, list):
-                 metric_items = [metric_items]
-
-         result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
-
-         eval_results = OrderedDict()
-         cocoGt = self.coco
-         for metric in metrics:
-             msg = f'Evaluating {metric}...'
-             if logger is None:
-                 msg = '\n' + msg
-             print_log(msg, logger=logger)
-
-             if metric == 'proposal_fast':
-                 ar = self.fast_eval_recall(
-                     results, proposal_nums, iou_thrs, logger='silent')
-                 log_msg = []
-                 for i, num in enumerate(proposal_nums):
-                     eval_results[f'AR@{num}'] = ar[i]
-                     log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
-                 log_msg = ''.join(log_msg)
-                 print_log(log_msg, logger=logger)
-                 continue
-
-             if metric not in result_files:
-                 raise KeyError(f'{metric} is not in results')
-             try:
-                 cocoDt = cocoGt.loadRes(result_files[metric])
-             except IndexError:
-                 print_log(
-                     'The testing results of the whole dataset are empty.',
-                     logger=logger,
-                     level=logging.ERROR)
-                 break
-
-             iou_type = 'bbox' if metric == 'proposal' else metric
-             cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
-             cocoEval.params.catIds = self.cat_ids
-             cocoEval.params.imgIds = self.img_ids
-             cocoEval.params.maxDets = list(proposal_nums)
-             cocoEval.params.iouThrs = iou_thrs
-             # mapping of cocoEval.stats
-             coco_metric_names = {
-                 'mAP': 0,
-                 'mAP_50': 1,
-                 'mAP_75': 2,
-                 'mAP_s': 3,
-                 'mAP_m': 4,
-                 'mAP_l': 5,
-                 'AR@100': 6,
-                 'AR@300': 7,
-                 'AR@1000': 8,
-                 'AR_s@1000': 9,
-                 'AR_m@1000': 10,
-                 'AR_l@1000': 11
-             }
-             if metric_items is not None:
-                 for metric_item in metric_items:
-                     if metric_item not in coco_metric_names:
-                         raise KeyError(
-                             f'metric item {metric_item} is not supported')
-
-             if metric == 'proposal':
-                 cocoEval.params.useCats = 0
-                 cocoEval.evaluate()
-                 cocoEval.accumulate()
-                 cocoEval.summarize()
-                 if metric_items is None:
-                     metric_items = [
-                         'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
-                         'AR_m@1000', 'AR_l@1000'
-                     ]
-
-                 for item in metric_items:
-                     val = float(
-                         f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
-                     eval_results[item] = val
-             else:
-                 cocoEval.evaluate()
-                 cocoEval.accumulate()
-                 cocoEval.summarize()
-                 if classwise:  # Compute per-category AP
-                     # Compute per-category AP
-                     # from https://github.com/facebookresearch/detectron2/
-                     precisions = cocoEval.eval['precision']
-                     # precision: (iou, recall, cls, area range, max dets)
-                     assert len(self.cat_ids) == precisions.shape[2]
-
-                     results_per_category = []
-                     for idx, catId in enumerate(self.cat_ids):
-                         # area range index 0: all area ranges
-                         # max dets index -1: typically 100 per image
-                         nm = self.coco.loadCats(catId)[0]
-                         precision = precisions[:, :, idx, 0, -1]
-                         precision = precision[precision > -1]
-                         if precision.size:
-                             ap = np.mean(precision)
-                         else:
-                             ap = float('nan')
-                         results_per_category.append(
-                             (f'{nm["name"]}', f'{float(ap):0.3f}'))
-
-                     num_columns = min(6, len(results_per_category) * 2)
-                     results_flatten = list(
-                         itertools.chain(*results_per_category))
-                     headers = ['category', 'AP'] * (num_columns // 2)
-                     results_2d = itertools.zip_longest(*[
-                         results_flatten[i::num_columns]
-                         for i in range(num_columns)
-                     ])
-                     table_data = [headers]
-                     table_data += [result for result in results_2d]
-                     table = AsciiTable(table_data)
-                     print_log('\n' + table.table, logger=logger)
-
-                 if metric_items is None:
-                     metric_items = [
-                         'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
-                     ]
-
-                 for metric_item in metric_items:
-                     key = f'{metric}_{metric_item}'
-                     val = float(
-                         f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
-                     )
-                     eval_results[key] = val
-                 ap = cocoEval.stats[:6]
-                 eval_results[f'{metric}_mAP_copypaste'] = (
-                     f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
-                     f'{ap[4]:.3f} {ap[5]:.3f}')
-         if tmp_dir is not None:
-             tmp_dir.cleanup()
-         return eval_results
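A small, self-contained sketch of how the `bounding_box` helper above behaves on projected keypoints; the values are made up, and the `[-100, -100, 1]` entries are the off-screen sentinels the projection loop emits, which the `x < 0 or y < 0` guard skips:

```python
points = [[10, 40, 1], [120, 15, 1], [-100, -100, 1], [60, 200, 1]]
print(bounding_box(points))  # [10, 15, 120, 200]
```

If every point is off-screen, the result still contains `float('inf')`, which is exactly what the `if float('inf') in bbox: continue` check in `get_boundingbox2d3d` filters out.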
 
spaces/Catmeow/AI_story_writing/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: AI Story Writing
- emoji: 📚
- colorFrom: pink
- colorTo: red
- sdk: gradio
- sdk_version: 3.8
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/file_operations.py DELETED
@@ -1,267 +0,0 @@
- """File operations for AutoGPT"""
- from __future__ import annotations
-
- import os
- import os.path
- from typing import Generator
-
- import requests
- from colorama import Back, Fore
- from requests.adapters import HTTPAdapter, Retry
-
- from autogpt.spinner import Spinner
- from autogpt.utils import readable_file_size
- from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
-
- LOG_FILE = "file_logger.txt"
- LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
-
-
- def check_duplicate_operation(operation: str, filename: str) -> bool:
-     """Check if the operation has already been performed on the given file
-
-     Args:
-         operation (str): The operation to check for
-         filename (str): The name of the file to check for
-
-     Returns:
-         bool: True if the operation has already been performed on the file
-     """
-     log_content = read_file(LOG_FILE)
-     log_entry = f"{operation}: {filename}\n"
-     return log_entry in log_content
-
-
- def log_operation(operation: str, filename: str) -> None:
-     """Log the file operation to file_logger.txt
-
-     Args:
-         operation (str): The operation to log
-         filename (str): The name of the file the operation was performed on
-     """
-     log_entry = f"{operation}: {filename}\n"
-
-     # Create the log file if it doesn't exist
-     if not os.path.exists(LOG_FILE_PATH):
-         with open(LOG_FILE_PATH, "w", encoding="utf-8") as f:
-             f.write("File Operation Logger ")
-
-     append_to_file(LOG_FILE, log_entry, shouldLog=False)
-
-
- def split_file(
-     content: str, max_length: int = 4000, overlap: int = 0
- ) -> Generator[str, None, None]:
-     """
-     Split text into chunks of a specified maximum length with a specified overlap
-     between chunks.
-
-     :param content: The input text to be split into chunks
-     :param max_length: The maximum length of each chunk,
-         default is 4000 (about 1k tokens)
-     :param overlap: The number of overlapping characters between chunks,
-         default is no overlap
-     :return: A generator yielding chunks of text
-     """
-     start = 0
-     content_length = len(content)
-
-     while start < content_length:
-         end = start + max_length
-         if end + overlap < content_length:
-             chunk = content[start : end + overlap - 1]
-         else:
-             chunk = content[start:content_length]
-
-         # Account for the case where the last chunk is shorter than the
-         # overlap, so it has already been consumed
-         if len(chunk) <= overlap:
-             break
-
-         yield chunk
-         start += max_length - overlap
-
-
- def read_file(filename: str) -> str:
-     """Read a file and return the contents
-
-     Args:
-         filename (str): The name of the file to read
-
-     Returns:
-         str: The contents of the file
-     """
-     try:
-         filepath = path_in_workspace(filename)
-         with open(filepath, "r", encoding="utf-8") as f:
-             content = f.read()
-         return content
-     except Exception as e:
-         return f"Error: {str(e)}"
-
-
- def ingest_file(
-     filename: str, memory, max_length: int = 4000, overlap: int = 200
- ) -> None:
-     """
-     Ingest a file by reading its content, splitting it into chunks with a specified
-     maximum length and overlap, and adding the chunks to the memory storage.
-
-     :param filename: The name of the file to ingest
-     :param memory: An object with an add() method to store the chunks in memory
-     :param max_length: The maximum length of each chunk, default is 4000
-     :param overlap: The number of overlapping characters between chunks, default is 200
-     """
-     try:
-         print(f"Working with file {filename}")
-         content = read_file(filename)
-         content_length = len(content)
-         print(f"File length: {content_length} characters")
-
-         chunks = list(split_file(content, max_length=max_length, overlap=overlap))
-
-         num_chunks = len(chunks)
-         for i, chunk in enumerate(chunks):
-             print(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
-             memory_to_add = (
-                 f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
-             )
-
-             memory.add(memory_to_add)
-
-         print(f"Done ingesting {num_chunks} chunks from {filename}.")
-     except Exception as e:
-         print(f"Error while ingesting file '{filename}': {str(e)}")
-
-
- def write_to_file(filename: str, text: str) -> str:
-     """Write text to a file
-
-     Args:
-         filename (str): The name of the file to write to
-         text (str): The text to write to the file
-
-     Returns:
-         str: A message indicating success or failure
-     """
-     if check_duplicate_operation("write", filename):
-         return "Error: File has already been updated."
-     try:
-         filepath = path_in_workspace(filename)
-         directory = os.path.dirname(filepath)
-         if not os.path.exists(directory):
-             os.makedirs(directory)
-         with open(filepath, "w", encoding="utf-8") as f:
-             f.write(text)
-         log_operation("write", filename)
-         return "File written to successfully."
-     except Exception as e:
-         return f"Error: {str(e)}"
-
-
- def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str:
-     """Append text to a file
-
-     Args:
-         filename (str): The name of the file to append to
-         text (str): The text to append to the file
-
-     Returns:
-         str: A message indicating success or failure
-     """
-     try:
-         filepath = path_in_workspace(filename)
-         with open(filepath, "a") as f:
-             f.write(text)
-
-         if shouldLog:
-             log_operation("append", filename)
-
-         return "Text appended successfully."
-     except Exception as e:
-         return f"Error: {str(e)}"
-
-
- def delete_file(filename: str) -> str:
-     """Delete a file
-
-     Args:
-         filename (str): The name of the file to delete
-
-     Returns:
-         str: A message indicating success or failure
-     """
-     if check_duplicate_operation("delete", filename):
-         return "Error: File has already been deleted."
-     try:
-         filepath = path_in_workspace(filename)
-         os.remove(filepath)
-         log_operation("delete", filename)
-         return "File deleted successfully."
-     except Exception as e:
-         return f"Error: {str(e)}"
-
-
- def search_files(directory: str) -> list[str]:
-     """Search for files in a directory
-
-     Args:
-         directory (str): The directory to search in
-
-     Returns:
-         list[str]: A list of files found in the directory
-     """
-     found_files = []
-
-     if directory in {"", "/"}:
-         search_directory = WORKSPACE_PATH
-     else:
-         search_directory = path_in_workspace(directory)
-
-     for root, _, files in os.walk(search_directory):
-         for file in files:
-             if file.startswith("."):
-                 continue
-             relative_path = os.path.relpath(os.path.join(root, file), WORKSPACE_PATH)
-             found_files.append(relative_path)
-
-     return found_files
-
-
- def download_file(url, filename):
-     """Download a file
-
-     Args:
-         url (str): URL of the file to download
-         filename (str): Filename to save the file as
-     """
-     safe_filename = path_in_workspace(filename)
-     try:
-         message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
-         with Spinner(message) as spinner:
-             session = requests.Session()
-             retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
-             adapter = HTTPAdapter(max_retries=retry)
-             session.mount("http://", adapter)
-             session.mount("https://", adapter)
-
-             total_size = 0
-             downloaded_size = 0
-
-             with session.get(url, allow_redirects=True, stream=True) as r:
-                 r.raise_for_status()
-                 total_size = int(r.headers.get("Content-Length", 0))
-                 downloaded_size = 0
-
-                 with open(safe_filename, "wb") as f:
-                     for chunk in r.iter_content(chunk_size=8192):
-                         f.write(chunk)
-                         downloaded_size += len(chunk)
-
-                         # Update the progress message
-                         progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
-                         spinner.update_message(f"{message} {progress}")
-
-             return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(total_size)})'
-     except requests.HTTPError as e:
-         return f"Got an HTTP Error whilst trying to download file: {e}"
-     except Exception as e:
-         return "Error: " + str(e)
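As a quick illustration of the chunking performed by `split_file` above (a toy string; real calls use max_length around 4000 and overlap around 200):

```python
text = "abcdefghij"
print(list(split_file(text, max_length=4, overlap=2)))
# -> ['abcde', 'cdefg', 'efghij', 'ghij']; each chunk overlaps the next,
# and a trailing chunk no longer than `overlap` is dropped, since its
# characters were already consumed by the previous chunk.
```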
 
spaces/CofAI/chat.b4/client/css/message-input.css DELETED
@@ -1,27 +0,0 @@
- #message-input {
-     margin-right: 30px;
-     height: 64px;
- }
-
- #message-input::-webkit-scrollbar {
-     width: 5px;
- }
-
- #message-input::-webkit-scrollbar-track {
-     background: #f1f1f1;
- }
-
- #message-input::-webkit-scrollbar-thumb {
-     background: #c7a2ff;
- }
-
- #message-input::-webkit-scrollbar-thumb:hover {
-     background: #8b3dff;
- }
-
- @media screen and (max-width: 360px) {
-     #message-input {
-         margin: 0;
-     }
- }
-
 
spaces/CofAI/chat.b4/g4f/Provider/Providers/helpers/gpt4love.py DELETED
@@ -1,48 +0,0 @@
- import json
- import sys
- from re import findall
- from curl_cffi import requests
-
- config = json.loads(sys.argv[1])
- prompt = config['messages'][-1]['content']
-
- headers = {
-     'authority': 'api.gptplus.one',
-     'accept': 'application/json, text/plain, */*',
-     'accept-language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4',
-     'content-type': 'application/octet-stream',
-     'origin': 'https://ai.gptforlove.com/',
-     'referer': 'https://ai.gptforlove.com/',
-     'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
-     'sec-ch-ua-mobile': '?0',
-     'sec-ch-ua-platform': '"macOS"',
-     'sec-fetch-dest': 'empty',
-     'sec-fetch-mode': 'cors',
-     'sec-fetch-site': 'cross-site',
-     'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
- }
-
- json_data = {
-     'prompt': prompt,
-     'options': {}
- }
-
- def format(chunk):
-     try:
-         completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
-         print(completion_chunk, flush=True, end='')
-
-     except Exception as e:
-         print(f'[ERROR] an error occurred, retrying... | [[{chunk.decode()}]]', flush=True)
-         return
-
- while True:
-     try:
-         response = requests.post('https://api.gptplus.one/api/chat-process',
-                                  headers=headers, json=json_data,
-                                  content_callback=format, impersonate='chrome110')
-
-         exit(0)
-
-     except Exception as e:
-         print('[ERROR] an error occurred, retrying... |', e, flush=True)
-         continue
 
spaces/Cran-May/SEA-Streamlit/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: 兮辞·析辞-常明
- emoji: 💻
- colorFrom: indigo
- colorTo: pink
- sdk: streamlit
- sdk_version: 1.27.2
- app_file: app.py
- pinned: true
- models:
- - Cran-May/OpenSLIDE
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/version.py DELETED
@@ -1,6 +0,0 @@
- """
- Expose version
- """
-
- __version__ = "3.2.0"
- VERSION = __version__.split(".")
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/__init__.py DELETED
@@ -1,61 +0,0 @@
- from fontTools.pens.transformPen import TransformPen
- from fontTools.misc import etree
- from fontTools.misc.textTools import tostr
- from .parser import parse_path
- from .shapes import PathBuilder
-
-
- __all__ = [tostr(s) for s in ("SVGPath", "parse_path")]
-
-
- class SVGPath(object):
-     """Parse SVG ``path`` elements from a file or string, and draw them
-     onto a glyph object that supports the FontTools Pen protocol.
-
-     For example, reading from an SVG file and drawing to a Defcon Glyph:
-
-         import defcon
-         glyph = defcon.Glyph()
-         pen = glyph.getPen()
-         svg = SVGPath("path/to/a.svg")
-         svg.draw(pen)
-
-     Or reading from a string containing SVG data, using the alternative
-     'fromstring' (a class method):
-
-         data = '<?xml version="1.0" ...'
-         svg = SVGPath.fromstring(data)
-         svg.draw(pen)
-
-     Both constructors can optionally take a 'transform' matrix (6-float
-     tuple, or a FontTools Transform object) to modify the draw output.
-     """
-
-     def __init__(self, filename=None, transform=None):
-         if filename is None:
-             self.root = etree.ElementTree()
-         else:
-             tree = etree.parse(filename)
-             self.root = tree.getroot()
-         self.transform = transform
-
-     @classmethod
-     def fromstring(cls, data, transform=None):
-         self = cls(transform=transform)
-         self.root = etree.fromstring(data)
-         return self
-
-     def draw(self, pen):
-         if self.transform:
-             pen = TransformPen(pen, self.transform)
-         pb = PathBuilder()
-         # the "xpath |" union syntax doesn't seem to work reliably,
-         # so just walk the whole tree
-         for el in self.root.iter():
-             pb.add_path_from_element(el)
-         original_pen = pen
-         for path, transform in zip(pb.paths, pb.transforms):
-             if transform:
-                 pen = TransformPen(original_pen, transform)
-             else:
-                 pen = original_pen
-             parse_path(path, pen)
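A hedged round-trip sketch of the class above, using fontTools' RecordingPen to capture the drawing commands; the inline SVG string is a made-up example:

```python
from fontTools.pens.recordingPen import RecordingPen

data = '<svg xmlns="http://www.w3.org/2000/svg"><path d="M0 0 L10 0 L10 10 Z"/></svg>'
svg = SVGPath.fromstring(data)
pen = RecordingPen()
svg.draw(pen)
print(pen.value)  # [('moveTo', ((0.0, 0.0),)), ('lineTo', ...), ..., ('closePath', ())]
```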
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/__main__.py DELETED
@@ -1,100 +0,0 @@
- import sys
- from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError
- from fontTools.ttLib.ttFont import *
- from fontTools.ttLib.ttCollection import TTCollection
-
-
- def main(args=None):
-     """Open/save fonts with TTFont() or TTCollection()
-
-       ./fonttools ttLib [-oFILE] [-yNUMBER] files...
-
-     If multiple files are given on the command-line,
-     they are each opened (as a font or collection),
-     and added to the font list.
-
-     If the -o (output-file) argument is given, the font
-     list is then saved to the output file, either as
-     a single font, if there is only one font, or as
-     a collection otherwise.
-
-     If the -y (font-number) argument is given, only the
-     specified font from collections is opened.
-
-     The above allow extracting a single font from a
-     collection, or combining multiple fonts into a
-     collection.
-
-     If --lazy or --no-lazy are given, those are passed
-     to the TTFont() or TTCollection() constructors.
-     """
-     from fontTools import configLogger
-
-     if args is None:
-         args = sys.argv[1:]
-
-     import argparse
-
-     parser = argparse.ArgumentParser(
-         "fonttools ttLib",
-         description="Open/save fonts with TTFont() or TTCollection()",
-         epilog="""
-         If multiple files are given on the command-line,
-         they are each opened (as a font or collection),
-         and added to the font list.
-
-         The above, when combined with -o / --output,
-         allows for extracting a single font from a
-         collection, or combining multiple fonts into a
-         collection.
-         """,
-     )
-     parser.add_argument("font", metavar="font", nargs="*", help="Font file.")
-     parser.add_argument(
-         "-o", "--output", metavar="FILE", default=None, help="Output file."
-     )
-     parser.add_argument(
-         "-y", metavar="NUMBER", default=-1, help="Font number to load from collections."
-     )
-     parser.add_argument(
-         "--lazy", action="store_true", default=None, help="Load fonts lazily."
-     )
-     parser.add_argument(
-         "--no-lazy", dest="lazy", action="store_false", help="Load fonts immediately."
-     )
-     parser.add_argument(
-         "--flavor",
-         dest="flavor",
-         default=None,
-         help="Flavor of output font. 'woff' or 'woff2'.",
-     )
-     options = parser.parse_args(args)
-
-     fontNumber = int(options.y) if options.y is not None else None
-     outFile = options.output
-     lazy = options.lazy
-     flavor = options.flavor
-
-     fonts = []
-     for f in options.font:
-         try:
-             font = TTFont(f, fontNumber=fontNumber, lazy=lazy)
-             fonts.append(font)
-         except TTLibFileIsCollectionError:
-             collection = TTCollection(f, lazy=lazy)
-             fonts.extend(collection.fonts)
-
-     if outFile is not None:
-         if len(fonts) == 1:
-             fonts[0].flavor = flavor
-             fonts[0].save(outFile)
-         else:
-             if flavor is not None:
-                 raise TTLibError("Cannot set flavor for collections.")
-             collection = TTCollection()
-             collection.fonts = fonts
-             collection.save(outFile)
-
-
- if __name__ == "__main__":
-     sys.exit(main())
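For context, the entry point above can also be driven programmatically rather than via the `fonttools ttLib` command line; a sketch with placeholder font paths, combining two fonts into a collection:

```python
# Equivalent to: fonttools ttLib -o out.ttc font1.ttf font2.ttf
# (font1.ttf / font2.ttf are placeholders for real font files on disk)
main(["-o", "out.ttc", "font1.ttf", "font2.ttf"])
```

Because there is more than one input font and `-o` is given, the branch at the end of `main` writes a TTCollection instead of a single font.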
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_content.py DELETED
@@ -1,238 +0,0 @@
- import inspect
- import warnings
- from json import dumps as json_dumps
- from typing import (
-     Any,
-     AsyncIterable,
-     AsyncIterator,
-     Dict,
-     Iterable,
-     Iterator,
-     Mapping,
-     Optional,
-     Tuple,
-     Union,
- )
- from urllib.parse import urlencode
-
- from ._exceptions import StreamClosed, StreamConsumed
- from ._multipart import MultipartStream
- from ._types import (
-     AsyncByteStream,
-     RequestContent,
-     RequestData,
-     RequestFiles,
-     ResponseContent,
-     SyncByteStream,
- )
- from ._utils import peek_filelike_length, primitive_value_to_str
-
-
- class ByteStream(AsyncByteStream, SyncByteStream):
-     def __init__(self, stream: bytes) -> None:
-         self._stream = stream
-
-     def __iter__(self) -> Iterator[bytes]:
-         yield self._stream
-
-     async def __aiter__(self) -> AsyncIterator[bytes]:
-         yield self._stream
-
-
- class IteratorByteStream(SyncByteStream):
-     CHUNK_SIZE = 65_536
-
-     def __init__(self, stream: Iterable[bytes]):
-         self._stream = stream
-         self._is_stream_consumed = False
-         self._is_generator = inspect.isgenerator(stream)
-
-     def __iter__(self) -> Iterator[bytes]:
-         if self._is_stream_consumed and self._is_generator:
-             raise StreamConsumed()
-
-         self._is_stream_consumed = True
-         if hasattr(self._stream, "read"):
-             # File-like interfaces should use 'read' directly.
-             chunk = self._stream.read(self.CHUNK_SIZE)
-             while chunk:
-                 yield chunk
-                 chunk = self._stream.read(self.CHUNK_SIZE)
-         else:
-             # Otherwise iterate.
-             for part in self._stream:
-                 yield part
-
-
- class AsyncIteratorByteStream(AsyncByteStream):
-     CHUNK_SIZE = 65_536
-
-     def __init__(self, stream: AsyncIterable[bytes]):
-         self._stream = stream
-         self._is_stream_consumed = False
-         self._is_generator = inspect.isasyncgen(stream)
-
-     async def __aiter__(self) -> AsyncIterator[bytes]:
-         if self._is_stream_consumed and self._is_generator:
-             raise StreamConsumed()
-
-         self._is_stream_consumed = True
-         if hasattr(self._stream, "aread"):
-             # File-like interfaces should use 'aread' directly.
-             chunk = await self._stream.aread(self.CHUNK_SIZE)
-             while chunk:
-                 yield chunk
-                 chunk = await self._stream.aread(self.CHUNK_SIZE)
-         else:
-             # Otherwise iterate.
-             async for part in self._stream:
-                 yield part
-
-
- class UnattachedStream(AsyncByteStream, SyncByteStream):
-     """
-     If a request or response is serialized using pickle, then it is no longer
-     attached to a stream for I/O purposes. Any stream operations should result
-     in `httpx.StreamClosed`.
-     """
-
-     def __iter__(self) -> Iterator[bytes]:
-         raise StreamClosed()
-
-     async def __aiter__(self) -> AsyncIterator[bytes]:
-         raise StreamClosed()
-         yield b""  # pragma: no cover
-
-
- def encode_content(
-     content: Union[str, bytes, Iterable[bytes], AsyncIterable[bytes]]
- ) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]:
-     if isinstance(content, (bytes, str)):
-         body = content.encode("utf-8") if isinstance(content, str) else content
-         content_length = len(body)
-         headers = {"Content-Length": str(content_length)} if body else {}
-         return headers, ByteStream(body)
-
-     elif isinstance(content, Iterable) and not isinstance(content, dict):
-         # `not isinstance(content, dict)` is a bit oddly specific, but it
-         # catches a case that's easy for users to make in error, and would
-         # otherwise pass through here, like any other bytes-iterable,
-         # because `dict` happens to be iterable. See issue #2491.
-         content_length_or_none = peek_filelike_length(content)
-
-         if content_length_or_none is None:
-             headers = {"Transfer-Encoding": "chunked"}
-         else:
-             headers = {"Content-Length": str(content_length_or_none)}
-         return headers, IteratorByteStream(content)  # type: ignore
-
-     elif isinstance(content, AsyncIterable):
-         headers = {"Transfer-Encoding": "chunked"}
-         return headers, AsyncIteratorByteStream(content)
-
-     raise TypeError(f"Unexpected type for 'content', {type(content)!r}")
-
-
- def encode_urlencoded_data(
-     data: RequestData,
- ) -> Tuple[Dict[str, str], ByteStream]:
-     plain_data = []
-     for key, value in data.items():
-         if isinstance(value, (list, tuple)):
-             plain_data.extend([(key, primitive_value_to_str(item)) for item in value])
-         else:
-             plain_data.append((key, primitive_value_to_str(value)))
-     body = urlencode(plain_data, doseq=True).encode("utf-8")
-     content_length = str(len(body))
-     content_type = "application/x-www-form-urlencoded"
-     headers = {"Content-Length": content_length, "Content-Type": content_type}
-     return headers, ByteStream(body)
-
-
- def encode_multipart_data(
-     data: RequestData, files: RequestFiles, boundary: Optional[bytes]
- ) -> Tuple[Dict[str, str], MultipartStream]:
-     multipart = MultipartStream(data=data, files=files, boundary=boundary)
-     headers = multipart.get_headers()
-     return headers, multipart
-
-
- def encode_text(text: str) -> Tuple[Dict[str, str], ByteStream]:
-     body = text.encode("utf-8")
-     content_length = str(len(body))
-     content_type = "text/plain; charset=utf-8"
-     headers = {"Content-Length": content_length, "Content-Type": content_type}
-     return headers, ByteStream(body)
-
-
- def encode_html(html: str) -> Tuple[Dict[str, str], ByteStream]:
-     body = html.encode("utf-8")
-     content_length = str(len(body))
-     content_type = "text/html; charset=utf-8"
-     headers = {"Content-Length": content_length, "Content-Type": content_type}
-     return headers, ByteStream(body)
-
-
- def encode_json(json: Any) -> Tuple[Dict[str, str], ByteStream]:
-     body = json_dumps(json).encode("utf-8")
-     content_length = str(len(body))
-     content_type = "application/json"
-     headers = {"Content-Length": content_length, "Content-Type": content_type}
-     return headers, ByteStream(body)
-
-
- def encode_request(
-     content: Optional[RequestContent] = None,
-     data: Optional[RequestData] = None,
-     files: Optional[RequestFiles] = None,
-     json: Optional[Any] = None,
-     boundary: Optional[bytes] = None,
- ) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]:
-     """
-     Handles encoding the given `content`, `data`, `files`, and `json`,
-     returning a two-tuple of (<headers>, <stream>).
-     """
-     if data is not None and not isinstance(data, Mapping):
-         # We prefer to separate `content=<bytes|str|byte iterator|bytes aiterator>`
-         # for raw request content, and `data=<form data>` for url encoded or
-         # multipart form content.
-         #
-         # However for compat with requests, we *do* still support
-         # `data=<bytes...>` usages. We deal with that case here, treating it
-         # as if `content=<...>` had been supplied instead.
-         message = "Use 'content=<...>' to upload raw bytes/text content."
-         warnings.warn(message, DeprecationWarning)
-         return encode_content(data)
-
-     if content is not None:
-         return encode_content(content)
-     elif files:
-         return encode_multipart_data(data or {}, files, boundary)
-     elif data:
-         return encode_urlencoded_data(data)
-     elif json is not None:
-         return encode_json(json)
-
-     return {}, ByteStream(b"")
-
-
- def encode_response(
-     content: Optional[ResponseContent] = None,
-     text: Optional[str] = None,
-     html: Optional[str] = None,
-     json: Optional[Any] = None,
- ) -> Tuple[Dict[str, str], Union[SyncByteStream, AsyncByteStream]]:
-     """
-     Handles encoding the given `content`, returning a two-tuple of
-     (<headers>, <stream>).
-     """
-     if content is not None:
-         return encode_content(content)
-     elif text is not None:
-         return encode_text(text)
-     elif html is not None:
-         return encode_html(html)
-     elif json is not None:
-         return encode_json(json)
-
-     return {}, ByteStream(b"")
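A minimal sketch of the JSON path through `encode_request` above, using only the standard library and the helpers defined in this module:

```python
headers, stream = encode_request(json={"name": "httpx", "ok": True})
print(headers)           # {'Content-Length': '29', 'Content-Type': 'application/json'}
print(b"".join(stream))  # b'{"name": "httpx", "ok": true}'
```

Since `content`, `data`, and `files` are all unset, dispatch falls through to `encode_json`, which serializes the payload once and wraps it in a `ByteStream` usable from both sync and async clients.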