Commit 41d2f90
Parent(s): deb9cd3
Update parquet files (step 30 of 476)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/tests/test_api.py +0 -38
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage How to Get the Most Out of It.md +0 -101
- spaces/1gistliPinn/ChatGPT4/Examples/E Elio Le Story Tese Torrent.md +0 -94
- spaces/1phancelerku/anime-remove-background/Download attack on titan mod APK for Android - Free and Easy.md +0 -126
- spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/txt_processors/zh.py +0 -117
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/x_transformer.py +0 -641
- spaces/AIGText/GlyphControl/annotator/render_images.py +0 -95
- spaces/Abhilashvj/planogram-compliance/classify/train.py +0 -537
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Equing.py +0 -81
- spaces/Aditya9790/yolo7-object-tracking/models/experimental.py +0 -272
- spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/ngu_dialect.py +0 -30
- spaces/AlexWang/lama/bin/predict_inner_features.py +0 -119
- spaces/Ame42/rwms/local_utils.py +0 -344
- spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/policy.h +0 -25
- spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/models.py +0 -770
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/autoencoderkl.md +0 -43
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/cycle_diffusion.md +0 -33
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_lms.py +0 -140
- spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py +0 -2
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/pisa_ssd_head.py +0 -139
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py +0 -2
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/exllamav2.py +0 -133
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/unet.py +0 -894
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_win32_console.py +0 -662
- spaces/Benson/text-generation/Examples/Descargar 60 Lakh Cancin.md +0 -135
- spaces/Benson/text-generation/Examples/Descargar Android Euro Camin Simulador 2.md +0 -67
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/crt/__init__.py +0 -27
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py +0 -112
- spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/appdirs.py +0 -608
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/util.h +0 -589
- spaces/CVPR/lama-example/saicinpainting/training/losses/style_loss.py +0 -155
- spaces/CVPR/lama-example/saicinpainting/training/modules/fake_fakes.py +0 -47
- spaces/CVPR/v-doc_abstractive_mac/demo.py +0 -83
- spaces/Caoyunkang/Segment-Any-Anomaly/SAM/scripts/amg.py +0 -238
- spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/components/YamlReader.js +0 -83
- spaces/CognitiveLabs/Research-Assistant/statics/README_zh.md +0 -41
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/templating.py +0 -1
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/IconButton-abe5ede9.js +0 -2
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_paths.py +0 -117
- spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/postprocessing/dula/layout_old.py +0 -134
- spaces/Datasculptor/StyleGAN-NADA/op/fused_bias_act.cpp +0 -21
- spaces/DeepakJaiz/QA_evaluator/README.md +0 -12
- spaces/Demosthene-OR/avr23-cds-translation/tabs/custom_vectorizer.py +0 -14
- spaces/DragGan/DragGan-Inversion/PTI/training/coaches/base_coach.py +0 -158
- spaces/DragGan/DragGan-Inversion/PTI/training/projectors/w_plus_projector.py +0 -145
- spaces/Dragonnext/charybdis/greeting.md +0 -17
- spaces/EsoCode/text-generation-webui/modules/sampler_hijack.py +0 -204
- spaces/EuroPython2022/mmocr-demo/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py +0 -33
- spaces/Evanell/Venus/README.md +0 -10
- spaces/FourthBrainGenAI/DeepLearningAIDemoChatBot/app.py +0 -281
spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/tests/test_api.py
DELETED
@@ -1,38 +0,0 @@
-import unittest
-import requests
-from unittest.mock import MagicMock
-from gpt4free.quora.api import retry_request
-
-
-class TestRetryRequest(unittest.TestCase):
-    def test_successful_request(self):
-        # Mock a successful request with a 200 status code
-        mock_response = MagicMock()
-        mock_response.status_code = 200
-        requests.get = MagicMock(return_value=mock_response)
-
-        # Call the function and assert that it returns the response
-        response = retry_request(requests.get, "http://example.com", max_attempts=3)
-        self.assertEqual(response.status_code, 200)
-
-    def test_exponential_backoff(self):
-        # Mock a failed request that succeeds after two retries
-        mock_response = MagicMock()
-        mock_response.status_code = 200
-        requests.get = MagicMock(side_effect=[requests.exceptions.RequestException] * 2 + [mock_response])
-
-        # Call the function and assert that it retries with exponential backoff
-        with self.assertLogs() as logs:
-            response = retry_request(requests.get, "http://example.com", max_attempts=3, delay=1)
-            self.assertEqual(response.status_code, 200)
-            self.assertGreaterEqual(len(logs.output), 2)
-            self.assertIn("Retrying in 1 seconds...", logs.output[0])
-            self.assertIn("Retrying in 2 seconds...", logs.output[1])
-
-    def test_too_many_attempts(self):
-        # Mock a failed request that never succeeds
-        requests.get = MagicMock(side_effect=requests.exceptions.RequestException)
-
-        # Call the function and assert that it raises an exception after the maximum number of attempts
-        with self.assertRaises(RuntimeError):
-            retry_request(requests.get, "http://example.com", max_attempts=3)
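The `retry_request` helper these tests exercise is imported from `gpt4free.quora.api` and is not part of this diff. A minimal sketch that would satisfy the assertions above (exponential backoff doubling from `delay`, a logged "Retrying in N seconds..." message, and a `RuntimeError` once `max_attempts` is exhausted); the body below is an assumption, not the original implementation:

```python
import logging
import time

import requests


def retry_request(request_fn, url, max_attempts=3, delay=1):
    """Call request_fn(url), retrying with exponential backoff on failure (hypothetical sketch)."""
    for attempt in range(max_attempts):
        try:
            return request_fn(url)
        except requests.exceptions.RequestException:
            if attempt == max_attempts - 1:
                break  # no retries left
            wait = delay * 2 ** attempt  # 1s, 2s, 4s, ... when delay=1
            logging.warning(f"Retrying in {wait} seconds...")
            time.sleep(wait)
    raise RuntimeError(f"Request failed after {max_attempts} attempts")
```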
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage How to Get the Most Out of It.md
DELETED
@@ -1,101 +0,0 @@
-
-<h1>Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage: A Comprehensive Guide</h1>
-<p>Adobe Photoshop is the most popular and powerful image editing software in the world. It allows you to create, edit, and enhance photos, graphics, and designs with a variety of tools and features. Whether you are a professional designer, photographer, or hobbyist, Adobe Photoshop can help you achieve your creative vision.</p>
-<p>In this article, we will introduce you to Adobe Photoshop CC 2014, the latest version of the software that was released in June 2014. We will explain what Adobe Photoshop CC 2014 is, what are its main features, how to install and activate it, and how to use it for design and photography. By the end of this article, you will have a better understanding of Adobe Photoshop CC 2014 and how to make the most of it.</p>
-<h2>Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguagel</h2><br /><p><b><b>DOWNLOAD</b> ===== <a href="https://byltly.com/2uKze0">https://byltly.com/2uKze0</a></b></p><br /><br />
-<h2>What is Adobe Photoshop CC 2014?</h2>
-<p>Adobe Photoshop CC 2014 is the fourteenth major release of Adobe Photoshop, which is part of the Adobe Creative Cloud subscription service. It is also known as Adobe Photoshop 15 or Adobe Photoshop 2014. It is available for Windows and Mac OS X operating systems, and it supports both 32-bit and 64-bit architectures.</p>
-<h3>The main features of Adobe Photoshop CC 2014</h3>
-<p>Adobe Photoshop CC 2014 introduces several new features and enhancements that improve the performance, functionality, and usability of the software. Some of the most notable new features are:</p>
-<ul>
-<li><b>Editing images directly in Adobe Camera Raw.</b> This means that you no longer have to convert your raw files into Photoshop format before you can start editing them. Adobe Camera Raw is a powerful image editing tool that gives you complete control over your raw files.</li>
-<li><b>Applying blur effects with Blur Gallery.</b> This feature allows you to create realistic motion blur, spin blur, and path blur effects with ease. You can also use multiple blurs in one image and adjust them individually.</li>
-<li><b>Enhancing typography with new controls and fonts.</b> This feature gives you more options to customize your text, such as font size variations, font matching, smart quotes, hyphenation, and more. You can also access over 900 fonts from Typekit, a library of high-quality fonts that are integrated with Creative Cloud.</li>
-<li><b>Creating and managing assets with Creative Cloud Libraries.</b> This feature lets you create, categorize, and store your favorite colors, brushes, text styles, graphics, and vector images in one easily accessible place. Then you can access them anywhere: Assets you create under the same Adobe ID will be visible across different computers—in a variety of applications like Photoshop CC—wherever you sign in.</li>
-</ul>
-<h3>The system requirements for Adobe Photoshop CC 2014</h3>
-<p>To run Adobe Photoshop CC 2014 smoothly on your computer, you need to meet the following minimum system requirements:</p>
-<table>
-<tr>
-<th>Operating system</th>
-<th>Processor</th>
-<th>RAM</th>
-<th>Hard disk space</th>
-<th>Graphics card</th>
-</tr>
-<tr>
-<td>Windows 7 SP1 or later (32-bit or 64-bit)</td>
-<td>Intel Pentium 4 or AMD Athlon 64 processor (2 GHz or faster)</td>
-<td>2 GB (8 GB recommended)</td>
-<td>2 GB of available hard-disk space for installation; additional free space required during installation (cannot install on removable flash storage devices)</td>
-<td>1024 x 768 display (1280 x 800 recommended) with OpenGL® 2.0–capable system</td>
-</tr>
-<tr>
-<td>Mac OS X v10.7 or later (64-bit only)</td>
-<td>Multicore Intel processor with 64-bit support</td>
-<td>2 GB (8 GB recommended)</td>
-<td>3.2 GB of available hard-disk space for installation; additional free space required during installation (cannot install on a volume that uses a case-sensitive file system or on removable flash storage devices)</td>
-<td>1024 x 768 display (1280 x 800 recommended) with OpenGL® 2.0–capable system</td>
-</tr>
-</table>
-<h2>How to install and activate Adobe Photoshop CC 2014?</h2>
-<p>To install and activate Adobe Photoshop CC 2014 on your computer, you need to follow these steps:</p>
-<p>Adobe Photoshop CC 2014 crack download<br />
-Adobe Photoshop CC 2014 multilingual portable<br />
-Adobe Photoshop CC 2014 serial number<br />
-Adobe Photoshop CC 2014 offline installer<br />
-Adobe Photoshop CC 2014 free trial<br />
-Adobe Photoshop CC 2014 full version<br />
-Adobe Photoshop CC 2014 keygen<br />
-Adobe Photoshop CC 2014 system requirements<br />
-Adobe Photoshop CC 2014 tutorial<br />
-Adobe Photoshop CC 2014 update<br />
-Adobe Photoshop CC 2014 features<br />
-Adobe Photoshop CC 2014 license key<br />
-Adobe Photoshop CC 2014 activation code<br />
-Adobe Photoshop CC 2014 patch<br />
-Adobe Photoshop CC 2014 direct download link<br />
-Adobe Photoshop CC 2014 torrent<br />
-Adobe Photoshop CC 2014 mac<br />
-Adobe Photoshop CC 2014 windows<br />
-Adobe Photoshop CC 2014 x64 bit<br />
-Adobe Photoshop CC 2014 x32 bit<br />
-Adobe Photoshop CC 2014 latest version<br />
-Adobe Photoshop CC 2014 review<br />
-Adobe Photoshop CC 2014 tips and tricks<br />
-Adobe Photoshop CC 2014 plugins<br />
-Adobe Photoshop CC 2014 brushes<br />
-Adobe Photoshop CC 2014 presets<br />
-Adobe Photoshop CC 2014 filters<br />
-Adobe Photoshop CC 2014 actions<br />
-Adobe Photoshop CC 2014 fonts<br />
-Adobe Photoshop CC 2014 tools<br />
-Adobe Photoshop CC 2014 shortcuts<br />
-Adobe Photoshop CC 2014 layers<br />
-Adobe Photoshop CC 2014 masks<br />
-Adobe Photoshop CC 2014 smart objects<br />
-Adobe Photoshop CC 2014 adjustment layers<br />
-Adobe Photoshop CC 2014 blending modes<br />
-Adobe Photoshop CC 2014 selection tools<br />
-Adobe Photoshop CC 2014 transform tools<br />
-Adobe Photoshop CC 2014 crop tool<br />
-Adobe Photoshop CC 2014 healing tools<br />
-Adobe Photoshop CC 2014 clone stamp tool<br />
-Adobe Photoshop CC 2014 pen tool<br />
-Adobe Photoshop CC 2014 text tool<br />
-Adobe Photoshop CC 2014 shape tool<br />
-Adobe Photoshop CC 2014 gradient tool<br />
-Adobe Photoshop CC 2014 paint bucket tool<br />
-Adobe Photoshop CC 2014 eraser tool<br />
-Adobe Photoshop CC 2014 dodge and burn tools<br />
-Adobe Photoshop CC 2014 sponge tool</p>
-<h3>Downloading the setup files</h3>
-<p>You can download the setup files for Adobe Photoshop CC 2014 from the official website of Adobe or from other trusted sources online. Make sure you download the correct version for your operating system and architecture (32-bit or 64-bit). The setup files are usually compressed in ZIP or RAR format, so you need to extract them before installing.</p>
-<h3>Installing Adobe Photoshop CC 2014</h3>
-<p>To install Adobe Photoshop CC 2014 on your computer, you need to run the setup.exe file that you extracted from the downloaded file. Follow the instructions on the screen to complete the installation process. You may need to restart your computer after the installation is finished.</p>
-<h3>Activating Adobe Photoshop CC 2014 with a serial number or a patch</h3>
-<p>To activate Adobe Photoshop CC 2014 on your computer, you need to have a valid serial number or a patch that can bypass the activation process. A serial number is a unique code that identifies your license for using the software. A patch is a small program that modifies the original software code to remove the activation requirement.</p>
-<p>You can obtain a serial number or a patch from various sources online, such as forums, blogs, or websites that offer cracked software. However, be careful when downloading these files as they may contain viruses or malware that can harm your computer. Also, using cracked software is illegal and unethical as it violates the terms and conditions of Adobe.</p>
-<p>If you have a serial number for Adobe Photoshop CC 2014, you can enter it when prompted during the installation process or after launching the software for the first time. If you have a patch for Adobe Photoshop CC</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/E Elio Le Story Tese Torrent.md
DELETED
@@ -1,94 +0,0 @@
-<br />
-<h1>E Elio Le Story Tese Torrent: How to Download Their Music for Free</h1>
-
-<p>Elio e le Storie Tese is an Italian comedy rock band that was formed in 1980. The band is known for their humorous and satirical lyrics, their eclectic musical style, and their live performances. The band has released 14 studio albums, 5 live albums, and several singles and compilations. Some of their most popular songs are "La terra dei cachi", "Mio cuggino", "Born to Be Abramo", and "La canzone mononota".</p>
-
-<p>If you are a fan of Elio e le Storie Tese and you want to download their music for free, you might be tempted to use a torrent site or a file-sharing platform that hosts pirated copies of their albums. However, this is not a legal or safe way to get their music. You might be breaking the law, violating the rights of the band and their record label, exposing yourself to malware or viruses, or risking legal troubles or penalties.</p>
-<h2>E Elio Le Story Tese Torrent</h2><br /><p><b><b>Download</b> –––––>>> <a href="https://imgfil.com/2uxYMw">https://imgfil.com/2uxYMw</a></b></p><br /><br />
-
-<p>The best way to download E Elio Le Story Tese Torrent legally and safely is to use a streaming service that offers their music in your preferred language and region. Some of the popular streaming platforms that have Elio e le Storie Tese in their library are:</p>
-
-<ul>
-<li>Spotify: You can listen to Elio e le Storie Tese on Spotify for free with ads, or you can upgrade to Spotify Premium for ad-free listening, offline mode, and other features.</li>
-<li>Apple Music: You can listen to Elio e le Storie Tese on Apple Music with a subscription fee, or you can try it for free for three months.</li>
-<li>Deezer: You can listen to Elio e le Storie Tese on Deezer for free with ads, or you can upgrade to Deezer Premium for ad-free listening, offline mode, and other features.</li>
-<li>YouTube Music: You can listen to Elio e le Storie Tese on YouTube Music for free with ads, or you can upgrade to YouTube Music Premium for ad-free listening, offline mode, and other features.</li>
-<li>Amazon Music: You can listen to Elio e le Storie Tese on Amazon Music with a subscription fee, or you can try it for free for 30 days.</li>
-</ul>
-
-<p>All these streaming services offer high-quality audio, as well as various payment options and customer support. However, they may not be available in all countries or regions, so you should check their availability and pricing before choosing one.</p>
-
-<h2>How to Download E Elio Le Story Tese Torrent Illegally</h2>
-
-<p>If you still want to download E Elio Le Story Tese Torrent illegally, you should be aware of the risks and consequences involved. Some of the notorious websites that offer Elio e le Storie Tese torrents are:</p>
-
-<ul>
-<li>RuTracker.org: This is a Russian torrent site that has a large collection of music torrents, including Elio e le Storie Tese albums in FLAC format.</li>
-<li>Marok.org: This is an Italian torrent site that has some videos of Elio e le Storie Tese live performances.</li>
-<li>Archive.org: This is a digital library that hosts various media files, including some audio files of Elio e le Storie Tese songs.</li>
-<li>Direct-Download.com: This is a file-sharing platform that has a link to download Elio e le Storie Tese discography in RAR format.</li>
-</ul>
-
-<p>These websites claim to provide free and fast downloads of E Elio Le Story Tese Torrent files in various formats and resolutions. However, they are not authorized by the original creators or distributors of the music, and they violate the copyright laws and intellectual property rights of the music industry. Moreover, they are risky and unsafe to use, as they may contain malware or viruses that can infect your device or steal your personal information. They may also expose you to legal troubles or penalties if you are caught downloading or sharing pirated content.</p>
-
-<h2>Conclusion</h2>
-
-<p>Elio e le Storie Tese is a band that will appeal to fans of comedy rock and Italian music. If you want to download their music for free, you have several options online, but not all of them are legal or safe. The best way to download E Elio Le Story Tese Torrent legally and safely is to use a streaming service that offers their music in your preferred language and region. However, if you choose to download E Elio Le Story Tese Torrent illegally, you should be aware of the risks and consequences involved.</p>
-
-<p>In this article, we have provided you with some information and tips on how to download E Elio Le Story Tese Torrent legally or illegally. We hope you have enjoyed reading this article and found it useful. Now go ahead and download E Elio Le Story Tese Torrent and enjoy their music!</p>
-<p></p>
-<h2>Why Elio e le Storie Tese is a Unique Band</h2>
-
-<p>Elio e le Storie Tese is not just a comedy rock band, but also a cultural phenomenon in Italy. The band has been praised for their originality, creativity, and versatility. They have experimented with various genres and styles, such as pop, rock, jazz, funk, metal, classical, folk, rap, and more. They have also collaborated with many famous artists and personalities, such as Luciano Pavarotti, Ennio Morricone, Giorgio Moroder, Renato Zero, Jovanotti, and Fabio Fazio.</p>
-
-<p>Elio e le Storie Tese is also known for their social and political satire, their parody of Italian stereotypes and clichés, and their criticism of the Italian society and media. The band has often used irony, sarcasm, absurdity, and nonsense to convey their messages and opinions. They have also created many fictional characters and alter egos, such as Rocco Tanica, Faso, Cesareo, Mangoni, Feiez, Elio Samaga Hukapan Kariyana Turu (the Sri Lankan version of Elio), and Il Complesso Misterioso (a fake band that competed in the Sanremo Music Festival).</p>
-
-<p>Elio e le Storie Tese is a band that has influenced many other artists and comedians in Italy and abroad. They have also received many awards and recognitions for their music and career. They have been nominated for several MTV Europe Music Awards and Italian Music Awards. They have also won the Critics' Award at the Sanremo Music Festival twice (in 1996 and 2013). In 2016, they announced their farewell tour, which ended in 2018 with a final concert in Milan.</p>
-
-<h2>How to Support Elio e le Storie Tese</h2>
-
-<p>If you love Elio e le Storie Tese and you want to support them, you can do so in various ways. Here are some suggestions:</p>
-
-<ul>
-<li>Buy their music: You can buy their albums, singles, compilations, or special editions from their official website or from online stores such as Amazon or iTunes.</li>
-<li>Watch their videos: You can watch their music videos, live performances, interviews, documentaries, or sketches on their official YouTube channel or on other platforms such as Vimeo or Dailymotion.</li>
-<li>Follow them on social media: You can follow them on Facebook, Twitter, Instagram, or other social networks to get updates on their news, events, projects, or personal lives.</li>
-<li>Join their fan club: You can join their official fan club "Elii" to get access to exclusive content, merchandise, discounts, contests, or meet-and-greets.</li>
-<li>Donate to their causes: You can donate to their charitable causes or initiatives that they support or promote. For example, you can donate to the Fondazione Umberto Veronesi (a foundation that supports scientific research on cancer), to the Emergency (a humanitarian organization that provides medical care to victims of war and poverty), or to the Lega del Filo d'Oro (an association that helps deafblind people).</li>
-</ul>
-
-<p>By supporting Elio e le Storie Tese, you are not only showing your appreciation for their music and artistry but also contributing to their legacy and impact on the Italian culture and society.</p>
-<h2>How to Discover More About Elio e le Storie Tese</h2>
-
-<p>If you are curious about Elio e le Storie Tese and you want to discover more about their music and history, you can do so in various ways. Here are some suggestions:</p>
-
-<ul>
-<li>Read their books: You can read their books that contain their lyrics, stories, anecdotes, illustrations, or photos. Some of their books are "Elio Samaga Hukapan Kariyana Turu", "Gli Occhi del Cuore", "Il Mistero dei Bulli", and "La Risposta è Nelle Stelle".</li>
-<li>Watch their movies: You can watch their movies that feature their songs, sketches, or appearances. Some of their movies are "Tutti Gli Uomini del Deficiente", "La Febbre del Sabato Sera", "Fuga da Reuma Park", and "Boris Il Film".</li>
-<li>Listen to their podcasts: You can listen to their podcasts that cover various topics, such as music, cinema, literature, or current affairs. Some of their podcasts are "Elio e le Storie Tese Show", "Elio e le Storie Tese Radio Show", and "Elio e le Storie Tese Podcast".</li>
-<li>Visit their website: You can visit their official website that contains their news, biography, discography, tour dates, merchandise, or contacts.</li>
-<li>Subscribe to their newsletter: You can subscribe to their official newsletter that will send you updates on their activities, projects, or offers.</li>
-</ul>
-
-<p>By discovering more about Elio e le Storie Tese, you are not only enriching your knowledge and appreciation for their music and artistry but also joining their loyal and passionate fan community.</p>
-
-<h2>How to Share E Elio Le Story Tese Torrent with Others</h2>
-
-<p>If you love E Elio Le Story Tese Torrent and you want to share it with others, you can do so in various ways. Here are some suggestions:</p>
-
-<ul>
-<li>Create a playlist: You can create a playlist of your favorite Elio e le Storie Tese songs and share it with your friends or family on social media or streaming platforms.</li>
-<li>Write a review: You can write a review of your favorite Elio e le Storie Tese album or song and share it with other fans or music lovers on blogs or forums.</li>
-<li>Make a tribute: You can make a tribute to Elio e le Storie Tese by covering their songs, making a fan art, writing a fan fiction, or cosplaying their characters.</li>
-<li>Attend a concert: You can attend one of their live concerts and enjoy their music and performance with other fans. You can also take photos or videos and share them online.</li>
-<li>Recommend them: You can recommend Elio e le Storie Tese to someone who might like their music or style. You can also introduce them to some of their songs or albums that suit their taste or mood.</li>
-</ul>
-
-<p>By sharing E Elio Le Story Tese Torrent with others, you are not only spreading your love and enthusiasm for their music and artistry but also supporting their career and success.</p>
-<h2>Conclusion</h2>
-
-<p>E Elio Le Story Tese Torrent is a keyword that refers to the illegal and unsafe way of downloading the music of Elio e le Storie Tese, an Italian comedy rock band that has been entertaining audiences since 1980. The band is known for their witty and satirical lyrics, their eclectic musical style, and their energetic live performances. The band has released 14 studio albums, 5 live albums, and several singles and compilations.</p>
-
-<p>In this article, we have provided you with some information and tips on how to download E Elio Le Story Tese Torrent legally and safely, how to discover more about Elio e le Storie Tese, and how to share their music with others. We hope you have enjoyed reading this article and found it useful. Now go ahead and enjoy E Elio Le Story Tese Torrent online!</p> 3cee63e6c2<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Download attack on titan mod APK for Android - Free and Easy.md
DELETED
@@ -1,126 +0,0 @@
-<br />
-<h1>Attack on Titan Mod Free Download in APKPure</h1>
-<p>If you are a fan of the popular anime and manga series Attack on Titan, you might be interested in playing the game based on it. However, if you want to enjoy some extra features and enhancements, you might want to try the mod version of the game. In this article, we will show you how to download and install Attack on Titan mod free from APKPure, one of the best sources for Android apps and games.</p>
-<h2>attack on titan mod free download in apkpure</h2><br /><p><b><b>Download Zip</b> ○ <a href="https://jinyurl.com/2uNMLd">https://jinyurl.com/2uNMLd</a></b></p><br /><br />
-<h2>What is Attack on Titan?</h2>
-<p>Attack on Titan is a Japanese manga series written and illustrated by Hajime Isayama. It is set in a world where humanity lives inside cities surrounded by three enormous walls that protect them from gigantic man-eating humanoids referred to as Titans. The story follows Eren Yeager, who vows to exterminate the Titans after they bring about the destruction of his hometown and the death of his mother.</p>
-<p>The manga series has been adapted into an anime television series, which has four seasons so far. The anime series has received critical acclaim and commercial success, winning several awards and becoming one of the best-selling manga series of all time.</p>
-<p>The game based on the anime series is called Attack on Titan / A.O.T. Wings of Freedom. It is an action hack and slash game that lets you play as one of the beloved characters from the series. You can use the Three Dimensional Maneuver Gear to fly around and fight against the Titans. You can also discover the story from the anime, with some original twists, and experience the thrill of being in the anime.</p>
-<p>attack on titan mod apk download for android<br />
-attack on titan mod minecraft pe free download<br />
-attack on titan mod apk unlimited money and gems<br />
-attack on titan mod menu apk download latest version<br />
-attack on titan mod for gta san andreas free download<br />
-attack on titan mod apk offline no root<br />
-attack on titan mod pack for minecraft java edition<br />
-attack on titan mod apk rexdl<br />
-attack on titan mod among us free download<br />
-attack on titan mod apk obb highly compressed<br />
-attack on titan mod for roblox free download<br />
-attack on titan mod apk unlimited everything<br />
-attack on titan mod for skyrim special edition<br />
-attack on titan mod apk happymod<br />
-attack on titan mod for gta 5 pc free download<br />
-attack on titan mod apk revdl<br />
-attack on titan mod for sims 4 free download<br />
-attack on titan mod apk all characters unlocked<br />
-attack on titan mod for fallout 4 xbox one<br />
-attack on titan mod apk android 1<br />
-attack on titan mod for terraria free download<br />
-attack on titan mod apk god mode<br />
-attack on titan mod for left 4 dead 2 free download<br />
-attack on titan mod apk unlimited coins and diamonds<br />
-attack on titan mod for stardew valley free download<br />
-attack on titan mod apk no ads<br />
-attack on titan mod for ark survival evolved free download<br />
-attack on titan mod apk unlimited health and stamina<br />
-attack on titan mod for subnautica free download<br />
-attack on titan mod apk latest update<br />
-attack on titan mod for starbound free download<br />
-attack on titan mod apk no verification<br />
-attack on titan mod for mount and blade warband free download<br />
-attack on titan mod apk unlimited blades and gas<br />
-attack on titan mod for rimworld free download<br />
-attack on titan mod apk no human verification<br />
-attack on titan mod for dragon age inquisition free download<br />
-attack on titan mod apk unlimited skills and items<br />
-attack on titan mod for xcom 2 free download<br />
-attack on titan mod apk one hit kill<br />
-attack on titan mod for witcher 3 free download<br />
-attack on titan mod apk online multiplayer<br />
-attack on titan mod for dark souls 3 free download<br />
-attack on titan mod apk all episodes unlocked<br />
-attack on titan mod for dying light free download<br />
-attack on titan mod apk original version<br />
-attack on titan mod for just cause 3 free download</p>
-<p>Some of the main features and characters of the game are:</p>
-<ul>
-<li>Play as Eren, Mikasa, Armin, Levi, Erwin, and more</li>
-<li>Use various weapons and skills to defeat different types of Titans</li>
-<li>Explore various locations from the anime, such as Shiganshina, Trost, Forest of Giant Trees, etc.</li>
-<li>Enjoy stunning graphics and sound effects that match the anime style</li>
-<li>Play solo or in cooperation with up to four players online</li>
-</ul>
-<h2>What is APKPure?</h2>
-<p>APKPure is a website that offers APK files for Android apps and games. APK stands for Android Package Kit, which is a file format that contains all the elements needed to install an app or game on your Android device. Normally, you would download apps and games from Google Play Store, which is the official source for Android apps. However, there are some reasons why you might want to use APKPure instead.</p>
-<p>Some of the benefits of using APKPure are:</p>
-<ul>
-<li>You can download apps and games that are not available in your country or region</li>
-<li>You can download apps and games that are not compatible with your device or Android version</li>
-<li>You can download apps and games that have been removed from Google Play Store</li>
-<li>You can download older versions of apps and games that work better for you</li>
-<li>You can download modded versions of apps and games that have extra features or enhancements</li>
-</ul>
-<p>However, there are also some risks of using APKPure that you should be aware of:</p>
-<ul>
-<li>You might download malicious or harmful files that can damage your device or steal your data</li>
-<li>You might violate the terms and conditions of the app or game developers and face legal consequences</li>
-<li>You might miss out on the updates and bug fixes that are provided by Google Play Store</li>
-<li>You might encounter compatibility or performance issues with some apps or games</li>
-</ul>
-<p>Therefore, you should always be careful and cautious when using APKPure or any other third-party source for Android apps and games. You should always check the ratings, reviews, and permissions of the files before downloading them. You should also scan the files with a reliable antivirus software before installing them. And you should always backup your data and device before trying any new app or game.</p>
-<h2>How to download and install Attack on Titan mod in APKPure?</h2>
-<p>If you want to try the mod version of Attack on Titan / A.O.T. Wings of Freedom, which has some extra features such as unlimited money, unlocked characters, and more, you can download it from APKPure website. Here are the steps to download and install Attack on Titan mod in APKPure:</p>
-<ol>
-<li>Go to the APKPure website and search for Attack on Titan mod or click on this link: [Attack on Titan Mod APK 1.1.2.12 - Download Attack on Titan Mod for Android]</li>
-<li>Click on the green Download APK button and wait for the file to be downloaded to your device</li>
-<li>Once the file is downloaded, go to your device settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from Google Play Store</li>
-<li>Locate the downloaded APK file in your device storage and tap on it to start the installation process</li>
-<li>Follow the instructions on the screen and grant the necessary permissions to the app</li>
-<li>Wait for the installation to finish and then launch the app from your app drawer or home screen</li>
-</ol>
-<p>Congratulations! You have successfully downloaded and installed Attack on Titan mod in APKPure. You can now enjoy playing the game with some extra features and enhancements.</p>
-<h2>Conclusion</h2>
-<p>In this article, we have shown you how to download and install Attack on Titan mod free from APKPure, one of the best sources for Android apps and games. We have also explained what is Attack on Titan, what is APKPure, and what are the benefits and risks of using APKPure. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.</p>
-<p>If you liked this article, please share it with your friends and family who might be interested in playing Attack on Titan mod. And if you want to read more articles like this, please subscribe to our newsletter or follow us on social media. Thank you for reading!</p>
-<h2>FAQs</h2>
-<h3>What is an APK file and why do I need it?</h3>
-<p>An APK file is a file format that contains all the elements needed to install an app or game on your Android device. You need an APK file when you want to download an app or game that is not available in Google Play Store or that is not compatible with your device or Android version.</p>
-<h3>Is APKPure safe and reliable?</h3>
-<p>APKPure is one of the most popular and trusted websites that offers APK files for Android apps and games. It has millions of users and thousands of positive reviews. However, like any other third-party source, it also has some risks of downloading malicious or harmful files that can damage your device or steal your data. Therefore, you should always be careful and cautious when using APKPure or any other third-party source for Android apps and games.</p>
-<h3>What are the requirements and compatibility of Attack on Titan mod?</h3>
-<p>The requirements and compatibility of Attack on Titan mod are as follows:</p>
-<ul>
-<li>The minimum Android version required is 4.0.3 (Ice Cream Sandwich) or higher</li>
-<li>The minimum RAM required is 1 GB or higher</li>
-<li>The minimum storage space required is 500 MB or higher</li>
-<li>The app supports English, Japanese, Chinese, Korean, French, German, Spanish, Italian, Portuguese, Russian, Turkish, Arabic, Indonesian, Thai, Vietnamese languages</li>
-<li>The app is compatible with most Android devices such as Samsung, Huawei, Xiaomi, LG, Sony, Motorola, etc.</li>
-</ul>
-<h3>What are the features and advantages of Attack on Titan mod?</h3>
-<p>The features and advantages of Attack on Titan mod are as follows:</p>
-<ul>
-<li>You can get unlimited money to buy weapons, items, upgrades, etc.</li>
-<li>You can unlock all the characters and skills to play as your favorite character</li>
-<li>You can enjoy the game without any ads or interruptions</li>
-<li>You can customize the game settings to suit your preferences and device performance</li>
-<li>You can experience the game with some extra enhancements and improvements</li>
-</ul>
-<h3>How can I update or uninstall Attack on Titan mod?</h3>
-<p>If you want to update or uninstall Attack on Titan mod, you can follow these steps:</p>
-<ol>
-<li>To update the app, you need to download the latest version of the APK file from APKPure website and install it over the existing app. You don't need to uninstall the previous version, but you should backup your data before updating</li>
-<li>To uninstall the app, you need to go to your device settings and find the app in the list of installed apps. Then, you need to tap on the app and select the option to uninstall it. You should also delete the APK file from your device storage</li>
-</ol></p> 401be4b1e0<br />
-<br />
-<br />
spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/txt_processors/zh.py
DELETED
@@ -1,117 +0,0 @@
-import re
-import jieba
-from pypinyin import pinyin, Style
-from text_to_speech.utils.text.text_norm import NSWNormalizer
-from text_to_speech.data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor, register_txt_processors
-from text_to_speech.utils.text.text_encoder import PUNCS, is_sil_phoneme
-
-ALL_SHENMU = ['zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j',
-              'q', 'x', 'r', 'z', 'c', 's', 'y', 'w']
-
-
-@register_txt_processors('zh')
-class TxtProcessor(BaseTxtProcessor):
-    table = {ord(f): ord(t) for f, t in zip(
-        u'：，。！？【】（）％＃＠＆１２３４５６７８９０',
-        u':,.!?[]()%#@&1234567890')}
-
-    @staticmethod
-    def sp_phonemes():
-        return ['|', '#']
-
-    @staticmethod
-    def preprocess_text(text):
-        text = text.translate(TxtProcessor.table)
-        text = NSWNormalizer(text).normalize(remove_punc=False).lower()
-        text = re.sub("[\'\"()]+", "", text)
-        text = re.sub("[-]+", " ", text)
-        text = re.sub(f"[^ A-Za-z\u4e00-\u9fff{PUNCS}]", "", text)
-        text = re.sub(f"([{PUNCS}])+", r"\1", text)  # !! -> !
-        text = re.sub(f"([{PUNCS}])", r" \1 ", text)
-        text = re.sub(rf"\s+", r"", text)
-        text = re.sub(rf"[A-Za-z]+", r"$", text)
-        return text
-
-    @classmethod
-    def pinyin_with_en(cls, txt, style):
-        x = pinyin(txt, style)
-        x = [t[0] for t in x]
-        x_ = []
-        for t in x:
-            if '$' not in t:
-                x_.append(t)
-            else:
-                x_ += list(t)
-        x_ = [t if t != '$' else 'ENG' for t in x_]
-        return x_
-
-    @classmethod
-    def process(cls, txt, pre_align_args):
-        txt = cls.preprocess_text(txt)
-        txt = txt.replace("嗯", "蒽")  # pypinyin recognizes the initial/final of 嗯 as '', which misaligns ph2word.
-        # https://blog.csdn.net/zhoulei124/article/details/89055403
-
-        shengmu = cls.pinyin_with_en(txt, style=Style.INITIALS)
-        yunmu = cls.pinyin_with_en(txt, style=
-        Style.FINALS_TONE3 if pre_align_args['use_tone'] else Style.FINALS)
-        assert len(shengmu) == len(yunmu)
-        for i in range(len(shengmu)):
-            if shengmu[i] == '' and yunmu[i] == '':
-                print(f"发现了一个声母韵母都是空的文字：{txt[i]}")
-        ph_list = []
-        for a, b in zip(shengmu, yunmu):
-            if a == b:
-                ph_list += [a]
-            else:
-                ph_list += [a + "%" + b]
-        seg_list = '#'.join(jieba.cut(txt))
-        assert len(ph_list) == len([s for s in seg_list if s != '#']), (ph_list, seg_list)
-
-        # insert word boundaries '#'
-        ph_list_ = []
-        seg_idx = 0
-        for p in ph_list:
-            if seg_list[seg_idx] == '#':
-                ph_list_.append('#')
-                seg_idx += 1
-            elif len(ph_list_) > 0:
-                ph_list_.append("|")
-            seg_idx += 1
-            finished = False
-            if not finished:
-                ph_list_ += [x for x in p.split("%") if x != '']
-
-        ph_list = ph_list_
-
-        # remove word-boundary markers around silence phonemes [..., '#', ',', '#', ...]
-        sil_phonemes = list(PUNCS) + TxtProcessor.sp_phonemes()
-        ph_list_ = []
-        for i in range(0, len(ph_list), 1):
-            if ph_list[i] != '#' or (ph_list[i - 1] not in sil_phonemes and ph_list[i + 1] not in sil_phonemes):
-                ph_list_.append(ph_list[i])
-        ph_list = ph_list_
-
-        txt_struct = [[w, []] for w in txt]
-        i = 0
-        for ph in ph_list:
-            if ph == '|' or ph == '#':
-                i += 1
-                continue
-            # elif ph in [',', '.']:
-            elif ph in [',', '.', '?', '!', ':']:
-                i += 1
-                txt_struct[i][1].append(ph)
-                i += 1
-                continue
-            txt_struct[i][1].append(ph)
-        # return ph_list, txt
-        txt_struct.insert(0, ['<BOS>', ['<BOS>']])
-        txt_struct.append(['<EOS>', ['<EOS>']])
-        return txt_struct, txt
-
-
-if __name__ == '__main__':
-    # t = 'simon演唱过后,simon还进行了simon精彩的文艺演出simon.'
-    t = '你当我傻啊？脑子那么大怎么塞进去？？？'
-    phs, txt = TxtProcessor.process(t, {'use_tone': True})
-    print(phs, txt)
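For readers unfamiliar with the two pypinyin passes that `process` runs over the input, a small standalone illustration of what `Style.INITIALS` and `Style.FINALS_TONE3` return (outputs shown as comments; assumes pypinyin is installed):

```python
from pypinyin import pinyin, Style

txt = "你好"
print(pinyin(txt, Style.INITIALS))      # [['n'], ['h']]  -- the shengmu (initials)
print(pinyin(txt, Style.FINALS_TONE3))  # [['i3'], ['ao3']]  -- yunmu (finals) with tone digit
```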
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/x_transformer.py
DELETED
@@ -1,641 +0,0 @@
-"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers"""
-import torch
-from torch import nn, einsum
-import torch.nn.functional as F
-from functools import partial
-from inspect import isfunction
-from collections import namedtuple
-from einops import rearrange, repeat, reduce
-
-# constants
-
-DEFAULT_DIM_HEAD = 64
-
-Intermediates = namedtuple('Intermediates', [
-    'pre_softmax_attn',
-    'post_softmax_attn'
-])
-
-LayerIntermediates = namedtuple('Intermediates', [
-    'hiddens',
-    'attn_intermediates'
-])
-
-
-class AbsolutePositionalEmbedding(nn.Module):
-    def __init__(self, dim, max_seq_len):
-        super().__init__()
-        self.emb = nn.Embedding(max_seq_len, dim)
-        self.init_()
-
-    def init_(self):
-        nn.init.normal_(self.emb.weight, std=0.02)
-
-    def forward(self, x):
-        n = torch.arange(x.shape[1], device=x.device)
-        return self.emb(n)[None, :, :]
-
-
-class FixedPositionalEmbedding(nn.Module):
-    def __init__(self, dim):
-        super().__init__()
-        inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
-        self.register_buffer('inv_freq', inv_freq)
-
-    def forward(self, x, seq_dim=1, offset=0):
-        t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
-        sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
-        emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
-        return emb[None, :, :]
-
-
-# helpers
-
-def exists(val):
-    return val is not None
-
-
-def default(val, d):
-    if exists(val):
-        return val
-    return d() if isfunction(d) else d
-
-
-def always(val):
-    def inner(*args, **kwargs):
-        return val
-    return inner
-
-
-def not_equals(val):
-    def inner(x):
-        return x != val
-    return inner
-
-
-def equals(val):
-    def inner(x):
-        return x == val
-    return inner
-
-
-def max_neg_value(tensor):
-    return -torch.finfo(tensor.dtype).max
-
-
-# keyword argument helpers
-
-def pick_and_pop(keys, d):
-    values = list(map(lambda key: d.pop(key), keys))
-    return dict(zip(keys, values))
-
-
-def group_dict_by_key(cond, d):
-    return_val = [dict(), dict()]
-    for key in d.keys():
-        match = bool(cond(key))
-        ind = int(not match)
-        return_val[ind][key] = d[key]
-    return (*return_val,)
-
-
-def string_begins_with(prefix, str):
-    return str.startswith(prefix)
-
-
-def group_by_key_prefix(prefix, d):
-    return group_dict_by_key(partial(string_begins_with, prefix), d)
-
-
-def groupby_prefix_and_trim(prefix, d):
-    kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
-    kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
-    return kwargs_without_prefix, kwargs
-
-
-# classes
-class Scale(nn.Module):
-    def __init__(self, value, fn):
-        super().__init__()
-        self.value = value
-        self.fn = fn
-
-    def forward(self, x, **kwargs):
-        x, *rest = self.fn(x, **kwargs)
-        return (x * self.value, *rest)
-
-
-class Rezero(nn.Module):
-    def __init__(self, fn):
-        super().__init__()
-        self.fn = fn
-        self.g = nn.Parameter(torch.zeros(1))
-
-    def forward(self, x, **kwargs):
-        x, *rest = self.fn(x, **kwargs)
-        return (x * self.g, *rest)
-
-
-class ScaleNorm(nn.Module):
-    def __init__(self, dim, eps=1e-5):
-        super().__init__()
-        self.scale = dim ** -0.5
-        self.eps = eps
-        self.g = nn.Parameter(torch.ones(1))
-
-    def forward(self, x):
-        norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
-        return x / norm.clamp(min=self.eps) * self.g
-
-
-class RMSNorm(nn.Module):
-    def __init__(self, dim, eps=1e-8):
-        super().__init__()
-        self.scale = dim ** -0.5
-        self.eps = eps
-        self.g = nn.Parameter(torch.ones(dim))
-
-    def forward(self, x):
-        norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
-        return x / norm.clamp(min=self.eps) * self.g
-
-
-class Residual(nn.Module):
-    def forward(self, x, residual):
-        return x + residual
-
-
-class GRUGating(nn.Module):
-    def __init__(self, dim):
-        super().__init__()
-        self.gru = nn.GRUCell(dim, dim)
-
-    def forward(self, x, residual):
-        gated_output = self.gru(
-            rearrange(x, 'b n d -> (b n) d'),
-            rearrange(residual, 'b n d -> (b n) d')
-        )
-
-        return gated_output.reshape_as(x)
-
-
-# feedforward
-
-class GEGLU(nn.Module):
-    def __init__(self, dim_in, dim_out):
-        super().__init__()
-        self.proj = nn.Linear(dim_in, dim_out * 2)
-
-    def forward(self, x):
-        x, gate = self.proj(x).chunk(2, dim=-1)
-        return x * F.gelu(gate)
-
-
-class FeedForward(nn.Module):
-    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
-        super().__init__()
-        inner_dim = int(dim * mult)
-        dim_out = default(dim_out, dim)
-        project_in = nn.Sequential(
-            nn.Linear(dim, inner_dim),
-            nn.GELU()
-        ) if not glu else GEGLU(dim, inner_dim)
-
-        self.net = nn.Sequential(
-            project_in,
-            nn.Dropout(dropout),
-            nn.Linear(inner_dim, dim_out)
-        )
-
-    def forward(self, x):
-        return self.net(x)
-
-
-# attention.
-class Attention(nn.Module):
-    def __init__(
-            self,
-            dim,
-            dim_head=DEFAULT_DIM_HEAD,
-            heads=8,
-            causal=False,
-            mask=None,
-            talking_heads=False,
-            sparse_topk=None,
-            use_entmax15=False,
-            num_mem_kv=0,
-            dropout=0.,
-            on_attn=False
-    ):
-        super().__init__()
-        if use_entmax15:
-            raise NotImplementedError("Check out entmax activation instead of softmax activation!")
-        self.scale = dim_head ** -0.5
-        self.heads = heads
-        self.causal = causal
-        self.mask = mask
-
-        inner_dim = dim_head * heads
-
-        self.to_q = nn.Linear(dim, inner_dim, bias=False)
-        self.to_k = nn.Linear(dim, inner_dim, bias=False)
-        self.to_v = nn.Linear(dim, inner_dim, bias=False)
-        self.dropout = nn.Dropout(dropout)
-
-        # talking heads
-        self.talking_heads = talking_heads
-        if talking_heads:
-            self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads))
-            self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads))
-
-        # explicit topk sparse attention
-        self.sparse_topk = sparse_topk
-
-        # entmax
-        #self.attn_fn = entmax15 if use_entmax15 else F.softmax
-        self.attn_fn = F.softmax
-
-        # add memory key / values
-        self.num_mem_kv = num_mem_kv
-        if num_mem_kv > 0:
-            self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
-            self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
-
-        # attention on attention
-        self.attn_on_attn = on_attn
-        self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim)
-
-    def forward(
-            self,
-            x,
-            context=None,
-            mask=None,
-            context_mask=None,
-            rel_pos=None,
-            sinusoidal_emb=None,
-            prev_attn=None,
-            mem=None
-    ):
-        b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device
-        kv_input = default(context, x)
-
-        q_input = x
-        k_input = kv_input
-        v_input = kv_input
-
-        if exists(mem):
-            k_input = torch.cat((mem, k_input), dim=-2)
-            v_input = torch.cat((mem, v_input), dim=-2)
-
-        if exists(sinusoidal_emb):
-            # in shortformer, the query would start at a position offset depending on the past cached memory
-            offset = k_input.shape[-2] - q_input.shape[-2]
-            q_input = q_input + sinusoidal_emb(q_input, offset=offset)
-            k_input = k_input + sinusoidal_emb(k_input)
-
-        q = self.to_q(q_input)
-        k = self.to_k(k_input)
-        v = self.to_v(v_input)
-
-        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
-
-        input_mask = None
-        if any(map(exists, (mask, context_mask))):
-            q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
-            k_mask = q_mask if not exists(context) else context_mask
-            k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
-            q_mask = rearrange(q_mask, 'b i -> b () i ()')
-            k_mask = rearrange(k_mask, 'b j -> b () () j')
-            input_mask = q_mask * k_mask
-
-        if self.num_mem_kv > 0:
-            mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
-            k = torch.cat((mem_k, k), dim=-2)
-            v = torch.cat((mem_v, v), dim=-2)
-            if exists(input_mask):
-                input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)
-
-        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
-        mask_value = max_neg_value(dots)
-
-        if exists(prev_attn):
-            dots = dots + prev_attn
-
-        pre_softmax_attn = dots
-
-        if talking_heads:
-            dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()
-
-        if exists(rel_pos):
-            dots = rel_pos(dots)
-
-        if exists(input_mask):
-            dots.masked_fill_(~input_mask, mask_value)
-            del input_mask
-
-        if self.causal:
-            i, j = dots.shape[-2:]
-            r = torch.arange(i, device=device)
-            mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
-            mask = F.pad(mask, (j - i, 0), value=False)
-            dots.masked_fill_(mask, mask_value)
-            del mask
-
-        if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
-            top, _ = dots.topk(self.sparse_topk, dim=-1)
-            vk = top[..., -1].unsqueeze(-1).expand_as(dots)
-            mask = dots < vk
-            dots.masked_fill_(mask, mask_value)
-            del mask
-
-        attn = self.attn_fn(dots, dim=-1)
-        post_softmax_attn = attn
-
-        attn = self.dropout(attn)
-
-        if talking_heads:
-            attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()
-
-        out = einsum('b h i j, b h j d -> b h i d', attn, v)
-        out = rearrange(out, 'b h n d -> b n (h d)')
-
-        intermediates = Intermediates(
-            pre_softmax_attn=pre_softmax_attn,
-            post_softmax_attn=post_softmax_attn
-        )
-
-        return self.to_out(out), intermediates
-
-
-class AttentionLayers(nn.Module):
-    def __init__(
-            self,
-            dim,
-            depth,
-            heads=8,
-            causal=False,
-            cross_attend=False,
-            only_cross=False,
-            use_scalenorm=False,
-            use_rmsnorm=False,
-            use_rezero=False,
-            rel_pos_num_buckets=32,
-            rel_pos_max_distance=128,
-            position_infused_attn=False,
-            custom_layers=None,
-            sandwich_coef=None,
-            par_ratio=None,
-            residual_attn=False,
-            cross_residual_attn=False,
-            macaron=False,
-            pre_norm=True,
-            gate_residual=False,
-            **kwargs
-    ):
-        super().__init__()
-        ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
-        attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs)
-
-        dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
-
-        self.dim = dim
-        self.depth = depth
-        self.layers = nn.ModuleList([])
-
-        self.has_pos_emb = position_infused_attn
-        self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
-        self.rotary_pos_emb = always(None)
-
-        assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
|
410 |
-
self.rel_pos = None
|
411 |
-
|
412 |
-
self.pre_norm = pre_norm
|
413 |
-
|
414 |
-
self.residual_attn = residual_attn
|
415 |
-
self.cross_residual_attn = cross_residual_attn
|
416 |
-
|
417 |
-
norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
|
418 |
-
norm_class = RMSNorm if use_rmsnorm else norm_class
|
419 |
-
norm_fn = partial(norm_class, dim)
|
420 |
-
|
421 |
-
norm_fn = nn.Identity if use_rezero else norm_fn
|
422 |
-
branch_fn = Rezero if use_rezero else None
|
423 |
-
|
424 |
-
if cross_attend and not only_cross:
|
425 |
-
default_block = ('a', 'c', 'f')
|
426 |
-
elif cross_attend and only_cross:
|
427 |
-
default_block = ('c', 'f')
|
428 |
-
else:
|
429 |
-
default_block = ('a', 'f')
|
430 |
-
|
431 |
-
if macaron:
|
432 |
-
default_block = ('f',) + default_block
|
433 |
-
|
434 |
-
if exists(custom_layers):
|
435 |
-
layer_types = custom_layers
|
436 |
-
elif exists(par_ratio):
|
437 |
-
par_depth = depth * len(default_block)
|
438 |
-
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
|
439 |
-
default_block = tuple(filter(not_equals('f'), default_block))
|
440 |
-
par_attn = par_depth // par_ratio
|
441 |
-
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
|
442 |
-
par_width = (depth_cut + depth_cut // par_attn) // par_attn
|
443 |
-
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
|
444 |
-
par_block = default_block + ('f',) * (par_width - len(default_block))
|
445 |
-
par_head = par_block * par_attn
|
446 |
-
layer_types = par_head + ('f',) * (par_depth - len(par_head))
|
447 |
-
elif exists(sandwich_coef):
|
448 |
-
assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
|
449 |
-
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
|
450 |
-
else:
|
451 |
-
layer_types = default_block * depth
|
452 |
-
|
453 |
-
self.layer_types = layer_types
|
454 |
-
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
|
455 |
-
|
456 |
-
for layer_type in self.layer_types:
|
457 |
-
if layer_type == 'a':
|
458 |
-
layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs)
|
459 |
-
elif layer_type == 'c':
|
460 |
-
layer = Attention(dim, heads=heads, **attn_kwargs)
|
461 |
-
elif layer_type == 'f':
|
462 |
-
layer = FeedForward(dim, **ff_kwargs)
|
463 |
-
layer = layer if not macaron else Scale(0.5, layer)
|
464 |
-
else:
|
465 |
-
raise Exception(f'invalid layer type {layer_type}')
|
466 |
-
|
467 |
-
if isinstance(layer, Attention) and exists(branch_fn):
|
468 |
-
layer = branch_fn(layer)
|
469 |
-
|
470 |
-
if gate_residual:
|
471 |
-
residual_fn = GRUGating(dim)
|
472 |
-
else:
|
473 |
-
residual_fn = Residual()
|
474 |
-
|
475 |
-
self.layers.append(nn.ModuleList([
|
476 |
-
norm_fn(),
|
477 |
-
layer,
|
478 |
-
residual_fn
|
479 |
-
]))
|
480 |
-
|
481 |
-
def forward(
|
482 |
-
self,
|
483 |
-
x,
|
484 |
-
context=None,
|
485 |
-
mask=None,
|
486 |
-
context_mask=None,
|
487 |
-
mems=None,
|
488 |
-
return_hiddens=False
|
489 |
-
):
|
490 |
-
hiddens = []
|
491 |
-
intermediates = []
|
492 |
-
prev_attn = None
|
493 |
-
prev_cross_attn = None
|
494 |
-
|
495 |
-
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
|
496 |
-
|
497 |
-
for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
|
498 |
-
is_last = ind == (len(self.layers) - 1)
|
499 |
-
|
500 |
-
if layer_type == 'a':
|
501 |
-
hiddens.append(x)
|
502 |
-
layer_mem = mems.pop(0)
|
503 |
-
|
504 |
-
residual = x
|
505 |
-
|
506 |
-
if self.pre_norm:
|
507 |
-
x = norm(x)
|
508 |
-
|
509 |
-
if layer_type == 'a':
|
510 |
-
out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos,
|
511 |
-
prev_attn=prev_attn, mem=layer_mem)
|
512 |
-
elif layer_type == 'c':
|
513 |
-
out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
|
514 |
-
elif layer_type == 'f':
|
515 |
-
out = block(x)
|
516 |
-
|
517 |
-
x = residual_fn(out, residual)
|
518 |
-
|
519 |
-
if layer_type in ('a', 'c'):
|
520 |
-
intermediates.append(inter)
|
521 |
-
|
522 |
-
if layer_type == 'a' and self.residual_attn:
|
523 |
-
prev_attn = inter.pre_softmax_attn
|
524 |
-
elif layer_type == 'c' and self.cross_residual_attn:
|
525 |
-
prev_cross_attn = inter.pre_softmax_attn
|
526 |
-
|
527 |
-
if not self.pre_norm and not is_last:
|
528 |
-
x = norm(x)
|
529 |
-
|
530 |
-
if return_hiddens:
|
531 |
-
intermediates = LayerIntermediates(
|
532 |
-
hiddens=hiddens,
|
533 |
-
attn_intermediates=intermediates
|
534 |
-
)
|
535 |
-
|
536 |
-
return x, intermediates
|
537 |
-
|
538 |
-
return x
|
539 |
-
|
540 |
-
|
541 |
-
class Encoder(AttentionLayers):
|
542 |
-
def __init__(self, **kwargs):
|
543 |
-
assert 'causal' not in kwargs, 'cannot set causality on encoder'
|
544 |
-
super().__init__(causal=False, **kwargs)
|
545 |
-
|
546 |
-
|
547 |
-
|
548 |
-
class TransformerWrapper(nn.Module):
|
549 |
-
def __init__(
|
550 |
-
self,
|
551 |
-
*,
|
552 |
-
num_tokens,
|
553 |
-
max_seq_len,
|
554 |
-
attn_layers,
|
555 |
-
emb_dim=None,
|
556 |
-
max_mem_len=0.,
|
557 |
-
emb_dropout=0.,
|
558 |
-
num_memory_tokens=None,
|
559 |
-
tie_embedding=False,
|
560 |
-
use_pos_emb=True
|
561 |
-
):
|
562 |
-
super().__init__()
|
563 |
-
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
|
564 |
-
|
565 |
-
dim = attn_layers.dim
|
566 |
-
emb_dim = default(emb_dim, dim)
|
567 |
-
|
568 |
-
self.max_seq_len = max_seq_len
|
569 |
-
self.max_mem_len = max_mem_len
|
570 |
-
self.num_tokens = num_tokens
|
571 |
-
|
572 |
-
self.token_emb = nn.Embedding(num_tokens, emb_dim)
|
573 |
-
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (
|
574 |
-
use_pos_emb and not attn_layers.has_pos_emb) else always(0)
|
575 |
-
self.emb_dropout = nn.Dropout(emb_dropout)
|
576 |
-
|
577 |
-
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
|
578 |
-
self.attn_layers = attn_layers
|
579 |
-
self.norm = nn.LayerNorm(dim)
|
580 |
-
|
581 |
-
self.init_()
|
582 |
-
|
583 |
-
self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
|
584 |
-
|
585 |
-
# memory tokens (like [cls]) from Memory Transformers paper
|
586 |
-
num_memory_tokens = default(num_memory_tokens, 0)
|
587 |
-
self.num_memory_tokens = num_memory_tokens
|
588 |
-
if num_memory_tokens > 0:
|
589 |
-
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
|
590 |
-
|
591 |
-
# let funnel encoder know number of memory tokens, if specified
|
592 |
-
if hasattr(attn_layers, 'num_memory_tokens'):
|
593 |
-
attn_layers.num_memory_tokens = num_memory_tokens
|
594 |
-
|
595 |
-
def init_(self):
|
596 |
-
nn.init.normal_(self.token_emb.weight, std=0.02)
|
597 |
-
|
598 |
-
def forward(
|
599 |
-
self,
|
600 |
-
x,
|
601 |
-
return_embeddings=False,
|
602 |
-
mask=None,
|
603 |
-
return_mems=False,
|
604 |
-
return_attn=False,
|
605 |
-
mems=None,
|
606 |
-
**kwargs
|
607 |
-
):
|
608 |
-
b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
|
609 |
-
x = self.token_emb(x)
|
610 |
-
x += self.pos_emb(x)
|
611 |
-
x = self.emb_dropout(x)
|
612 |
-
|
613 |
-
x = self.project_emb(x)
|
614 |
-
|
615 |
-
if num_mem > 0:
|
616 |
-
mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)
|
617 |
-
x = torch.cat((mem, x), dim=1)
|
618 |
-
|
619 |
-
# auto-handle masking after appending memory tokens
|
620 |
-
if exists(mask):
|
621 |
-
mask = F.pad(mask, (num_mem, 0), value=True)
|
622 |
-
|
623 |
-
x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
|
624 |
-
x = self.norm(x)
|
625 |
-
|
626 |
-
mem, x = x[:, :num_mem], x[:, num_mem:]
|
627 |
-
|
628 |
-
out = self.to_logits(x) if not return_embeddings else x
|
629 |
-
|
630 |
-
if return_mems:
|
631 |
-
hiddens = intermediates.hiddens
|
632 |
-
new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
|
633 |
-
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
|
634 |
-
return out, new_mems
|
635 |
-
|
636 |
-
if return_attn:
|
637 |
-
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
|
638 |
-
return out, attn_maps
|
639 |
-
|
640 |
-
return out
|
641 |
-
|
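For reference, a minimal usage sketch of the classes deleted above (not part of the commit; the vocabulary size, context length, and dimensions are illustrative, and it assumes the helpers defined earlier in x_transformer.py, such as FeedForward and the positional embeddings, are in scope):

import torch

# Build an encoder-only transformer: Encoder supplies the attention stack,
# TransformerWrapper adds token/positional embeddings and the logits head.
model = TransformerWrapper(
    num_tokens=10000,                               # illustrative vocabulary size
    max_seq_len=256,                                # illustrative context length
    attn_layers=Encoder(dim=512, depth=6, heads=8),
)

tokens = torch.randint(0, 10000, (1, 256))          # (batch, seq_len) token ids
logits = model(tokens)                              # -> (1, 256, 10000)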
spaces/AIGText/GlyphControl/annotator/render_images.py
DELETED
@@ -1,95 +0,0 @@
from PIL import Image, ImageFont, ImageDraw
import random

# resize height to image_height first, then shrink or pad to image_width
def resize_and_pad_image(pil_image, image_size):

    if isinstance(image_size, (tuple, list)) and len(image_size) == 2:
        image_width, image_height = image_size
    elif isinstance(image_size, int):
        image_width = image_height = image_size
    else:
        raise ValueError(f"Image size should be int or list/tuple of int not {image_size}")

    while pil_image.size[1] >= 2 * image_height:
        pil_image = pil_image.resize(
            tuple(x // 2 for x in pil_image.size), resample=Image.BOX
        )

    scale = image_height / pil_image.size[1]
    pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC)

    # shrink
    if pil_image.size[0] > image_width:
        pil_image = pil_image.resize((image_width, image_height), resample=Image.BICUBIC)

    # padding
    if pil_image.size[0] < image_width:
        img = Image.new(mode="RGB", size=(image_width, image_height), color="white")
        width, _ = pil_image.size
        img.paste(pil_image, ((image_width - width) // 2, 0))
        pil_image = img

    return pil_image

def render_text_image_custom(image_size, bboxes, rendered_txt_values, num_rows_values, align="center"):
    # aligns = ["center", "left", "right"]
    """Render text image based on the list of bbox called `bboxes`.
    Supports a font that can be chosen.
    """
    print(image_size, bboxes, rendered_txt_values, num_rows_values, align)
    background = Image.new("RGB", image_size, "white")
    font = ImageFont.truetype("calibri.ttf", encoding='utf-8', size=512)

    for text, bbox, num_rows in zip(rendered_txt_values, bboxes, num_rows_values):

        if len(text) == 0:
            continue

        text = text.strip()
        if num_rows != 1:
            word_tokens = text.split()
            num_tokens = len(word_tokens)
            index_list = range(1, num_tokens + 1)
            if num_tokens > num_rows:
                index_list = random.sample(index_list, num_rows)
                index_list.sort()
            line_list = []
            start_idx = 0
            for index in index_list:
                line_list.append(
                    " ".join(word_tokens[start_idx: index])
                )
                start_idx = index
            text = "\n".join(line_list)

        if 'ratio' not in bbox or bbox['ratio'] == 0 or bbox['ratio'] < 1e-4:
            image4ratio = Image.new("RGB", (512, 512), "white")
            draw = ImageDraw.Draw(image4ratio)
            _, _, w, h = draw.textbbox(xy=(0, 0), text=text, font=font)
            ratio = w / h
        else:
            ratio = bbox['ratio']

        width = int(bbox['width'] * image_size[1])
        height = int(width / ratio)
        top_left_x = int(bbox['top_left_x'] * image_size[0])
        top_left_y = int(bbox['top_left_y'] * image_size[1])
        yaw = bbox['yaw']

        text_image = Image.new("RGB", (512, 512), "white")
        draw = ImageDraw.Draw(text_image)
        x, y, w, h = draw.textbbox(xy=(0, 0), text=text, font=font)
        text_image = Image.new("RGB", (w, h), "white")
        draw = ImageDraw.Draw(text_image)
        draw.text((-x / 2, -y / 2), text, "black", font=font, align=align)
        text_image = resize_and_pad_image(text_image, (width, height))
        text_image = text_image.rotate(angle=-yaw, expand=True, fillcolor="white")
        # image = Image.new("RGB", (w, h), "white")
        # draw = ImageDraw.Draw(image)

        background.paste(text_image, (top_left_x, top_left_y))

    return background
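An illustrative call to the renderer above (not from the commit; the bbox values are made up, coordinates and width are fractions of the canvas size, and calibri.ttf must be resolvable by PIL as the code requires):

bbox = {"top_left_x": 0.1, "top_left_y": 0.4, "width": 0.5, "yaw": 0, "ratio": 0}
image = render_text_image_custom(
    image_size=(512, 512),               # (width, height) of the white canvas
    bboxes=[bbox],                       # one bbox dict per rendered string
    rendered_txt_values=["hello world"],
    num_rows_values=[1],                 # keep the text on a single line
)
image.save("rendered.png")

Setting "ratio" to 0 takes the branch that measures the text's own width/height ratio instead of using a caller-supplied one.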
spaces/Abhilashvj/planogram-compliance/classify/train.py
DELETED
@@ -1,537 +0,0 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Train a YOLOv5 classifier model on a classification dataset

Usage - Single-GPU training:
    $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224

Usage - Multi-GPU DDP training:
    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3

Datasets:           --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
YOLOv5-cls models:  --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html
"""

import argparse
import os
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path

import torch
import torch.distributed as dist
import torch.hub as hub
import torch.optim.lr_scheduler as lr_scheduler
import torchvision
from torch.cuda import amp
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from classify import val as validate
from models.experimental import attempt_load
from models.yolo import ClassificationModel, DetectionModel
from utils.dataloaders import create_classification_dataloader
from utils.general import (
    DATASETS_DIR,
    LOGGER,
    TQDM_BAR_FORMAT,
    WorkingDirectory,
    check_git_info,
    check_git_status,
    check_requirements,
    colorstr,
    download,
    increment_path,
    init_seeds,
    print_args,
    yaml_save,
)
from utils.loggers import GenericLogger
from utils.plots import imshow_cls
from utils.torch_utils import (
    ModelEMA,
    model_info,
    reshape_classifier_output,
    select_device,
    smart_DDP,
    smart_optimizer,
    smartCrossEntropyLoss,
    torch_distributed_zero_first,
)

LOCAL_RANK = int(
    os.getenv("LOCAL_RANK", -1)
)  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv("RANK", -1))
WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1))
GIT_INFO = check_git_info()


def train(opt, device):
    init_seeds(opt.seed + 1 + RANK, deterministic=True)
    save_dir, data, bs, epochs, nw, imgsz, pretrained = (
        opt.save_dir,
        Path(opt.data),
        opt.batch_size,
        opt.epochs,
        min(os.cpu_count() - 1, opt.workers),
        opt.imgsz,
        str(opt.pretrained).lower() == "true",
    )
    cuda = device.type != "cpu"

    # Directories
    wdir = save_dir / "weights"
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last, best = wdir / "last.pt", wdir / "best.pt"

    # Save run settings
    yaml_save(save_dir / "opt.yaml", vars(opt))

    # Logger
    logger = (
        GenericLogger(opt=opt, console_logger=LOGGER)
        if RANK in {-1, 0}
        else None
    )

    # Download Dataset
    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
        data_dir = data if data.is_dir() else (DATASETS_DIR / data)
        if not data_dir.is_dir():
            LOGGER.info(
                f"\nDataset not found ⚠️, missing path {data_dir}, attempting download..."
            )
            t = time.time()
            if str(data) == "imagenet":
                subprocess.run(
                    f"bash {ROOT / 'data/scripts/get_imagenet.sh'}",
                    shell=True,
                    check=True,
                )
            else:
                url = f"https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip"
                download(url, dir=data_dir.parent)
            s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
            LOGGER.info(s)

    # Dataloaders
    nc = len(
        [x for x in (data_dir / "train").glob("*") if x.is_dir()]
    )  # number of classes
    trainloader = create_classification_dataloader(
        path=data_dir / "train",
        imgsz=imgsz,
        batch_size=bs // WORLD_SIZE,
        augment=True,
        cache=opt.cache,
        rank=LOCAL_RANK,
        workers=nw,
    )

    test_dir = (
        data_dir / "test" if (data_dir / "test").exists() else data_dir / "val"
    )  # data/test or data/val
    if RANK in {-1, 0}:
        testloader = create_classification_dataloader(
            path=test_dir,
            imgsz=imgsz,
            batch_size=bs // WORLD_SIZE * 2,
            augment=False,
            cache=opt.cache,
            rank=-1,
            workers=nw,
        )

    # Model
    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
        if Path(opt.model).is_file() or opt.model.endswith(".pt"):
            model = attempt_load(opt.model, device="cpu", fuse=False)
        elif (
            opt.model in torchvision.models.__dict__
        ):  # TorchVision models i.e. resnet50, efficientnet_b0
            model = torchvision.models.__dict__[opt.model](
                weights="IMAGENET1K_V1" if pretrained else None
            )
        else:
            m = hub.list(
                "ultralytics/yolov5"
            )  # + hub.list('pytorch/vision')  # models
            raise ModuleNotFoundError(
                f"--model {opt.model} not found. Available models are: \n"
                + "\n".join(m)
            )
        if isinstance(model, DetectionModel):
            LOGGER.warning(
                "WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'"
            )
            model = ClassificationModel(
                model=model, nc=nc, cutoff=opt.cutoff or 10
            )  # convert to classification model
    reshape_classifier_output(model, nc)  # update class count
    for m in model.modules():
        if not pretrained and hasattr(m, "reset_parameters"):
            m.reset_parameters()
        if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:
            m.p = opt.dropout  # set dropout
    for p in model.parameters():
        p.requires_grad = True  # for training
    model = model.to(device)

    # Info
    if RANK in {-1, 0}:
        model.names = trainloader.dataset.classes  # attach class names
        model.transforms = (
            testloader.dataset.torch_transforms
        )  # attach inference transforms
        model_info(model)
        if opt.verbose:
            LOGGER.info(model)
        images, labels = next(iter(trainloader))
        file = imshow_cls(
            images[:25],
            labels[:25],
            names=model.names,
            f=save_dir / "train_images.jpg",
        )
        logger.log_images(file, name="Train Examples")
        logger.log_graph(model, imgsz)  # log model

    # Optimizer
    optimizer = smart_optimizer(
        model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay
    )

    # Scheduler
    lrf = 0.01  # final lr (fraction of lr0)
    # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf  # cosine
    lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf  # linear
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
    #                                     final_div_factor=1 / 25 / lrf)

    # EMA
    ema = ModelEMA(model) if RANK in {-1, 0} else None

    # DDP mode
    if cuda and RANK != -1:
        model = smart_DDP(model)

    # Train
    t0 = time.time()
    criterion = smartCrossEntropyLoss(
        label_smoothing=opt.label_smoothing
    )  # loss function
    best_fitness = 0.0
    scaler = amp.GradScaler(enabled=cuda)
    val = test_dir.stem  # 'val' or 'test'
    LOGGER.info(
        f"Image sizes {imgsz} train, {imgsz} test\n"
        f"Using {nw * WORLD_SIZE} dataloader workers\n"
        f"Logging results to {colorstr('bold', save_dir)}\n"
        f"Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n"
        f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}"
    )
    for epoch in range(epochs):  # loop over the dataset multiple times
        tloss, vloss, fitness = 0.0, 0.0, 0.0  # train loss, val loss, fitness
        model.train()
        if RANK != -1:
            trainloader.sampler.set_epoch(epoch)
        pbar = enumerate(trainloader)
        if RANK in {-1, 0}:
            pbar = tqdm(
                enumerate(trainloader),
                total=len(trainloader),
                bar_format=TQDM_BAR_FORMAT,
            )
        for i, (images, labels) in pbar:  # progress bar
            images, labels = images.to(device, non_blocking=True), labels.to(
                device
            )

            # Forward
            with amp.autocast(enabled=cuda):  # stability issues when enabled
                loss = criterion(model(images), labels)

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            scaler.unscale_(optimizer)  # unscale gradients
            torch.nn.utils.clip_grad_norm_(
                model.parameters(), max_norm=10.0
            )  # clip gradients
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
            if ema:
                ema.update(model)

            if RANK in {-1, 0}:
                # Print
                tloss = (tloss * i + loss.item()) / (
                    i + 1
                )  # update mean losses
                mem = "%.3gG" % (
                    torch.cuda.memory_reserved() / 1e9
                    if torch.cuda.is_available()
                    else 0
                )  # (GB)
                pbar.desc = (
                    f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}"
                    + " " * 36
                )

                # Test
                if i == len(pbar) - 1:  # last batch
                    top1, top5, vloss = validate.run(
                        model=ema.ema,
                        dataloader=testloader,
                        criterion=criterion,
                        pbar=pbar,
                    )  # test accuracy, loss
                    fitness = top1  # define fitness as top1 accuracy

        # Scheduler
        scheduler.step()

        # Log metrics
        if RANK in {-1, 0}:
            # Best fitness
            if fitness > best_fitness:
                best_fitness = fitness

            # Log
            metrics = {
                "train/loss": tloss,
                f"{val}/loss": vloss,
                "metrics/accuracy_top1": top1,
                "metrics/accuracy_top5": top5,
                "lr/0": optimizer.param_groups[0]["lr"],
            }  # learning rate
            logger.log_metrics(metrics, epoch)

            # Save model
            final_epoch = epoch + 1 == epochs
            if (not opt.nosave) or final_epoch:
                ckpt = {
                    "epoch": epoch,
                    "best_fitness": best_fitness,
                    "model": deepcopy(
                        ema.ema
                    ).half(),  # deepcopy(de_parallel(model)).half(),
                    "ema": None,  # deepcopy(ema.ema).half(),
                    "updates": ema.updates,
                    "optimizer": None,  # optimizer.state_dict(),
                    "opt": vars(opt),
                    "git": GIT_INFO,  # {remote, branch, commit} if a git repo
                    "date": datetime.now().isoformat(),
                }

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fitness:
                    torch.save(ckpt, best)
                del ckpt

    # Train complete
    if RANK in {-1, 0} and final_epoch:
        LOGGER.info(
            f"\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)"
            f"\nResults saved to {colorstr('bold', save_dir)}"
            f"\nPredict:         python classify/predict.py --weights {best} --source im.jpg"
            f"\nValidate:        python classify/val.py --weights {best} --data {data_dir}"
            f"\nExport:          python export.py --weights {best} --include onnx"
            f"\nPyTorch Hub:     model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')"
            f"\nVisualize:       https://netron.app\n"
        )

        # Plot examples
        images, labels = (
            x[:25] for x in next(iter(testloader))
        )  # first 25 images and labels
        pred = torch.max(ema.ema(images.to(device)), 1)[1]
        file = imshow_cls(
            images,
            labels,
            pred,
            model.names,
            verbose=False,
            f=save_dir / "test_images.jpg",
        )

        # Log results
        meta = {
            "epochs": epochs,
            "top1_acc": best_fitness,
            "date": datetime.now().isoformat(),
        }
        logger.log_images(
            file, name="Test Examples (true-predicted)", epoch=epoch
        )
        logger.log_model(best, epochs, metadata=meta)


def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        type=str,
        default="yolov5s-cls.pt",
        help="initial weights path",
    )
    parser.add_argument(
        "--data",
        type=str,
        default="imagenette160",
        help="cifar10, cifar100, mnist, imagenet, ...",
    )
    parser.add_argument(
        "--epochs", type=int, default=10, help="total training epochs"
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=64,
        help="total batch size for all GPUs",
    )
    parser.add_argument(
        "--imgsz",
        "--img",
        "--img-size",
        type=int,
        default=224,
        help="train, val image size (pixels)",
    )
    parser.add_argument(
        "--nosave", action="store_true", help="only save final checkpoint"
    )
    parser.add_argument(
        "--cache",
        type=str,
        nargs="?",
        const="ram",
        help='--cache images in "ram" (default) or "disk"',
    )
    parser.add_argument(
        "--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu"
    )
    parser.add_argument(
        "--workers",
        type=int,
        default=8,
        help="max dataloader workers (per RANK in DDP mode)",
    )
    parser.add_argument(
        "--project",
        default=ROOT / "runs/train-cls",
        help="save to project/name",
    )
    parser.add_argument("--name", default="exp", help="save to project/name")
    parser.add_argument(
        "--exist-ok",
        action="store_true",
        help="existing project/name ok, do not increment",
    )
    parser.add_argument(
        "--pretrained",
        nargs="?",
        const=True,
        default=True,
        help="start from i.e. --pretrained False",
    )
    parser.add_argument(
        "--optimizer",
        choices=["SGD", "Adam", "AdamW", "RMSProp"],
        default="Adam",
        help="optimizer",
    )
    parser.add_argument(
        "--lr0", type=float, default=0.001, help="initial learning rate"
    )
    parser.add_argument(
        "--decay", type=float, default=5e-5, help="weight decay"
    )
    parser.add_argument(
        "--label-smoothing",
        type=float,
        default=0.1,
        help="Label smoothing epsilon",
    )
    parser.add_argument(
        "--cutoff",
        type=int,
        default=None,
        help="Model layer cutoff index for Classify() head",
    )
    parser.add_argument(
        "--dropout", type=float, default=None, help="Dropout (fraction)"
    )
    parser.add_argument("--verbose", action="store_true", help="Verbose mode")
    parser.add_argument(
        "--seed", type=int, default=0, help="Global training seed"
    )
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="Automatic DDP Multi-GPU argument, do not modify",
    )
    return parser.parse_known_args()[0] if known else parser.parse_args()


def main(opt):
    # Checks
    if RANK in {-1, 0}:
        print_args(vars(opt))
        check_git_status()
        check_requirements()

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        assert (
            opt.batch_size != -1
        ), "AutoBatch is coming soon for classification, please pass a valid --batch-size"
        assert (
            opt.batch_size % WORLD_SIZE == 0
        ), f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE"
        assert (
            torch.cuda.device_count() > LOCAL_RANK
        ), "insufficient CUDA devices for DDP command"
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device("cuda", LOCAL_RANK)
        dist.init_process_group(
            backend="nccl" if dist.is_nccl_available() else "gloo"
        )

    # Parameters
    opt.save_dir = increment_path(
        Path(opt.project) / opt.name, exist_ok=opt.exist_ok
    )  # increment run

    # Train
    train(opt, device)


def run(**kwargs):
    # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m')
    opt = parse_opt(True)
    for k, v in kwargs.items():
        setattr(opt, k, v)
    main(opt)
    return opt


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
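Beyond the CLI usage shown in the module docstring, the script exposes a programmatic entry point; a sketch mirroring the comment inside run() above (argument values are the script's own defaults, shortened here):

from classify import train

# Equivalent to: python classify/train.py --data imagenette160 --model yolov5s-cls.pt --epochs 5 --img 224
opt = train.run(data="imagenette160", model="yolov5s-cls.pt", epochs=5, imgsz=224)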
spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Equing.py
DELETED
@@ -1,81 +0,0 @@
from __future__ import annotations

import json
from abc import ABC, abstractmethod

import requests

from ...typing import Any, CreateResult
from ..base_provider import BaseProvider


class Equing(BaseProvider):
    url: str = 'https://next.eqing.tech/'
    working = False
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    @staticmethod
    @abstractmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            'authority'         : 'next.eqing.tech',
            'accept'            : 'text/event-stream',
            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control'     : 'no-cache',
            'content-type'      : 'application/json',
            'origin'            : 'https://next.eqing.tech',
            'plugins'           : '0',
            'pragma'            : 'no-cache',
            'referer'           : 'https://next.eqing.tech/',
            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch'         : 'false',
            'x-requested-with'  : 'XMLHttpRequest'
        }

        json_data = {
            'messages'          : messages,
            'stream'            : stream,
            'model'             : model,
            'temperature'       : kwargs.get('temperature', 0.5),
            'presence_penalty'  : kwargs.get('presence_penalty', 0),
            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
            'top_p'             : kwargs.get('top_p', 1),
        }

        response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
                                 headers=headers, json=json_data, stream=stream)

        if not stream:
            yield response.json()["choices"][0]["message"]["content"]
            return

        for line in response.iter_content(chunk_size=1024):
            if line:
                if b'content' in line:
                    line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                    token = line_json['choices'][0]['delta'].get('content')
                    if token:
                        yield token

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
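A hypothetical invocation of the provider above (illustrative only: the class is flagged working = False, so the endpoint is not expected to respond):

messages = [{"role": "user", "content": "Hello"}]
# create_completion is a generator; with stream=True it yields tokens as they arrive
for token in Equing.create_completion(model="gpt-3.5-turbo", messages=messages, stream=True):
    print(token, end="", flush=True)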
spaces/Aditya9790/yolo7-object-tracking/models/experimental.py
DELETED
@@ -1,272 +0,0 @@
import numpy as np
import random
import torch
import torch.nn as nn

from models.common import Conv, DWConv
from utils.google_utils import attempt_download


class CrossConv(nn.Module):
    # Cross Convolution Downsample
    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super(CrossConv, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, (1, k), (1, s))
        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class Sum(nn.Module):
    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
    def __init__(self, n, weight=False):  # n: number of inputs
        super(Sum, self).__init__()
        self.weight = weight  # apply weights boolean
        self.iter = range(n - 1)  # iter object
        if weight:
            self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)  # layer weights

    def forward(self, x):
        y = x[0]  # no weight
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            for i in self.iter:
                y = y + x[i + 1] * w[i]
        else:
            for i in self.iter:
                y = y + x[i + 1]
        return y


class MixConv2d(nn.Module):
    # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        super(MixConv2d, self).__init__()
        groups = len(k)
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * groups
            a = np.eye(groups + 1, groups, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b

        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))


class Ensemble(nn.ModuleList):
    # Ensemble of models
    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        y = []
        for module in self:
            y.append(module(x, augment)[0])
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.stack(y).mean(0)  # mean ensemble
        y = torch.cat(y, 1)  # nms ensemble
        return y, None  # inference, train output


class ORT_NMS(torch.autograd.Function):
    '''ONNX-Runtime NMS operation'''
    @staticmethod
    def forward(ctx,
                boxes,
                scores,
                max_output_boxes_per_class=torch.tensor([100]),
                iou_threshold=torch.tensor([0.45]),
                score_threshold=torch.tensor([0.25])):
        device = boxes.device
        batch = scores.shape[0]
        num_det = random.randint(0, 100)
        batches = torch.randint(0, batch, (num_det,)).sort()[0].to(device)
        idxs = torch.arange(100, 100 + num_det).to(device)
        zeros = torch.zeros((num_det,), dtype=torch.int64).to(device)
        selected_indices = torch.cat([batches[None], zeros[None], idxs[None]], 0).T.contiguous()
        selected_indices = selected_indices.to(torch.int64)
        return selected_indices

    @staticmethod
    def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold):
        return g.op("NonMaxSuppression", boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)


class TRT_NMS(torch.autograd.Function):
    '''TensorRT NMS operation'''
    @staticmethod
    def forward(
        ctx,
        boxes,
        scores,
        background_class=-1,
        box_coding=1,
        iou_threshold=0.45,
        max_output_boxes=100,
        plugin_version="1",
        score_activation=0,
        score_threshold=0.25,
    ):
        batch_size, num_boxes, num_classes = scores.shape
        num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
        det_boxes = torch.randn(batch_size, max_output_boxes, 4)
        det_scores = torch.randn(batch_size, max_output_boxes)
        det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
        return num_det, det_boxes, det_scores, det_classes

    @staticmethod
    def symbolic(g,
                 boxes,
                 scores,
                 background_class=-1,
                 box_coding=1,
                 iou_threshold=0.45,
                 max_output_boxes=100,
                 plugin_version="1",
                 score_activation=0,
                 score_threshold=0.25):
        out = g.op("TRT::EfficientNMS_TRT",
                   boxes,
                   scores,
                   background_class_i=background_class,
                   box_coding_i=box_coding,
                   iou_threshold_f=iou_threshold,
                   max_output_boxes_i=max_output_boxes,
                   plugin_version_s=plugin_version,
                   score_activation_i=score_activation,
                   score_threshold_f=score_threshold,
                   outputs=4)
        nums, boxes, scores, classes = out
        return nums, boxes, scores, classes


class ONNX_ORT(nn.Module):
    '''onnx module with ONNX-Runtime NMS operation.'''
    def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=640, device=None, n_classes=80):
        super().__init__()
        self.device = device if device else torch.device("cpu")
        self.max_obj = torch.tensor([max_obj]).to(device)
        self.iou_threshold = torch.tensor([iou_thres]).to(device)
        self.score_threshold = torch.tensor([score_thres]).to(device)
        self.max_wh = max_wh  # if max_wh != 0 : non-agnostic else : agnostic
        self.convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]],
                                           dtype=torch.float32,
                                           device=self.device)
        self.n_classes = n_classes

    def forward(self, x):
        boxes = x[:, :, :4]
        conf = x[:, :, 4:5]
        scores = x[:, :, 5:]
        if self.n_classes == 1:
            scores = conf  # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
                           # so there is no need to multiplicate.
        else:
            scores *= conf  # conf = obj_conf * cls_conf
        boxes @= self.convert_matrix
        max_score, category_id = scores.max(2, keepdim=True)
        dis = category_id.float() * self.max_wh
        nmsbox = boxes + dis
        max_score_tp = max_score.transpose(1, 2).contiguous()
        selected_indices = ORT_NMS.apply(nmsbox, max_score_tp, self.max_obj, self.iou_threshold, self.score_threshold)
        X, Y = selected_indices[:, 0], selected_indices[:, 2]
        selected_boxes = boxes[X, Y, :]
        selected_categories = category_id[X, Y, :].float()
        selected_scores = max_score[X, Y, :]
        X = X.unsqueeze(1).float()
        return torch.cat([X, selected_boxes, selected_categories, selected_scores], 1)


class ONNX_TRT(nn.Module):
    '''onnx module with TensorRT NMS operation.'''
    def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None, n_classes=80):
        super().__init__()
        assert max_wh is None
        self.device = device if device else torch.device('cpu')
        self.background_class = -1,
        self.box_coding = 1,
        self.iou_threshold = iou_thres
        self.max_obj = max_obj
        self.plugin_version = '1'
        self.score_activation = 0
        self.score_threshold = score_thres
        self.n_classes = n_classes

    def forward(self, x):
        boxes = x[:, :, :4]
        conf = x[:, :, 4:5]
        scores = x[:, :, 5:]
        if self.n_classes == 1:
            scores = conf  # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
                           # so there is no need to multiplicate.
        else:
            scores *= conf  # conf = obj_conf * cls_conf
        num_det, det_boxes, det_scores, det_classes = TRT_NMS.apply(boxes, scores, self.background_class, self.box_coding,
                                                                    self.iou_threshold, self.max_obj,
                                                                    self.plugin_version, self.score_activation,
                                                                    self.score_threshold)
        return num_det, det_boxes, det_scores, det_classes


class End2End(nn.Module):
    '''export onnx or tensorrt model with NMS operation.'''
    def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None, n_classes=80):
        super().__init__()
        device = device if device else torch.device('cpu')
        assert isinstance(max_wh, (int)) or max_wh is None
        self.model = model.to(device)
        self.model.model[-1].end2end = True
        self.patch_model = ONNX_TRT if max_wh is None else ONNX_ORT
        self.end2end = self.patch_model(max_obj, iou_thres, score_thres, max_wh, device, n_classes)
        self.end2end.eval()

    def forward(self, x):
        x = self.model(x)
        x = self.end2end(x)
        return x


def attempt_load(weights, map_location=None):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        attempt_download(w)
        ckpt = torch.load(w, map_location=map_location)  # load
        model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval())  # FP32 model

    # Compatibility updates
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is nn.Upsample:
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # return model
    else:
        print('Ensemble created with %s\n' % weights)
        for k in ['names', 'stride']:
            setattr(model, k, getattr(model[-1], k))
        return model  # return ensemble
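A sketch of the intended export flow for End2End (not part of the commit; the weights path, input shape, and opset are illustrative). With an integer max_wh the ONNX-Runtime NMS path is patched in; with max_wh=None the TensorRT path is used instead:

import torch

model = attempt_load("yolov7.pt", map_location="cpu")   # hypothetical weights file
e2e = End2End(model, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=640)
dummy = torch.zeros(1, 3, 640, 640)                     # (batch, channels, height, width)
torch.onnx.export(e2e, dummy, "yolov7-e2e.onnx", opset_version=12)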
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/ngu_dialect.py
DELETED
@@ -1,30 +0,0 @@
import re
import opencc


dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou',
            'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing',
            'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang',
            'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan',
            'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen',
            'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'}

converters = {}

for dialect in dialects.values():
    try:
        converters[dialect] = opencc.OpenCC(dialect)
    except:
        pass


def ngu_dialect_to_ipa(text, dialect):
    dialect = dialects[dialect]
    text = converters[dialect].convert(text).replace('-', '').replace('$', ' ')
    text = re.sub(r'[、;:]', ',', text)
    text = re.sub(r'\s*,\s*', ', ', text)
    text = re.sub(r'\s*。\s*', '. ', text)
    text = re.sub(r'\s*?\s*', '? ', text)
    text = re.sub(r'\s*!\s*', '! ', text)
    text = re.sub(r'\s*$', '', text)
    return text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
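The punctuation-normalization tail of `ngu_dialect_to_ipa` is separable from the OpenCC dialect conversion (which needs per-dialect configs installed). A runnable sketch of just that step:

```py
import re

def normalize_punct(text):
    # Mirrors the punctuation normalization in ngu_dialect_to_ipa, minus the
    # OpenCC conversion: CJK delimiters become ASCII ',', '.', '?', '!' with
    # a single trailing space, and trailing whitespace is stripped.
    text = re.sub(r'[、;:]', ',', text)
    text = re.sub(r'\s*,\s*', ', ', text)
    text = re.sub(r'\s*。\s*', '. ', text)
    text = re.sub(r'\s*?\s*', '? ', text)
    text = re.sub(r'\s*!\s*', '! ', text)
    return re.sub(r'\s*$', '', text)

print(normalize_punct("侬好。饭吃过了?"))  # -> '侬好. 饭吃过了?'
```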
spaces/AlexWang/lama/bin/predict_inner_features.py
DELETED
@@ -1,119 +0,0 @@
-#!/usr/bin/env python3
-
-# Example command:
-# ./bin/predict.py \
-#     model.path=<path to checkpoint, prepared by make_checkpoint.py> \
-#     indir=<path to input data> \
-#     outdir=<where to store predicts>
-
-import logging
-import os
-import sys
-import traceback
-
-from saicinpainting.evaluation.utils import move_to_device
-
-os.environ['OMP_NUM_THREADS'] = '1'
-os.environ['OPENBLAS_NUM_THREADS'] = '1'
-os.environ['MKL_NUM_THREADS'] = '1'
-os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
-os.environ['NUMEXPR_NUM_THREADS'] = '1'
-
-import cv2
-import hydra
-import numpy as np
-import torch
-import tqdm
-import yaml
-from omegaconf import OmegaConf
-from torch.utils.data._utils.collate import default_collate
-
-from saicinpainting.training.data.datasets import make_default_val_dataset
-from saicinpainting.training.trainers import load_checkpoint, DefaultInpaintingTrainingModule
-from saicinpainting.utils import register_debug_signal_handlers, get_shape
-
-LOGGER = logging.getLogger(__name__)
-
-
-@hydra.main(config_path='../configs/prediction', config_name='default_inner_features.yaml')
-def main(predict_config: OmegaConf):
-    try:
-        register_debug_signal_handlers()  # kill -10 <pid> will result in traceback dumped into log
-
-        device = torch.device(predict_config.device)
-
-        train_config_path = os.path.join(predict_config.model.path, 'config.yaml')
-        with open(train_config_path, 'r') as f:
-            train_config = OmegaConf.create(yaml.safe_load(f))
-
-        checkpoint_path = os.path.join(predict_config.model.path, 'models', predict_config.model.checkpoint)
-        model = load_checkpoint(train_config, checkpoint_path, strict=False)
-        model.freeze()
-        model.to(device)
-
-        assert isinstance(model, DefaultInpaintingTrainingModule), 'Only DefaultInpaintingTrainingModule is supported'
-        assert isinstance(getattr(model.generator, 'model', None), torch.nn.Sequential)
-
-        if not predict_config.indir.endswith('/'):
-            predict_config.indir += '/'
-
-        dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset)
-
-        max_level = max(predict_config.levels)
-
-        with torch.no_grad():
-            for img_i in tqdm.trange(len(dataset)):
-                mask_fname = dataset.mask_filenames[img_i]
-                cur_out_fname = os.path.join(predict_config.outdir, os.path.splitext(mask_fname[len(predict_config.indir):])[0])
-                os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True)
-
-                batch = move_to_device(default_collate([dataset[img_i]]), device)
-
-                img = batch['image']
-                mask = batch['mask']
-                mask[:] = 0
-                mask_h, mask_w = mask.shape[-2:]
-                mask[:, :,
-                     mask_h // 2 - predict_config.hole_radius : mask_h // 2 + predict_config.hole_radius,
-                     mask_w // 2 - predict_config.hole_radius : mask_w // 2 + predict_config.hole_radius] = 1
-
-                masked_img = torch.cat([img * (1 - mask), mask], dim=1)
-
-                feats = masked_img
-                for level_i, level in enumerate(model.generator.model):
-                    feats = level(feats)
-                    if level_i in predict_config.levels:
-                        cur_feats = torch.cat([f for f in feats if torch.is_tensor(f)], dim=1) \
-                            if isinstance(feats, tuple) else feats
-
-                        if predict_config.slice_channels:
-                            cur_feats = cur_feats[:, slice(*predict_config.slice_channels)]
-
-                        cur_feat = cur_feats.pow(2).mean(1).pow(0.5).clone()
-                        cur_feat -= cur_feat.min()
-                        cur_feat /= cur_feat.std()
-                        cur_feat = cur_feat.clamp(0, 1) / 1
-                        cur_feat = cur_feat.cpu().numpy()[0]
-                        cur_feat *= 255
-                        cur_feat = np.clip(cur_feat, 0, 255).astype('uint8')
-                        cv2.imwrite(cur_out_fname + f'_lev{level_i:02d}_norm.png', cur_feat)
-
-                        # for channel_i in predict_config.channels:
-                        #
-                        #     cur_feat = cur_feats[0, channel_i].clone().detach().cpu().numpy()
-                        #     cur_feat -= cur_feat.min()
-                        #     cur_feat /= cur_feat.max()
-                        #     cur_feat *= 255
-                        #     cur_feat = np.clip(cur_feat, 0, 255).astype('uint8')
-                        #     cv2.imwrite(cur_out_fname + f'_lev{level_i}_ch{channel_i}.png', cur_feat)
-                    elif level_i >= max_level:
-                        break
-    except KeyboardInterrupt:
-        LOGGER.warning('Interrupted by user')
-    except Exception as ex:
-        LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}')
-        sys.exit(1)
-
-
-if __name__ == '__main__':
-    main()
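The core of the visualization above is a per-pixel channel RMS followed by shift/scale and uint8 quantization. A self-contained sketch of that step on a dummy tensor:

```py
import numpy as np
import torch

# Collapse a (N, C, H, W) feature map to a per-pixel RMS over channels,
# normalize, and quantize to uint8 exactly as the level loop above does.
feats = torch.randn(1, 64, 32, 32)          # stand-in for one level's activations
rms = feats.pow(2).mean(1).pow(0.5)          # (1, H, W) channel-wise RMS
rms -= rms.min()
rms /= rms.std()
img = rms.clamp(0, 1).numpy()[0] * 255
img = np.clip(img, 0, 255).astype('uint8')   # ready for cv2.imwrite(...)
print(img.shape, img.dtype)                  # (32, 32) uint8
```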
spaces/Ame42/rwms/local_utils.py
DELETED
@@ -1,344 +0,0 @@
-import math
-import re
-import numpy
-import pandas
-from sklearn.ensemble import RandomForestRegressor
-from sklearn.tree import export_graphviz
-import pickle as pkl
-
-l2 = "2L"
-l1 = "1L"
-s2 = "2S"
-s1 = "1S"
-date_time_col = "Date Time (GMT+01)"
-time_col = "Time (GMT+01)"
-dur_col = "Daylight duration (SEC)"
-date_col = "Date"
-id_col = "id"
-well_col = "Well index"
-blind_col = "THP BLIND (PSI)"
-temp_col = "TEMP (°F)"
-flp_col = "FLP (PSI)"
-ro_col = "THP R/O (PSI)"
-man_col = "Manifold Pressure (PSI)"
-sim_col = f'Predicted {ro_col}'
-ql_col = 'Liquid production (BBL/D)'
-out_folder = "output/"
-well_key = "wellhead"
-flow_key = "flowstation"
-
-model_file = "rf-AWNW"
-scaler_file = "ss-AWNW"
-
-day_mode = '22-11-2020'
-all_mode = 'All'
-train_mode = 'Train'
-test_mode = 'Test'
-
-
-def round_to_n(x, n):
-    x = x if x % 10 != 5 else x + 1
-    n = n if x > 9 else n - 1
-    return x if x == 0 else round(x, -int(math.floor(math.log10(abs(x)))) + (n - 1))
-
-
-def to_sec(h, m, s):
-    return (int(h) * 60 * 60) + (int(m) * 60) + int(s)
-
-
-def from_sec(t):
-    return f"{t // (60 * 60):0>2}:{(t % (60 * 60)) // 60:0>2}:{(t % (60 * 60)) % 60:0>2}"
-
-
-def column_matcher(title):
-    if re.search("#", string=title) is not None:
-        found = id_col
-    elif re.search(".*(Date|DATE).*(Time|TIME).*GMT.*", string=title) is not None:
-        found = date_time_col
-    elif re.search("THP.*R/O.*(PSI|units)", string=title) is not None:
-        found = ro_col
-    elif re.search(".*TEMP.*(F|units)", string=title) is not None:
-        found = temp_col
-    elif re.search(".*FLP.*(PSI|units)", string=title) is not None:
-        found = flp_col
-    elif re.search("THP.*BLIND.*(PSI|units)", string=title) is not None:
-        found = blind_col
-    elif re.search("THP.*(PSI|units)", string=title) is not None:
-        found = blind_col
-    elif re.search(".*1S.*PSI.*", string=title) is not None:
-        found = s1
-    elif re.search(".*2S.*PSI.*", string=title) is not None:
-        found = s2
-    elif re.search(".*1L.*PSI.*", string=title) is not None:
-        found = l1
-    elif re.search(".*2L.*PSI.*", string=title) is not None:
-        found = l2
-    else:
-        found = False
-
-    return found
-
-
-def file_matcher(name: str):
-    if re.search("\\d+-\\d+-\\d+.*flow.*man.*", string=name.lower()) is not None:
-        flowstation = True
-    else:
-        flowstation = False
-
-    return flowstation
-
-
-def file_matcher2(name: str):
-    if re.search(".*1s.*", string=name.lower()) is not None:
-        well = s1
-    elif re.search(".*1l.*", string=name.lower()) is not None:
-        well = l1
-    elif re.search(".*2s.*", string=name.lower()) is not None:
-        well = s2
-    else:
-        well = l2
-
-    return well
-
-
-def restructure(data, count, duration, times, dates):
-    for datetime in data[date_time_col]:
-        try:
-            date_time = re.sub("\\.0(?=\\s)", "", datetime)
-            datetime_array = date_time.split()
-            date = datetime_array[0].split("/")
-
-            time_array = datetime_array[1].split(":")
-
-            if datetime_array[2] == "PM" and time_array[0] != "12":
-                hour = int(time_array[0]) + 12
-            elif datetime_array[2] == "AM" and time_array[0] == "12":
-                hour = int(time_array[0]) - 12
-            else:
-                hour = time_array[0]
-
-            minutes = time_array[1]
-            sec = round_to_n(int(time_array[2]), 1)
-
-            if sec == 60:
-                sec = "00"
-                minutes = int(minutes) + 1
-
-            if minutes == 60:
-                minutes = "00"
-                hour = int(hour) + 1
-
-            if hour == 24:
-                hour = "00"
-                date[1] = int(date[1]) + 1
-
-            duration.append(to_sec(hour, minutes, sec))
-            times.append(f"{hour}:{minutes}:{sec}")
-            dates.append(f"{date[1]}/{date[0]}/{date[2]}")
-            date_time = f"{date[1]}/{date[0]}/{date[2]} {datetime_array[1]} {datetime_array[2]}"
-
-            data.loc[count, date_time_col] = date_time
-            count += 1
-        except IndexError:
-            print(f"\n\n{datetime}", flush=True)
-            raise
-
-    data.insert(1, dur_col, numpy.array(duration), True)
-    data.insert(2, time_col, numpy.array(times), True)
-    data.insert(3, date_col, numpy.array(dates), True)
-    return data.drop(axis=1, columns="index", errors='ignore')
-
-
-def try_key(temp, key):
-    try:
-        temp[f"{key}"]
-    except KeyError:
-        temp[f"{key}"] = dict()
-
-
-def find_data(index, wlhd):
-    for w in wlhd:
-        if index == w[0]:
-            return w[1]
-
-    return None
-
-
-def split_join(flowstation: pandas.DataFrame, wellhead: pandas.DataFrame, offset):
-    joined = []
-    info = [s1, l1, s2, l2]
-    for i, o in zip(info, offset):
-        # print(f'\n\nNow working on {i} column\n')
-        data = flowstation.drop(flowstation.columns.difference([i, 'Daylight duration (SEC)']),
-                                axis=1)
-        data.rename(columns={i: man_col}, inplace=True)
-        data.insert(2, well_col, [i for _ in range(data.shape[0])], True)
-
-        # print(f"{data.shape[0]} rows before drop and merge")
-        data_well = find_data(i, wellhead)
-        if data_well is not None:
-            data_well.drop_duplicates(inplace=True, subset=[time_col])
-            data = data.merge(data_well, how='inner', on=[dur_col])
-
-            # print(f"{data.shape[0]} rows after drop and merge")
-            # offset the rows by the required amount 'o'
-            data_y = data.drop(data.columns.difference([ro_col, id_col]), axis=1, errors="ignore").iloc[o:]
-            data_x = data.drop(columns=[ro_col], axis=1, errors="ignore").iloc[:(data.shape[0] - 1 - o)]
-            data_y.reset_index(inplace=True)
-            data_x.reset_index(inplace=True)
-            data_y.drop(columns=["index"], axis=1, inplace=True)
-            data_x.drop(columns=["index"], axis=1, inplace=True)
-            data = data_y.merge(data_x, how='inner', on=[id_col])
-            joined.append((i, data))
-
-    return joined
-
-
-class WellDataPoint:
-
-    def __init__(self, thp, day_sec, man_pres, temp, _l1=0, _s1=1, _l2=0, _s2=0):
-        self.thp = thp
-        self.day_sec = day_sec
-        self.man_pres = man_pres
-        self.temp = temp
-        self.l1 = _l1
-        self.s1 = _s1
-        self.l2 = _l2
-        self.s2 = _s2
-
-    def __str__(self):
-        day_sec, deli, i, man_pres, temp, well, well_titles = self.fields()
-        return f"""\033[1;31mTesting data\033[0m
-{day_sec:>20}{deli:3}{self.day_sec} seconds
-{man_pres:>20}{deli:3}{self.man_pres} psi
-{temp:>20}{deli:3}{self.temp} °F
-{well:>20}{deli:3}{well_titles[i]}
-"""
-
-    def fields(self):
-        deli = ' '
-        day_sec = "Day duration:"
-        man_pres = "Manifold Pressure:"
-        temp = "Temperature:"
-        well = "Well Name:"
-        wells = [self.l1, self.l2, self.s1, self.s2]
-        well_titles = ["Awoba NW 1L", "Awoba NW 2L", "Awoba NW 1S", "Awoba NW 2S"]  # List of well titles
-        i = 0
-        # Find the well with dummy value 1
-        while not (wells[i]):  # not(0) yields true and not(anything else) yields false
-            i += 1
-        return day_sec, deli, i, man_pres, temp, well, well_titles
-
-    def __plain__(self):
-        day_sec, deli, i, man_pres, temp, well, well_titles = self.fields()
-        space = '40'
-        d_space = '3'
-        return f"""Testing data
-{day_sec:>{space}}{deli:{d_space}}{self.day_sec} seconds
-{man_pres:>{space}}{deli:{d_space}}{self.man_pres} psi
-{temp:>{space}}{deli:{d_space}}{self.temp} °F
-{well:>{space}}{deli:{d_space}}{well_titles[i]}
-"""
-
-    def __repr__(self):
-        return f"Practice([{self.day_sec}, {self.man_pres}, {self.temp}, {self.l1}, {self.s1}, {self.l2}, {self.s2}])"
-
-    def get_x(self):
-        return [self.day_sec, self.man_pres, self.temp, self.l1, self.s1, self.l2, self.s2]
-
-    def get_y(self):
-        return self.thp
-
-
-def oversample_balance(data: pandas.DataFrame):
-    # get buckets for control column
-    data = data.astype(float, errors='ignore')
-    mx = data[ro_col].max(axis=0, skipna=True)
-    mn = data[ro_col].min(axis=0, skipna=True)
-    rng = mx - mn
-    bucket = rng / 10
-
-    # shuffle data into buckets
-    max_count = 0
-    counter = mn
-    temp = []
-    results = []
-
-    while counter < mx:
-
-        sub_data = data[data[ro_col].between(counter, counter + bucket, inclusive='right')]
-        if sub_data.shape[0] > 0 and float(sub_data[ro_col].min(axis=0, skipna=True)) > 0:
-            temp.append(sub_data)
-
-            max_count = max_count if sub_data.shape[0] < max_count else sub_data.shape[0]
-
-        counter += bucket
-
-    for r in temp:
-        counter = 0
-        pumped_data = r
-        print(r.shape, "\n", r.head())
-        # add elements of r to pumped_data
-        while pumped_data.shape[0] < max_count:
-            new_row = r.iloc[[counter % r.shape[0]]]
-
-            pumped_data = pandas.concat([pumped_data, new_row], ignore_index=True)
-
-        # add final results to results series
-        results.append(pumped_data)
-
-    return pandas.concat(results, ignore_index=True)
-
-
-def parse_well_id(well_id):
-    return f"Awoba NW {well_id}"
-
-
-def parse_well_id_2(well_id):
-    return f"Abura {well_id}"
-
-
-def print_graph(model: RandomForestRegressor, x):
-    # fixed: the original zipped against len(model.estimators_) (an int, not
-    # iterable) and exported the whole forest instead of each tree
-    for idx, est in enumerate(model.estimators_):
-        file = f'tree_{idx}.dot'
-        export_graphviz(est, out_file=file, feature_names=x.columns,
-                        class_names=['extreme', 'moderate', 'vulnerable', 'non-vulnerable'],
-                        rounded=True, proportion=False, precision=4, filled=True)
-
-
-def write_state_files(model, scaler):
-    pkl.dump(model, open(f"{model_file}.mdl", "wb"))
-    pkl.dump(scaler, open(f"{scaler_file}.sts", "wb"))
-
-
-def keep_useful_cols(data, columns=None):
-    if columns is None:
-        columns = [ro_col, dur_col, man_col, well_col, time_col, date_col, blind_col, flp_col, temp_col]
-    return data.drop(data.columns.difference(columns), axis=1)
-
-
-def read_state_files(mdl, scl):
-    mdl = pkl.load(open(f"{mdl}.mdl", "rb"))
-    scl = pkl.load(open(f"{scl}.sts", "rb"))
-    return mdl, scl
-
-
-def change_well_to_dummy(wl):
-    _l1, _l2, _s1, _s2 = 0, 0, 0, 0
-
-    if wl == parse_well_id(l1):
-        _l1 = 1
-    elif wl == parse_well_id(s1):
-        _s1 = 1
-    elif wl == parse_well_id(l2):
-        _l2 = 1
-    elif wl == parse_well_id(s2):
-        _s2 = 1
-
-    return _l1, _l2, _s1, _s2
-
-
-def calc_excel(pres):
-    # from well Abura 2S
-    return pres + 624, pres * 31.88
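The time helpers above round-trip a zero-padded HH:MM:SS reading through a seconds-since-midnight integer, and `round_to_n` rounds to n significant figures while nudging exact fives upward. A quick sanity sketch, assuming `local_utils` is importable:

```py
from local_utils import to_sec, from_sec, round_to_n

assert to_sec(1, 2, 3) == 3723            # 1h 2m 3s -> seconds
assert from_sec(3723) == "01:02:03"       # and back, zero-padded
assert round_to_n(15, 1) == 20            # 15 is bumped to 16, then rounded up
```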
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/policy.h
DELETED
@@ -1,25 +0,0 @@
-#pragma once
-
-#include <type_traits>
-
-#include "libipc/def.h"
-#include "libipc/prod_cons.h"
-
-#include "libipc/circ/elem_array.h"
-
-namespace ipc {
-namespace policy {
-
-template <template <typename, std::size_t...> class Elems, typename Flag>
-struct choose;
-
-template <typename Flag>
-struct choose<circ::elem_array, Flag> {
-    using flag_t = Flag;
-
-    template <std::size_t DataSize, std::size_t AlignSize>
-    using elems_t = circ::elem_array<ipc::prod_cons_impl<flag_t>, DataSize, AlignSize>;
-};
-
-} // namespace policy
-} // namespace ipc
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/models.py
DELETED
@@ -1,770 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py
-
-import math
-import random
-import functools
-import operator
-
-import torch
-from torch import nn
-from torch.nn import functional as F
-import torch.nn.init as init
-from torch.autograd import Function
-
-from .op_edit import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
-
-
-class PixelNorm(nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, input):
-        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
-
-
-def make_kernel(k):
-    k = torch.tensor(k, dtype=torch.float32)
-    if k.ndim == 1:
-        k = k[None, :] * k[:, None]
-    k /= k.sum()
-    return k
-
-
-class Upsample(nn.Module):
-    def __init__(self, kernel, factor=2):
-        super().__init__()
-
-        self.factor = factor
-        kernel = make_kernel(kernel) * (factor ** 2)
-        self.register_buffer("kernel", kernel)
-
-        p = kernel.shape[0] - factor
-
-        pad0 = (p + 1) // 2 + factor - 1
-        pad1 = p // 2
-
-        self.pad = (pad0, pad1)
-
-    def forward(self, input):
-        out = upfirdn2d(input, self.kernel, up=self.factor,
-                        down=1, pad=self.pad)
-        return out
-
-
-class Downsample(nn.Module):
-    def __init__(self, kernel, factor=2):
-        super().__init__()
-
-        self.factor = factor
-        kernel = make_kernel(kernel)
-        self.register_buffer("kernel", kernel)
-
-        p = kernel.shape[0] - factor
-
-        pad0 = (p + 1) // 2
-        pad1 = p // 2
-
-        self.pad = (pad0, pad1)
-
-    def forward(self, input):
-        out = upfirdn2d(input, self.kernel, up=1,
-                        down=self.factor, pad=self.pad)
-        return out
-
-
-class Blur(nn.Module):
-    def __init__(self, kernel, pad, upsample_factor=1):
-        super().__init__()
-
-        kernel = make_kernel(kernel)
-
-        if upsample_factor > 1:
-            kernel = kernel * (upsample_factor ** 2)
-
-        self.register_buffer("kernel", kernel)
-
-        self.pad = pad
-
-    def forward(self, input):
-        out = upfirdn2d(input, self.kernel, pad=self.pad)
-        return out
-
-
-class EqualConv2d(nn.Module):
-    def __init__(
-        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
-    ):
-        super().__init__()
-
-        self.weight = nn.Parameter(
-            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
-        )
-        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
-
-        self.stride = stride
-        self.padding = padding
-
-        if bias:
-            self.bias = nn.Parameter(torch.zeros(out_channel))
-
-        else:
-            self.bias = None
-
-    def forward(self, input):
-        out = F.conv2d(
-            input,
-            self.weight * self.scale,
-            bias=self.bias,
-            stride=self.stride,
-            padding=self.padding,
-        )
-        return out
-
-    def __repr__(self):
-        return (
-            f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},"
-            f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})"
-        )
-
-
-class EqualLinear(nn.Module):
-    def __init__(
-        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
-    ):
-        super().__init__()
-
-        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
-
-        if bias:
-            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
-        else:
-            self.bias = None
-
-        self.activation = activation
-
-        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
-        self.lr_mul = lr_mul
-
-    def forward(self, input):
-        if self.activation:
-            out = F.linear(input, self.weight * self.scale)
-            out = fused_leaky_relu(out, self.bias * self.lr_mul)
-        else:
-            out = F.linear(
-                input, self.weight * self.scale, bias=self.bias * self.lr_mul
-            )
-        return out
-
-    def __repr__(self):
-        return (
-            f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})"
-        )
-
-
-class ScaledLeakyReLU(nn.Module):
-    def __init__(self, negative_slope=0.2):
-        super().__init__()
-        self.negative_slope = negative_slope
-
-    def forward(self, input):
-        out = F.leaky_relu(input, negative_slope=self.negative_slope)
-        return out * math.sqrt(2)
-
-
-class ModulatedConv2d(nn.Module):
-    def __init__(
-        self,
-        in_channel,
-        out_channel,
-        kernel_size,
-        style_dim,
-        demodulate=True,
-        upsample=False,
-        downsample=False,
-        blur_kernel=[1, 3, 3, 1],
-    ):
-        super().__init__()
-
-        self.eps = 1e-8
-        self.kernel_size = kernel_size
-        self.in_channel = in_channel
-        self.out_channel = out_channel
-        self.upsample = upsample
-        self.downsample = downsample
-
-        if upsample:
-            factor = 2
-            p = (len(blur_kernel) - factor) - (kernel_size - 1)
-            pad0 = (p + 1) // 2 + factor - 1
-            pad1 = p // 2 + 1
-            self.blur = Blur(blur_kernel, pad=(
-                pad0, pad1), upsample_factor=factor)
-
-        if downsample:
-            factor = 2
-            p = (len(blur_kernel) - factor) + (kernel_size - 1)
-            pad0 = (p + 1) // 2
-            pad1 = p // 2
-            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
-
-        fan_in = in_channel * kernel_size ** 2
-        self.scale = 1 / math.sqrt(fan_in)
-        self.padding = kernel_size // 2
-        self.weight = nn.Parameter(
-            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
-        )
-        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
-        self.demodulate = demodulate
-
-    def __repr__(self):
-        return (
-            f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, "
-            f"upsample={self.upsample}, downsample={self.downsample})"
-        )
-
-    def forward(self, input, style):
-        batch, in_channel, height, width = input.shape
-
-        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
-        weight = self.scale * self.weight * style
-
-        if self.demodulate:
-            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
-            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
-
-        weight = weight.view(
-            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
-        )
-
-        if self.upsample:
-            input = input.view(1, batch * in_channel, height, width)
-            weight = weight.view(
-                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
-            )
-            weight = weight.transpose(1, 2).reshape(
-                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
-            )
-            out = F.conv_transpose2d(
-                input, weight, padding=0, stride=2, groups=batch)
-            _, _, height, width = out.shape
-            out = out.view(batch, self.out_channel, height, width)
-            out = self.blur(out)
-
-        elif self.downsample:
-            input = self.blur(input)
-            _, _, height, width = input.shape
-            input = input.view(1, batch * in_channel, height, width)
-            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
-            _, _, height, width = out.shape
-            out = out.view(batch, self.out_channel, height, width)
-
-        else:
-            input = input.view(1, batch * in_channel, height, width)
-            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
-            _, _, height, width = out.shape
-            out = out.view(batch, self.out_channel, height, width)
-
-        return out
-
-
-class NoiseInjection(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.weight = nn.Parameter(torch.zeros(1))
-
-    def forward(self, image, noise=None):
-        if noise is None:
-            batch, _, height, width = image.shape
-            noise = image.new_empty(batch, 1, height, width).normal_()
-        return image + self.weight * noise
-
-
-class ConstantInput(nn.Module):
-    def __init__(self, channel, size=4):
-        super().__init__()
-        self.input = nn.Parameter(torch.randn(1, channel, size, size // 2))
-
-    def forward(self, input):
-        batch = input.shape[0]
-        out = self.input.repeat(batch, 1, 1, 1)
-        return out
-
-
-class StyledConv(nn.Module):
-    def __init__(
-        self,
-        in_channel,
-        out_channel,
-        kernel_size,
-        style_dim,
-        upsample=False,
-        blur_kernel=[1, 3, 3, 1],
-        demodulate=True,
-    ):
-        super().__init__()
-        self.conv = ModulatedConv2d(
-            in_channel,
-            out_channel,
-            kernel_size,
-            style_dim,
-            upsample=upsample,
-            blur_kernel=blur_kernel,
-            demodulate=demodulate,
-        )
-        self.noise = NoiseInjection()
-        self.activate = FusedLeakyReLU(out_channel)
-
-    def forward(self, input, style, noise=None):
-        out = self.conv(input, style)
-        out = self.noise(out, noise=noise)
-        out = self.activate(out)
-        return out
-
-
-class ToRGB(nn.Module):
-    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
-        super().__init__()
-        if upsample:
-            self.upsample = Upsample(blur_kernel)
-
-        self.conv = ModulatedConv2d(
-            in_channel, 3, 1, style_dim, demodulate=False)
-        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
-
-    def forward(self, input, style, skip=None):
-        out = self.conv(input, style)
-        out = out + self.bias
-
-        if skip is not None:
-            skip = self.upsample(skip)
-            out = out + skip
-
-        return out
-
-
-class Generator(nn.Module):
-    def __init__(
-        self,
-        size,
-        style_dim,
-        n_mlp,
-        channel_multiplier=1,
-        blur_kernel=[1, 3, 3, 1],
-        lr_mlp=0.01,
-        small=False,
-        small_isaac=False,
-    ):
-        super().__init__()
-
-        self.size = size
-
-        if small and size > 64:
-            raise ValueError("small only works for sizes <= 64")
-
-        self.style_dim = style_dim
-        layers = [PixelNorm()]
-
-        for i in range(n_mlp):
-            layers.append(
-                EqualLinear(
-                    style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu"
-                )
-            )
-
-        self.style = nn.Sequential(*layers)
-
-        if small:
-            self.channels = {
-                4: 64 * channel_multiplier,
-                8: 64 * channel_multiplier,
-                16: 64 * channel_multiplier,
-                32: 64 * channel_multiplier,
-                64: 64 * channel_multiplier,
-            }
-        elif small_isaac:
-            self.channels = {4: 256, 8: 256,
-                             16: 256, 32: 256, 64: 128, 128: 128}
-        else:
-            self.channels = {
-                4: 512,
-                8: 512,
-                16: 512,
-                32: 512,
-                64: 256 * channel_multiplier,
-                128: 128 * channel_multiplier,
-                256: 64 * channel_multiplier,
-                512: 32 * channel_multiplier,
-                1024: 16 * channel_multiplier,
-            }
-
-        self.input = ConstantInput(self.channels[4])
-        self.conv1 = StyledConv(
-            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
-        )
-        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
-
-        self.log_size = int(math.log(size, 2))
-        self.num_layers = (self.log_size - 2) * 2 + 1
-
-        self.convs = nn.ModuleList()
-        self.upsamples = nn.ModuleList()
-        self.to_rgbs = nn.ModuleList()
-        self.noises = nn.Module()
-
-        in_channel = self.channels[4]
-
-        for layer_idx in range(self.num_layers):
-            res = (layer_idx + 5) // 2
-            shape = [1, 1, 2 ** res, 2 ** res // 2]
-            self.noises.register_buffer(
-                "noise_{}".format(layer_idx), torch.randn(*shape)
-            )
-
-        for i in range(3, self.log_size + 1):
-            out_channel = self.channels[2 ** i]
-
-            self.convs.append(
-                StyledConv(
-                    in_channel,
-                    out_channel,
-                    3,
-                    style_dim,
-                    upsample=True,
-                    blur_kernel=blur_kernel,
-                )
-            )
-
-            self.convs.append(
-                StyledConv(
-                    out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
-                )
-            )
-
-            self.to_rgbs.append(ToRGB(out_channel, style_dim))
-            in_channel = out_channel
-
-        self.n_latent = self.log_size * 2 - 2
-
-    def make_noise(self):
-        device = self.input.input.device
-
-        noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2 // 2, device=device)]
-
-        for i in range(3, self.log_size + 1):
-            for _ in range(2):
-                noises.append(torch.randn(
-                    1, 1, 2 ** i, 2 ** i // 2, device=device))
-
-        return noises
-
-    def mean_latent(self, n_latent):
-        latent_in = torch.randn(
-            n_latent, self.style_dim, device=self.input.input.device
-        )
-        latent = self.style(latent_in).mean(0, keepdim=True)
-
-        return latent
-
-    def get_latent(self, input):
-        return self.style(input)
-
-    def forward(
-        self,
-        styles,
-        return_latents=False,
-        return_features=False,
-        inject_index=None,
-        truncation=1,
-        truncation_latent=None,
-        input_is_latent=False,
-        noise=None,
-        randomize_noise=True,
-        real=False,
-    ):
-        if not input_is_latent:
-            styles = [self.style(s) for s in styles]
-        if noise is None:
-            if randomize_noise:
-                noise = [None] * self.num_layers
-            else:
-                noise = [
-                    getattr(self.noises, "noise_{}".format(i))
-                    for i in range(self.num_layers)
-                ]
-
-        if truncation < 1:
-            # print('truncation_latent: ', truncation_latent.shape)
-            if not real:  # if type(styles) == list:
-                style_t = []
-                for style in styles:
-                    style_t.append(
-                        truncation_latent + truncation *
-                        (style - truncation_latent)
-                    )  # (-1.1162e-03-(-1.0914e-01))*0.8+(-1.0914e-01)
-                styles = style_t
-            else:  # styles are latent (tensor: 1,18,512), for real PTI output
-                truncation_latent = truncation_latent.repeat(
-                    18, 1).unsqueeze(0)  # (1,512) --> (1,18,512)
-                styles = torch.add(truncation_latent, torch.mul(
-                    torch.sub(styles, truncation_latent), truncation))
-                # print('now styles after truncation : ', styles)
-        # if type(styles) == list and len(styles) < 2:  # this if for input as list of [(1,512)]
-        if not real:
-            if len(styles) < 2:
-                inject_index = self.n_latent
-                if styles[0].ndim < 3:
-                    latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
-                else:
-                    latent = styles[0]
-            elif type(styles) == list:
-                if inject_index is None:
-                    inject_index = 4
-
-                latent = styles[0].unsqueeze(0)
-                if latent.shape[1] == 1:
-                    latent = latent.repeat(1, inject_index, 1)
-                else:
-                    latent = latent[:, :inject_index, :]
-                latent2 = styles[1].unsqueeze(1).repeat(
-                    1, self.n_latent - inject_index, 1)
-                latent = torch.cat([latent, latent2], 1)
-        # input is tensor of size with torch.Size([1, 18, 512]), for real PTI output
-        else:
-            latent = styles
-
-        # print(f'processed latent: {latent.shape}')
-
-        features = {}
-        out = self.input(latent)
-        features["out_0"] = out
-        out = self.conv1(out, latent[:, 0], noise=noise[0])
-        features["conv1_0"] = out
-
-        skip = self.to_rgb1(out, latent[:, 1])
-        features["skip_0"] = skip
-        i = 1
-        for conv1, conv2, noise1, noise2, to_rgb in zip(
-            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
-        ):
-            out = conv1(out, latent[:, i], noise=noise1)
-            features["conv1_{}".format(i)] = out
-            out = conv2(out, latent[:, i + 1], noise=noise2)
-            features["conv2_{}".format(i)] = out
-            skip = to_rgb(out, latent[:, i + 2], skip)
-            features["skip_{}".format(i)] = skip
-
-            i += 2
-
-        image = skip
-
-        if return_latents:
-            return image, latent
-        elif return_features:
-            return image, features
-        else:
-            return image, None
-
-
-class ConvLayer(nn.Sequential):
-    def __init__(
-        self,
-        in_channel,
-        out_channel,
-        kernel_size,
-        downsample=False,
-        blur_kernel=[1, 3, 3, 1],
-        bias=True,
-        activate=True,
-    ):
-        layers = []
-
-        if downsample:
-            factor = 2
-            p = (len(blur_kernel) - factor) + (kernel_size - 1)
-            pad0 = (p + 1) // 2
-            pad1 = p // 2
-
-            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
-
-            stride = 2
-            self.padding = 0
-
-        else:
-            stride = 1
-            self.padding = kernel_size // 2
-
-        layers.append(
-            EqualConv2d(
-                in_channel,
-                out_channel,
-                kernel_size,
-                padding=self.padding,
-                stride=stride,
-                bias=bias and not activate,
-            )
-        )
-
-        if activate:
-            if bias:
-                layers.append(FusedLeakyReLU(out_channel))
-            else:
-                layers.append(ScaledLeakyReLU(0.2))
-
-        super().__init__(*layers)
-
-
-class ResBlock(nn.Module):
-    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
-        super().__init__()
-
-        self.conv1 = ConvLayer(in_channel, in_channel, 3)
-        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
-
-        self.skip = ConvLayer(
-            in_channel, out_channel, 1, downsample=True, activate=False, bias=False
-        )
-
-    def forward(self, input):
-        out = self.conv1(input)
-        out = self.conv2(out)
-
-        skip = self.skip(input)
-        out = (out + skip) / math.sqrt(2)
-
-        return out
-
-
-class StyleDiscriminator(nn.Module):
-    def __init__(
-        self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], small=False
-    ):
-        super().__init__()
-
-        if small:
-            channels = {4: 64, 8: 64, 16: 64, 32: 64, 64: 64}
-
-        else:
-            channels = {
-                4: 512,
-                8: 512,
-                16: 512,
-                32: 512,
-                64: 256 * channel_multiplier,
-                128: 128 * channel_multiplier,
-                256: 64 * channel_multiplier,
-                512: 32 * channel_multiplier,
-                1024: 16 * channel_multiplier,
-            }
-
-        convs = [ConvLayer(3, channels[size], 1)]
-
-        log_size = int(math.log(size, 2))
-        in_channel = channels[size]
-
-        for i in range(log_size, 2, -1):
-            out_channel = channels[2 ** (i - 1)]
-
-            convs.append(ResBlock(in_channel, out_channel, blur_kernel))
-
-            in_channel = out_channel
-
-        self.convs = nn.Sequential(*convs)
-
-        self.stddev_group = 4
-        self.stddev_feat = 1
-
-        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
-        self.final_linear = nn.Sequential(
-            EqualLinear(channels[4] * 4 * 4, channels[4],
-                        activation="fused_lrelu"),
-            EqualLinear(channels[4], 1),
-        )
-
-    def forward(self, input):
-        h = input
-        h_list = []
-
-        for index, blocklist in enumerate(self.convs):
-            h = blocklist(h)
-            h_list.append(h)
-
-        out = h
-        batch, channel, height, width = out.shape
-        group = min(batch, self.stddev_group)
-        stddev = out.view(
-            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
-        )
-        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
-        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
-        stddev = stddev.repeat(group, 1, height, width)
-        out = torch.cat([out, stddev], 1)
-
-        out = self.final_conv(out)
-        h_list.append(out)
-
-        out = out.view(batch, -1)
-        out = self.final_linear(out)
-
-        return out, h_list
-
-
-class StyleEncoder(nn.Module):
-    def __init__(self, size, w_dim=512):
-        super().__init__()
-
-        channels = {
-            4: 512,
-            8: 512,
-            16: 512,
-            32: 512,
-            64: 256,
-            128: 128,
-            256: 64,
-            512: 32,
-            1024: 16
-        }
-
-        self.w_dim = w_dim
-        log_size = int(math.log(size, 2))
-        convs = [ConvLayer(3, channels[size], 1)]
-
-        in_channel = channels[size]
-        for i in range(log_size, 2, -1):
-            out_channel = channels[2 ** (i - 1)]
-            convs.append(ResBlock(in_channel, out_channel))
-            in_channel = out_channel
-
-        convs.append(EqualConv2d(
-            in_channel, 2 * self.w_dim, 4, padding=0, bias=False))
-
-        self.convs = nn.Sequential(*convs)
-
-    def forward(self, input):
-        out = self.convs(input)
-        # return out.view(len(input), self.n_latents, self.w_dim)
-        reshaped = out.view(len(input), 2 * self.w_dim)
-        return reshaped[:, :self.w_dim], reshaped[:, self.w_dim:]
-
-
-def kaiming_init(m):
-    if isinstance(m, (nn.Linear, nn.Conv2d)):
-        init.kaiming_normal_(m.weight)
-        if m.bias is not None:
-            m.bias.data.fill_(0)
-    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
-        m.weight.data.fill_(1)
-        if m.bias is not None:
-            m.bias.data.fill_(0)
-
-
-def normal_init(m):
-    if isinstance(m, (nn.Linear, nn.Conv2d)):
-        init.normal_(m.weight, 0, 0.02)
-        if m.bias is not None:
-            m.bias.data.fill_(0)
-    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
-        m.weight.data.fill_(1)
-        if m.bias is not None:
-            m.bias.data.fill_(0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
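The latent handling in `Generator.forward` broadcasts a single mapped latent across all layer slots before the synthesis loop consumes `latent[:, 0]`, `latent[:, 1]`, ... per layer. A pure-tensor sketch of that broadcast (no custom CUDA ops needed):

```py
import torch

# Mirrors styles[0].unsqueeze(1).repeat(1, inject_index, 1) in forward():
# one (batch, 512) w latent is repeated across all n_latent layer slots.
n_latent = 16                       # log2(size) * 2 - 2, e.g. size=512 -> 16
w = torch.randn(1, 512)             # one mapped latent from the style MLP
latent = w.unsqueeze(1).repeat(1, n_latent, 1)
print(latent.shape)                 # torch.Size([1, 16, 512])
```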
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/autoencoderkl.md
DELETED
@@ -1,43 +0,0 @@
-# AutoencoderKL
-
-The variational autoencoder (VAE) model with KL loss was introduced in [Auto-Encoding Variational Bayes](https://arxiv.org/abs/1312.6114v11) by Diederik P. Kingma and Max Welling. The model is used in 🤗 Diffusers to encode images into latents and to decode latent representations into images.
-
-The abstract from the paper is:
-
-*How can we perform efficient inference and learning in directed probabilistic models, in the presence of continuous latent variables with intractable posterior distributions, and large datasets? We introduce a stochastic variational inference and learning algorithm that scales to large datasets and, under some mild differentiability conditions, even works in the intractable case. Our contributions are two-fold. First, we show that a reparameterization of the variational lower bound yields a lower bound estimator that can be straightforwardly optimized using standard stochastic gradient methods. Second, we show that for i.i.d. datasets with continuous latent variables per datapoint, posterior inference can be made especially efficient by fitting an approximate inference model (also called a recognition model) to the intractable posterior using the proposed lower bound estimator. Theoretical advantages are reflected in experimental results.*
-
-## Loading from the original format
-
-By default the [`AutoencoderKL`] should be loaded with [`~ModelMixin.from_pretrained`], but it can also be loaded
-from the original format using [`FromOriginalVAEMixin.from_single_file`] as follows:
-
-```py
-from diffusers import AutoencoderKL
-
-url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"  # can also be local file
-model = AutoencoderKL.from_single_file(url)
-```
-
-## AutoencoderKL
-
-[[autodoc]] AutoencoderKL
-
-## AutoencoderKLOutput
-
-[[autodoc]] models.autoencoder_kl.AutoencoderKLOutput
-
-## DecoderOutput
-
-[[autodoc]] models.vae.DecoderOutput
-
-## FlaxAutoencoderKL
-
-[[autodoc]] FlaxAutoencoderKL
-
-## FlaxAutoencoderKLOutput
-
-[[autodoc]] models.vae_flax.FlaxAutoencoderKLOutput
-
-## FlaxDecoderOutput
-
-[[autodoc]] models.vae_flax.FlaxDecoderOutput
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
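The doc above mentions the default `from_pretrained` path without showing it; a hedged sketch (the repo id and subfolder are illustrative of a typical Stable Diffusion layout):

```py
from diffusers import AutoencoderKL

# Default loading path: pull the VAE subfolder of a diffusers-format checkpoint.
vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
```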
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/cycle_diffusion.md
DELETED
@@ -1,33 +0,0 @@
-<!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
--->
-
-# Cycle Diffusion
-
-Cycle Diffusion is a text-guided image-to-image generation model proposed in [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://huggingface.co/papers/2210.05559) by Chen Henry Wu, Fernando De la Torre.
-
-The abstract from the paper is:
-
-*Diffusion models have achieved unprecedented performance in generative modeling. The commonly-adopted formulation of the latent code of diffusion models is a sequence of gradually denoised samples, as opposed to the simpler (e.g., Gaussian) latent space of GANs, VAEs, and normalizing flows. This paper provides an alternative, Gaussian formulation of the latent space of various diffusion models, as well as an invertible DPM-Encoder that maps images into the latent space. While our formulation is purely based on the definition of diffusion models, we demonstrate several intriguing consequences. (1) Empirically, we observe that a common latent space emerges from two diffusion models trained independently on related domains. In light of this finding, we propose CycleDiffusion, which uses DPM-Encoder for unpaired image-to-image translation. Furthermore, applying CycleDiffusion to text-to-image diffusion models, we show that large-scale text-to-image diffusion models can be used as zero-shot image-to-image editors. (2) One can guide pre-trained diffusion models and GANs by controlling the latent codes in a unified, plug-and-play formulation based on energy-based models. Using the CLIP model and a face recognition model as guidance, we demonstrate that diffusion models have better coverage of low-density sub-populations and individuals than GANs.*
-
-<Tip>
-
-Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
-
-## CycleDiffusionPipeline
-[[autodoc]] CycleDiffusionPipeline
-	- all
-	- __call__
-
-## StableDiffusionPipelineOutput
-[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_lms.py
DELETED
@@ -1,140 +0,0 @@
import torch

from diffusers import LMSDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class LMSDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (LMSDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 800]:
            self.check_over_forward(time_step=t)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1006.388) < 1e-2
        assert abs(result_mean.item() - 1.31) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0017) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1006.388) < 1e-2
        assert abs(result_mean.item() - 1.31) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 3812.9927) < 2e-2
        assert abs(result_mean.item() - 4.9648) < 1e-3
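The full-loop tests above double as a compact reference for the scheduler's public API (`set_timesteps`, `scale_model_input`, `step`). A minimal standalone sketch of the same loop, with a zero tensor standing in for a real denoiser's output:

```python
import torch
from diffusers import LMSDiscreteScheduler

scheduler = LMSDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)

# Start from scaled Gaussian noise, exactly as the tests above do.
sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real UNet's prediction
    sample = scheduler.step(noise_pred, t, sample).prev_sample
```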
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
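The deleted config above is MMDetection's inheritance idiom: `_base_` loads the R-50 config in full, and the overrides are deep-merged on top, so only the changed fields need restating. A toy sketch of the same mechanism (file names and field values are illustrative):

```python
# base.py (illustrative)
model = dict(pretrained='torchvision://resnet50', backbone=dict(depth=50, num_stages=4))

# child.py (illustrative): `_base_` loads base.py, then these dicts are
# deep-merged on top, so backbone.num_stages is kept while backbone.depth
# and the pretrained URL are replaced.
_base_ = './base.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
```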
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/pisa_ssd_head.py
DELETED
@@ -1,139 +0,0 @@
import torch

from mmdet.core import multi_apply
from ..builder import HEADS
from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p
from .ssd_head import SSDHead


# TODO: add loss evaluator for SSD
@HEADS.register_module()
class PISASSDHead(SSDHead):

    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level.
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image
                with shape (num_obj, 4).
            gt_labels (list[Tensor]): Ground truth labels of each image
                with shape (num_obj,).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
                Default: None.

        Returns:
            dict: Loss dict, comprising classification loss, regression loss
                and carl loss.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device

        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=1,
            unmap_outputs=False,
            return_sampling_results=True)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets

        num_images = len(img_metas)
        all_cls_scores = torch.cat([
            s.permute(0, 2, 3, 1).reshape(
                num_images, -1, self.cls_out_channels) for s in cls_scores
        ], 1)
        all_labels = torch.cat(labels_list, -1).view(num_images, -1)
        all_label_weights = torch.cat(label_weights_list,
                                      -1).view(num_images, -1)
        all_bbox_preds = torch.cat([
            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
            for b in bbox_preds
        ], -2)
        all_bbox_targets = torch.cat(bbox_targets_list,
                                     -2).view(num_images, -1, 4)
        all_bbox_weights = torch.cat(bbox_weights_list,
                                     -2).view(num_images, -1, 4)

        # concat all level anchors to a single tensor
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))

        isr_cfg = self.train_cfg.get('isr', None)
        all_targets = (all_labels.view(-1), all_label_weights.view(-1),
                       all_bbox_targets.view(-1,
                                             4), all_bbox_weights.view(-1, 4))
        # apply ISR-P
        if isr_cfg is not None:
            all_targets = isr_p(
                all_cls_scores.view(-1, all_cls_scores.size(-1)),
                all_bbox_preds.view(-1, 4),
                all_targets,
                torch.cat(all_anchors),
                sampling_results_list,
                loss_cls=CrossEntropyLoss(),
                bbox_coder=self.bbox_coder,
                **self.train_cfg.isr,
                num_class=self.num_classes)
            (new_labels, new_label_weights, new_bbox_targets,
             new_bbox_weights) = all_targets
            all_labels = new_labels.view(all_labels.shape)
            all_label_weights = new_label_weights.view(all_label_weights.shape)
            all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
            all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)

        # add CARL loss
        carl_loss_cfg = self.train_cfg.get('carl', None)
        if carl_loss_cfg is not None:
            loss_carl = carl_loss(
                all_cls_scores.view(-1, all_cls_scores.size(-1)),
                all_targets[0],
                all_bbox_preds.view(-1, 4),
                all_targets[2],
                SmoothL1Loss(beta=1.),
                **self.train_cfg.carl,
                avg_factor=num_total_pos,
                num_class=self.num_classes)

        # check NaN and Inf
        assert torch.isfinite(all_cls_scores).all().item(), \
            'classification scores become infinite or NaN!'
        assert torch.isfinite(all_bbox_preds).all().item(), \
            'bbox predictions become infinite or NaN!'

        losses_cls, losses_bbox = multi_apply(
            self.loss_single,
            all_cls_scores,
            all_bbox_preds,
            all_anchors,
            all_labels,
            all_label_weights,
            all_bbox_targets,
            all_bbox_weights,
            num_total_samples=num_total_pos)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        if carl_loss_cfg is not None:
            loss_dict.update(loss_carl)
        return loss_dict
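Both PISA components in the head above are opt-in via `train_cfg`: ISR-P is applied only when `train_cfg.isr` exists, and CARL is added only when `train_cfg.carl` exists. A hedged config fragment showing the switch (the `k`/`bias` values follow commonly used PISA defaults but should be treated as placeholders):

```python
# Illustrative train_cfg fragment; in a full config this is merged with the
# usual SSD training settings (assigner, sampler, and so on).
train_cfg = dict(
    isr=dict(k=2., bias=0.),    # ISR-P: rank-based reweighting of positive samples
    carl=dict(k=1., bias=0.2),  # CARL: classification-aware regression loss
)
```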
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './deeplabv3_r50-d8_512x512_40k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/exllamav2.py
DELETED
@@ -1,133 +0,0 @@
import random
from pathlib import Path

import torch
from exllamav2 import (
    ExLlamaV2,
    ExLlamaV2Cache,
    ExLlamaV2Config,
    ExLlamaV2Tokenizer
)
from exllamav2.generator import ExLlamaV2BaseGenerator, ExLlamaV2Sampler

from modules import shared
from modules.logging_colors import logger
from modules.text_generation import get_max_prompt_length

try:
    import flash_attn
except ModuleNotFoundError:
    logger.warning(
        'You are running ExLlamaV2 without flash-attention. This will cause the VRAM usage '
        'to be a lot higher than it could be.\n'
        'Try installing flash-attention following the instructions here: '
        'https://github.com/Dao-AILab/flash-attention#installation-and-features'
    )
    pass


class Exllamav2Model:
    def __init__(self):
        pass

    @classmethod
    def from_pretrained(self, path_to_model):

        path_to_model = Path(f'{shared.args.model_dir}') / Path(path_to_model)

        config = ExLlamaV2Config()
        config.model_dir = str(path_to_model)
        config.prepare()

        config.max_seq_len = shared.args.max_seq_len
        config.scale_pos_emb = shared.args.compress_pos_emb
        config.scale_alpha_value = shared.args.alpha_value

        model = ExLlamaV2(config)

        split = None
        if shared.args.gpu_split:
            split = [float(alloc) for alloc in shared.args.gpu_split.split(",")]

        model.load(split)

        tokenizer = ExLlamaV2Tokenizer(config)
        cache = ExLlamaV2Cache(model)
        generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)

        result = self()
        result.model = model
        result.cache = cache
        result.tokenizer = tokenizer
        result.generator = generator
        return result, result

    def encode(self, string, **kwargs):
        return self.tokenizer.encode(string, add_bos=True)

    def decode(self, ids, **kwargs):
        if isinstance(ids, list):
            ids = torch.tensor([ids])
        elif isinstance(ids, torch.Tensor) and ids.numel() == 1:
            ids = ids.view(1, -1)

        return self.tokenizer.decode(ids)[0]

    def get_logits(self, token_ids, **kwargs):
        self.cache.current_seq_len = 0
        self.model.forward(token_ids[:, :-1], self.cache, input_mask=None, preprocess_only=True)
        return self.model.forward(token_ids[:, -1:], self.cache, input_mask=None, **kwargs).float().cpu()

    def generate_with_streaming(self, prompt, state):
        settings = ExLlamaV2Sampler.Settings()
        settings.temperature = state['temperature']
        settings.top_k = state['top_k']
        settings.top_p = state['top_p']
        settings.typical = state['typical_p']
        settings.token_repetition_penalty = state['repetition_penalty']
        settings.token_repetition_range = -1 if state['repetition_penalty_range'] <= 0 else state['repetition_penalty_range']
        if state['ban_eos_token']:
            settings.disallow_tokens(self.tokenizer, [self.tokenizer.eos_token_id])

        if state['custom_token_bans']:
            to_ban = [int(x) for x in state['custom_token_bans'].split(',')]
            if len(to_ban) > 0:
                settings.disallow_tokens(self.tokenizer, to_ban)

        ids = self.tokenizer.encode(prompt, add_bos=state['add_bos_token'])
        ids = ids[:, -get_max_prompt_length(state):]
        initial_len = ids.shape[-1]

        if state['auto_max_new_tokens']:
            max_new_tokens = state['truncation_length'] - ids.shape[-1]
        else:
            max_new_tokens = state['max_new_tokens']

        # _gen_begin_base
        self.cache.current_seq_len = 0
        self.model.forward(ids[:, :-1], self.cache, input_mask=None, preprocess_only=True)

        has_leading_space = False
        for i in range(max_new_tokens):
            logits = self.model.forward(ids[:, -1:], self.cache, input_mask=None).float().cpu()
            token, _, _ = ExLlamaV2Sampler.sample(logits, settings, ids, random.random(), self.tokenizer)
            ids = torch.cat([ids, token], dim=1)

            if i == 0 and self.tokenizer.tokenizer.IdToPiece(int(token)).startswith('▁'):
                has_leading_space = True

            decoded_text = self.tokenizer.decode(ids[:, initial_len:])[0]
            if has_leading_space:
                decoded_text = ' ' + decoded_text

            yield decoded_text

            if token.item() == self.tokenizer.eos_token_id or shared.stop_everything:
                break

    def generate(self, prompt, state):
        output = ''
        for output in self.generate_with_streaming(prompt, state):
            pass

        return output
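Outside the web UI's `state` dict, the class above reduces to the ExLlamaV2 primitives it wraps. A hedged standalone sketch (the model path and sampling values are placeholders, and `generate_simple` is assumed from the `ExLlamaV2BaseGenerator` API imported above):

```python
from exllamav2 import ExLlamaV2, ExLlamaV2Cache, ExLlamaV2Config, ExLlamaV2Tokenizer
from exllamav2.generator import ExLlamaV2BaseGenerator, ExLlamaV2Sampler

# Load an EXL2-quantized model from disk (placeholder path).
config = ExLlamaV2Config()
config.model_dir = "/models/Llama-2-7B-exl2"
config.prepare()

model = ExLlamaV2(config)
model.load()
tokenizer = ExLlamaV2Tokenizer(config)
cache = ExLlamaV2Cache(model)
generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)

settings = ExLlamaV2Sampler.Settings()
settings.temperature = 0.7
settings.top_p = 0.9

print(generator.generate_simple("Hello, my name is", settings, num_tokens=64))
```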
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/unet.py
DELETED
@@ -1,894 +0,0 @@
from abc import abstractmethod

import math

import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F

from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
    checkpoint,
    conv_nd,
    linear,
    avg_pool_nd,
    zero_module,
    normalization,
    timestep_embedding,
)


class AttentionPool2d(nn.Module):
    """
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        self.positional_embedding = nn.Parameter(
            th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
        )
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c, *_spatial = x.shape
        x = x.reshape(b, c, -1)  # NC(HW)
        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
        x = self.qkv_proj(x)
        x = self.attention(x)
        x = self.c_proj(x)
        return x[:, :, 0]


class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """


class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential module that passes timestep embeddings to the children that
    support it as an extra input.
    """

    def forward(self, x, emb):
        for layer in self:
            if isinstance(layer, TimestepBlock):
                x = layer(x, emb)
            else:
                x = layer(x)
        return x


class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            x = F.interpolate(
                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
            )
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        if self.use_conv:
            x = self.conv(x)
        return x


class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=1
            )
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)


class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.

    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.

        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )

    def _forward(self, x, emb):
        if self.updown:
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h


class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.

    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before split heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        return checkpoint(self._forward, (x,), self.parameters(), True)

    def _forward(self, x):
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)


def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    b, c, *spatial = y[0].shape
    num_spatial = int(np.prod(spatial))
    # We perform two matmuls with the same number of ops.
    # The first computes the weight matrix, the second computes
    # the combination of the value vectors.
    matmul_ops = 2 * b * (num_spatial ** 2) * c
    model.total_ops += th.DoubleTensor([matmul_ops])


class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = th.einsum(
            "bct,bcs->bts", q * scale, k * scale
        )  # More stable with f16 than dividing afterwards
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = th.einsum("bts,bcs->bct", weight, v)
        return a.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)


class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.chunk(3, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = th.einsum(
            "bct,bcs->bts",
            (q * scale).view(bs * self.n_heads, ch, length),
            (k * scale).view(bs * self.n_heads, ch, length),
        )  # More stable with f16 than dividing afterwards
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
        return a.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)


class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.

    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
                              a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
                               of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
                                    increased efficiency.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        ch = input_ch = int(channel_mult[0] * model_channels)
        self.input_blocks = nn.ModuleList(
            [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
        )
        self._feature_size = ch
        input_block_chans = [ch]
        ds = 1
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=int(mult * model_channels),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(mult * model_channels)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=int(model_channels * mult),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(model_channels * mult)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                if level and i == num_res_blocks:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
        )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps, y=None):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"

        hs = []
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)

        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            hs.append(h)
        h = self.middle_block(h, emb)
        for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb)
        h = h.type(x.dtype)
        return self.out(h)


class SuperResModel(UNetModel):
    """
    A UNetModel that performs super-resolution.

    Expects an extra kwarg `low_res` to condition on a low-resolution image.
    """

    def __init__(self, image_size, in_channels, *args, **kwargs):
        super().__init__(image_size, in_channels * 2, *args, **kwargs)

    def forward(self, x, timesteps, low_res=None, **kwargs):
        _, _, new_height, new_width = x.shape
        upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
        x = th.cat([x, upsampled], dim=1)
        return super().forward(x, timesteps, **kwargs)


class EncoderUNetModel(nn.Module):
    """
    The half UNet model with attention and timestep embedding.

    For usage, see UNet.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        pool="adaptive",
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        ch = int(channel_mult[0] * model_channels)
        self.input_blocks = nn.ModuleList(
            [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
        )
        self._feature_size = ch
        input_block_chans = [ch]
        ds = 1
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=int(mult * model_channels),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(mult * model_channels)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch
        self.pool = pool
        if pool == "adaptive":
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                nn.AdaptiveAvgPool2d((1, 1)),
                zero_module(conv_nd(dims, ch, out_channels, 1)),
                nn.Flatten(),
            )
        elif pool == "attention":
            assert num_head_channels != -1
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                AttentionPool2d(
                    (image_size // ds), ch, num_head_channels, out_channels
                ),
            )
        elif pool == "spatial":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                nn.ReLU(),
                nn.Linear(2048, self.out_channels),
            )
        elif pool == "spatial_v2":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                normalization(2048),
                nn.SiLU(),
                nn.Linear(2048, self.out_channels),
            )
        else:
            raise NotImplementedError(f"Unexpected {pool} pooling")

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x, timesteps):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :return: an [N x K] Tensor of outputs.
        """
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        results = []
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            if self.pool.startswith("spatial"):
                results.append(h.type(x.dtype).mean(dim=(2, 3)))
        h = self.middle_block(h, emb)
        if self.pool.startswith("spatial"):
            results.append(h.type(x.dtype).mean(dim=(2, 3)))
            h = th.cat(results, axis=-1)
            return self.out(h)
        else:
            h = h.type(x.dtype)
            return self.out(h)
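For orientation, a hedged instantiation sketch for the `UNetModel` defined above; the hyperparameters are illustrative rather than any particular checkpoint's:

```python
import torch as th

# A small class-unconditional 64x64 configuration (illustrative values).
model = UNetModel(
    image_size=64,
    in_channels=3,
    model_channels=128,
    out_channels=6,                # 3 + 3 when predicting epsilon plus a learned variance
    num_res_blocks=2,
    attention_resolutions=(4, 8),  # attend at 4x and 8x downsampling
    channel_mult=(1, 2, 3, 4),
    num_head_channels=64,
    use_scale_shift_norm=True,
    resblock_updown=True,
)

x = th.randn(2, 3, 64, 64)
t = th.randint(0, 1000, (2,))
out = model(x, t)  # -> [2, 6, 64, 64]
```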
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_win32_console.py
DELETED
@@ -1,662 +0,0 @@
"""Light wrapper around the Win32 Console API - this module should only be imported on Windows

The API that this module wraps is documented at https://docs.microsoft.com/en-us/windows/console/console-functions
"""
import ctypes
import sys
from typing import Any

windll: Any = None
if sys.platform == "win32":
    windll = ctypes.LibraryLoader(ctypes.WinDLL)
else:
    raise ImportError(f"{__name__} can only be imported on Windows")

import time
from ctypes import Structure, byref, wintypes
from typing import IO, NamedTuple, Type, cast

from pip._vendor.rich.color import ColorSystem
from pip._vendor.rich.style import Style

STDOUT = -11
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4

COORD = wintypes._COORD


class LegacyWindowsError(Exception):
    pass


class WindowsCoordinates(NamedTuple):
    """Coordinates in the Windows Console API are (y, x), not (x, y).
    This class is intended to prevent that confusion.
    Rows and columns are indexed from 0.
    This class can be used in place of wintypes._COORD in arguments and argtypes.
    """

    row: int
    col: int

    @classmethod
    def from_param(cls, value: "WindowsCoordinates") -> COORD:
        """Converts a WindowsCoordinates into a wintypes _COORD structure.
        This classmethod is internally called by ctypes to perform the conversion.

        Args:
            value (WindowsCoordinates): The input coordinates to convert.

        Returns:
            wintypes._COORD: The converted coordinates struct.
        """
        return COORD(value.col, value.row)


class CONSOLE_SCREEN_BUFFER_INFO(Structure):
    _fields_ = [
        ("dwSize", COORD),
        ("dwCursorPosition", COORD),
        ("wAttributes", wintypes.WORD),
        ("srWindow", wintypes.SMALL_RECT),
        ("dwMaximumWindowSize", COORD),
    ]


class CONSOLE_CURSOR_INFO(ctypes.Structure):
    _fields_ = [("dwSize", wintypes.DWORD), ("bVisible", wintypes.BOOL)]


_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
    wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE


def GetStdHandle(handle: int = STDOUT) -> wintypes.HANDLE:
    """Retrieves a handle to the specified standard device (standard input, standard output, or standard error).

    Args:
        handle (int): Integer identifier for the handle. Defaults to -11 (stdout).

    Returns:
        wintypes.HANDLE: The handle
    """
    return cast(wintypes.HANDLE, _GetStdHandle(handle))


_GetConsoleMode = windll.kernel32.GetConsoleMode
_GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD]
_GetConsoleMode.restype = wintypes.BOOL


def GetConsoleMode(std_handle: wintypes.HANDLE) -> int:
    """Retrieves the current input mode of a console's input buffer
    or the current output mode of a console screen buffer.

    Args:
        std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.

    Raises:
        LegacyWindowsError: If any error occurs while calling the Windows console API.

    Returns:
        int: Value representing the current console mode as documented at
            https://docs.microsoft.com/en-us/windows/console/getconsolemode#parameters
    """

    console_mode = wintypes.DWORD()
    success = bool(_GetConsoleMode(std_handle, console_mode))
    if not success:
        raise LegacyWindowsError("Unable to get legacy Windows Console Mode")
    return console_mode.value


_FillConsoleOutputCharacterW = windll.kernel32.FillConsoleOutputCharacterW
_FillConsoleOutputCharacterW.argtypes = [
    wintypes.HANDLE,
    ctypes.c_char,
    wintypes.DWORD,
    cast(Type[COORD], WindowsCoordinates),
    ctypes.POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterW.restype = wintypes.BOOL


def FillConsoleOutputCharacter(
    std_handle: wintypes.HANDLE,
    char: str,
    length: int,
    start: WindowsCoordinates,
) -> int:
    """Writes a character to the console screen buffer a specified number of times, beginning at the specified coordinates.

    Args:
        std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
        char (str): The character to write. Must be a string of length 1.
        length (int): The number of times to write the character.
        start (WindowsCoordinates): The coordinates to start writing at.

    Returns:
        int: The number of characters written.
    """
    character = ctypes.c_char(char.encode())
    num_characters = wintypes.DWORD(length)
    num_written = wintypes.DWORD(0)
    _FillConsoleOutputCharacterW(
        std_handle,
        character,
        num_characters,
        start,
        byref(num_written),
    )
    return num_written.value


_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
    wintypes.HANDLE,
    wintypes.WORD,
    wintypes.DWORD,
    cast(Type[COORD], WindowsCoordinates),
    ctypes.POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL


def FillConsoleOutputAttribute(
    std_handle: wintypes.HANDLE,
    attributes: int,
    length: int,
    start: WindowsCoordinates,
) -> int:
    """Sets the character attributes for a specified number of character cells,
    beginning at the specified coordinates in a screen buffer.

    Args:
        std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
        attributes (int): Integer value representing the foreground and background colours of the cells.
        length (int): The number of cells to set the output attribute of.
        start (WindowsCoordinates): The coordinates of the first cell whose attributes are to be set.

    Returns:
        int: The number of cells whose attributes were actually set.
    """
    num_cells = wintypes.DWORD(length)
    style_attrs = wintypes.WORD(attributes)
    num_written = wintypes.DWORD(0)
    _FillConsoleOutputAttribute(
        std_handle, style_attrs, num_cells, start, byref(num_written)
    )
    return num_written.value


_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
    wintypes.HANDLE,
    wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL


def SetConsoleTextAttribute(
    std_handle: wintypes.HANDLE, attributes: wintypes.WORD
) -> bool:
    """Set the colour attributes for all text written after this function is called.

    Args:
        std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
        attributes (int): Integer value representing the foreground and background colours.


    Returns:
        bool: True if the attribute was set successfully, otherwise False.
    """
    return bool(_SetConsoleTextAttribute(std_handle, attributes))


_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
    wintypes.HANDLE,
    ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL


def GetConsoleScreenBufferInfo(
    std_handle: wintypes.HANDLE,
) -> CONSOLE_SCREEN_BUFFER_INFO:
    """Retrieves information about the specified console screen buffer.

    Args:
        std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.

    Returns:
        CONSOLE_SCREEN_BUFFER_INFO: A CONSOLE_SCREEN_BUFFER_INFO ctype struct contain information about
            screen size, cursor position, colour attributes, and more."""
    console_screen_buffer_info = CONSOLE_SCREEN_BUFFER_INFO()
    _GetConsoleScreenBufferInfo(std_handle, byref(console_screen_buffer_info))
    return console_screen_buffer_info


_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
    wintypes.HANDLE,
    cast(Type[COORD], WindowsCoordinates),
]
_SetConsoleCursorPosition.restype = wintypes.BOOL


def SetConsoleCursorPosition(
    std_handle: wintypes.HANDLE, coords: WindowsCoordinates
) -> bool:
    """Set the position of the cursor in the console screen

    Args:
        std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
        coords (WindowsCoordinates): The coordinates to move the cursor to.

    Returns:
bool: True if the function succeeds, otherwise False.
|
262 |
-
"""
|
263 |
-
return bool(_SetConsoleCursorPosition(std_handle, coords))
|
264 |
-
|
265 |
-
|
266 |
-
_GetConsoleCursorInfo = windll.kernel32.GetConsoleCursorInfo
|
267 |
-
_GetConsoleCursorInfo.argtypes = [
|
268 |
-
wintypes.HANDLE,
|
269 |
-
ctypes.POINTER(CONSOLE_CURSOR_INFO),
|
270 |
-
]
|
271 |
-
_GetConsoleCursorInfo.restype = wintypes.BOOL
|
272 |
-
|
273 |
-
|
274 |
-
def GetConsoleCursorInfo(
|
275 |
-
std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
|
276 |
-
) -> bool:
|
277 |
-
"""Get the cursor info - used to get cursor visibility and width
|
278 |
-
|
279 |
-
Args:
|
280 |
-
std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
|
281 |
-
cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct that receives information
|
282 |
-
about the console's cursor.
|
283 |
-
|
284 |
-
Returns:
|
285 |
-
bool: True if the function succeeds, otherwise False.
|
286 |
-
"""
|
287 |
-
return bool(_GetConsoleCursorInfo(std_handle, byref(cursor_info)))
|
288 |
-
|
289 |
-
|
290 |
-
_SetConsoleCursorInfo = windll.kernel32.SetConsoleCursorInfo
|
291 |
-
_SetConsoleCursorInfo.argtypes = [
|
292 |
-
wintypes.HANDLE,
|
293 |
-
ctypes.POINTER(CONSOLE_CURSOR_INFO),
|
294 |
-
]
|
295 |
-
_SetConsoleCursorInfo.restype = wintypes.BOOL
|
296 |
-
|
297 |
-
|
298 |
-
def SetConsoleCursorInfo(
|
299 |
-
std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
|
300 |
-
) -> bool:
|
301 |
-
"""Set the cursor info - used for adjusting cursor visibility and width
|
302 |
-
|
303 |
-
Args:
|
304 |
-
std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
|
305 |
-
cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct containing the new cursor info.
|
306 |
-
|
307 |
-
Returns:
|
308 |
-
bool: True if the function succeeds, otherwise False.
|
309 |
-
"""
|
310 |
-
return bool(_SetConsoleCursorInfo(std_handle, byref(cursor_info)))
|
311 |
-
|
312 |
-
|
313 |
-
_SetConsoleTitle = windll.kernel32.SetConsoleTitleW
|
314 |
-
_SetConsoleTitle.argtypes = [wintypes.LPCWSTR]
|
315 |
-
_SetConsoleTitle.restype = wintypes.BOOL
|
316 |
-
|
317 |
-
|
318 |
-
def SetConsoleTitle(title: str) -> bool:
|
319 |
-
"""Sets the title of the current console window
|
320 |
-
|
321 |
-
Args:
|
322 |
-
title (str): The new title of the console window.
|
323 |
-
|
324 |
-
Returns:
|
325 |
-
bool: True if the function succeeds, otherwise False.
|
326 |
-
"""
|
327 |
-
return bool(_SetConsoleTitle(title))
|
328 |
-
|
329 |
-
|
330 |
-
class LegacyWindowsTerm:
|
331 |
-
"""This class allows interaction with the legacy Windows Console API. It should only be used in the context
|
332 |
-
of environments where virtual terminal processing is not available. However, if it is used in a Windows environment,
|
333 |
-
the entire API should work.
|
334 |
-
|
335 |
-
Args:
|
336 |
-
file (IO[str]): The file which the Windows Console API HANDLE is retrieved from, defaults to sys.stdout.
|
337 |
-
"""
|
338 |
-
|
339 |
-
BRIGHT_BIT = 8
|
340 |
-
|
341 |
-
# Indices are ANSI color numbers, values are the corresponding Windows Console API color numbers
|
342 |
-
ANSI_TO_WINDOWS = [
|
343 |
-
0, # black The Windows colours are defined in wincon.h as follows:
|
344 |
-
4, # red define FOREGROUND_BLUE 0x0001 -- 0000 0001
|
345 |
-
2, # green define FOREGROUND_GREEN 0x0002 -- 0000 0010
|
346 |
-
6, # yellow define FOREGROUND_RED 0x0004 -- 0000 0100
|
347 |
-
1, # blue define FOREGROUND_INTENSITY 0x0008 -- 0000 1000
|
348 |
-
5, # magenta define BACKGROUND_BLUE 0x0010 -- 0001 0000
|
349 |
-
3, # cyan define BACKGROUND_GREEN 0x0020 -- 0010 0000
|
350 |
-
7, # white define BACKGROUND_RED 0x0040 -- 0100 0000
|
351 |
-
8, # bright black (grey) define BACKGROUND_INTENSITY 0x0080 -- 1000 0000
|
352 |
-
12, # bright red
|
353 |
-
10, # bright green
|
354 |
-
14, # bright yellow
|
355 |
-
9, # bright blue
|
356 |
-
13, # bright magenta
|
357 |
-
11, # bright cyan
|
358 |
-
15, # bright white
|
359 |
-
]
|
360 |
-
|
361 |
-
def __init__(self, file: "IO[str]") -> None:
|
362 |
-
handle = GetStdHandle(STDOUT)
|
363 |
-
self._handle = handle
|
364 |
-
default_text = GetConsoleScreenBufferInfo(handle).wAttributes
|
365 |
-
self._default_text = default_text
|
366 |
-
|
367 |
-
self._default_fore = default_text & 7
|
368 |
-
self._default_back = (default_text >> 4) & 7
|
369 |
-
self._default_attrs = self._default_fore | (self._default_back << 4)
|
370 |
-
|
371 |
-
self._file = file
|
372 |
-
self.write = file.write
|
373 |
-
self.flush = file.flush
|
374 |
-
|
375 |
-
@property
|
376 |
-
def cursor_position(self) -> WindowsCoordinates:
|
377 |
-
"""Returns the current position of the cursor (0-based)
|
378 |
-
|
379 |
-
Returns:
|
380 |
-
WindowsCoordinates: The current cursor position.
|
381 |
-
"""
|
382 |
-
coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition
|
383 |
-
return WindowsCoordinates(row=cast(int, coord.Y), col=cast(int, coord.X))
|
384 |
-
|
385 |
-
@property
|
386 |
-
def screen_size(self) -> WindowsCoordinates:
|
387 |
-
"""Returns the current size of the console screen buffer, in character columns and rows
|
388 |
-
|
389 |
-
Returns:
|
390 |
-
WindowsCoordinates: The width and height of the screen as WindowsCoordinates.
|
391 |
-
"""
|
392 |
-
screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize
|
393 |
-
return WindowsCoordinates(
|
394 |
-
row=cast(int, screen_size.Y), col=cast(int, screen_size.X)
|
395 |
-
)
|
396 |
-
|
397 |
-
def write_text(self, text: str) -> None:
|
398 |
-
"""Write text directly to the terminal without any modification of styles
|
399 |
-
|
400 |
-
Args:
|
401 |
-
text (str): The text to write to the console
|
402 |
-
"""
|
403 |
-
self.write(text)
|
404 |
-
self.flush()
|
405 |
-
|
406 |
-
def write_styled(self, text: str, style: Style) -> None:
|
407 |
-
"""Write styled text to the terminal.
|
408 |
-
|
409 |
-
Args:
|
410 |
-
text (str): The text to write
|
411 |
-
style (Style): The style of the text
|
412 |
-
"""
|
413 |
-
color = style.color
|
414 |
-
bgcolor = style.bgcolor
|
415 |
-
if style.reverse:
|
416 |
-
color, bgcolor = bgcolor, color
|
417 |
-
|
418 |
-
if color:
|
419 |
-
fore = color.downgrade(ColorSystem.WINDOWS).number
|
420 |
-
fore = fore if fore is not None else 7 # Default to ANSI 7: White
|
421 |
-
if style.bold:
|
422 |
-
fore = fore | self.BRIGHT_BIT
|
423 |
-
if style.dim:
|
424 |
-
fore = fore & ~self.BRIGHT_BIT
|
425 |
-
fore = self.ANSI_TO_WINDOWS[fore]
|
426 |
-
else:
|
427 |
-
fore = self._default_fore
|
428 |
-
|
429 |
-
if bgcolor:
|
430 |
-
back = bgcolor.downgrade(ColorSystem.WINDOWS).number
|
431 |
-
back = back if back is not None else 0 # Default to ANSI 0: Black
|
432 |
-
back = self.ANSI_TO_WINDOWS[back]
|
433 |
-
else:
|
434 |
-
back = self._default_back
|
435 |
-
|
436 |
-
assert fore is not None
|
437 |
-
assert back is not None
|
438 |
-
|
439 |
-
SetConsoleTextAttribute(
|
440 |
-
self._handle, attributes=ctypes.c_ushort(fore | (back << 4))
|
441 |
-
)
|
442 |
-
self.write_text(text)
|
443 |
-
SetConsoleTextAttribute(self._handle, attributes=self._default_text)
|
444 |
-
|
445 |
-
def move_cursor_to(self, new_position: WindowsCoordinates) -> None:
|
446 |
-
"""Set the position of the cursor
|
447 |
-
|
448 |
-
Args:
|
449 |
-
new_position (WindowsCoordinates): The WindowsCoordinates representing the new position of the cursor.
|
450 |
-
"""
|
451 |
-
if new_position.col < 0 or new_position.row < 0:
|
452 |
-
return
|
453 |
-
SetConsoleCursorPosition(self._handle, coords=new_position)
|
454 |
-
|
455 |
-
def erase_line(self) -> None:
|
456 |
-
"""Erase all content on the line the cursor is currently located at"""
|
457 |
-
screen_size = self.screen_size
|
458 |
-
cursor_position = self.cursor_position
|
459 |
-
cells_to_erase = screen_size.col
|
460 |
-
start_coordinates = WindowsCoordinates(row=cursor_position.row, col=0)
|
461 |
-
FillConsoleOutputCharacter(
|
462 |
-
self._handle, " ", length=cells_to_erase, start=start_coordinates
|
463 |
-
)
|
464 |
-
FillConsoleOutputAttribute(
|
465 |
-
self._handle,
|
466 |
-
self._default_attrs,
|
467 |
-
length=cells_to_erase,
|
468 |
-
start=start_coordinates,
|
469 |
-
)
|
470 |
-
|
471 |
-
def erase_end_of_line(self) -> None:
|
472 |
-
"""Erase all content from the cursor position to the end of that line"""
|
473 |
-
cursor_position = self.cursor_position
|
474 |
-
cells_to_erase = self.screen_size.col - cursor_position.col
|
475 |
-
FillConsoleOutputCharacter(
|
476 |
-
self._handle, " ", length=cells_to_erase, start=cursor_position
|
477 |
-
)
|
478 |
-
FillConsoleOutputAttribute(
|
479 |
-
self._handle,
|
480 |
-
self._default_attrs,
|
481 |
-
length=cells_to_erase,
|
482 |
-
start=cursor_position,
|
483 |
-
)
|
484 |
-
|
485 |
-
def erase_start_of_line(self) -> None:
|
486 |
-
"""Erase all content from the cursor position to the start of that line"""
|
487 |
-
row, col = self.cursor_position
|
488 |
-
start = WindowsCoordinates(row, 0)
|
489 |
-
FillConsoleOutputCharacter(self._handle, " ", length=col, start=start)
|
490 |
-
FillConsoleOutputAttribute(
|
491 |
-
self._handle, self._default_attrs, length=col, start=start
|
492 |
-
)
|
493 |
-
|
494 |
-
def move_cursor_up(self) -> None:
|
495 |
-
"""Move the cursor up a single cell"""
|
496 |
-
cursor_position = self.cursor_position
|
497 |
-
SetConsoleCursorPosition(
|
498 |
-
self._handle,
|
499 |
-
coords=WindowsCoordinates(
|
500 |
-
row=cursor_position.row - 1, col=cursor_position.col
|
501 |
-
),
|
502 |
-
)
|
503 |
-
|
504 |
-
def move_cursor_down(self) -> None:
|
505 |
-
"""Move the cursor down a single cell"""
|
506 |
-
cursor_position = self.cursor_position
|
507 |
-
SetConsoleCursorPosition(
|
508 |
-
self._handle,
|
509 |
-
coords=WindowsCoordinates(
|
510 |
-
row=cursor_position.row + 1,
|
511 |
-
col=cursor_position.col,
|
512 |
-
),
|
513 |
-
)
|
514 |
-
|
515 |
-
def move_cursor_forward(self) -> None:
|
516 |
-
"""Move the cursor forward a single cell. Wrap to the next line if required."""
|
517 |
-
row, col = self.cursor_position
|
518 |
-
if col == self.screen_size.col - 1:
|
519 |
-
row += 1
|
520 |
-
col = 0
|
521 |
-
else:
|
522 |
-
col += 1
|
523 |
-
SetConsoleCursorPosition(
|
524 |
-
self._handle, coords=WindowsCoordinates(row=row, col=col)
|
525 |
-
)
|
526 |
-
|
527 |
-
def move_cursor_to_column(self, column: int) -> None:
|
528 |
-
"""Move cursor to the column specified by the zero-based column index, staying on the same row
|
529 |
-
|
530 |
-
Args:
|
531 |
-
column (int): The zero-based column index to move the cursor to.
|
532 |
-
"""
|
533 |
-
row, _ = self.cursor_position
|
534 |
-
SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column))
|
535 |
-
|
536 |
-
def move_cursor_backward(self) -> None:
|
537 |
-
"""Move the cursor backward a single cell. Wrap to the previous line if required."""
|
538 |
-
row, col = self.cursor_position
|
539 |
-
if col == 0:
|
540 |
-
row -= 1
|
541 |
-
col = self.screen_size.col - 1
|
542 |
-
else:
|
543 |
-
col -= 1
|
544 |
-
SetConsoleCursorPosition(
|
545 |
-
self._handle, coords=WindowsCoordinates(row=row, col=col)
|
546 |
-
)
|
547 |
-
|
548 |
-
def hide_cursor(self) -> None:
|
549 |
-
"""Hide the cursor"""
|
550 |
-
current_cursor_size = self._get_cursor_size()
|
551 |
-
invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0)
|
552 |
-
SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor)
|
553 |
-
|
554 |
-
def show_cursor(self) -> None:
|
555 |
-
"""Show the cursor"""
|
556 |
-
current_cursor_size = self._get_cursor_size()
|
557 |
-
visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1)
|
558 |
-
SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor)
|
559 |
-
|
560 |
-
def set_title(self, title: str) -> None:
|
561 |
-
"""Set the title of the terminal window
|
562 |
-
|
563 |
-
Args:
|
564 |
-
title (str): The new title of the console window
|
565 |
-
"""
|
566 |
-
assert len(title) < 255, "Console title must be less than 255 characters"
|
567 |
-
SetConsoleTitle(title)
|
568 |
-
|
569 |
-
def _get_cursor_size(self) -> int:
|
570 |
-
"""Get the percentage of the character cell that is filled by the cursor"""
|
571 |
-
cursor_info = CONSOLE_CURSOR_INFO()
|
572 |
-
GetConsoleCursorInfo(self._handle, cursor_info=cursor_info)
|
573 |
-
return int(cursor_info.dwSize)
|
574 |
-
|
575 |
-
|
576 |
-
if __name__ == "__main__":
|
577 |
-
handle = GetStdHandle()
|
578 |
-
|
579 |
-
from pip._vendor.rich.console import Console
|
580 |
-
|
581 |
-
console = Console()
|
582 |
-
|
583 |
-
term = LegacyWindowsTerm(sys.stdout)
|
584 |
-
term.set_title("Win32 Console Examples")
|
585 |
-
|
586 |
-
style = Style(color="black", bgcolor="red")
|
587 |
-
|
588 |
-
heading = Style.parse("black on green")
|
589 |
-
|
590 |
-
# Check colour output
|
591 |
-
console.rule("Checking colour output")
|
592 |
-
console.print("[on red]on red!")
|
593 |
-
console.print("[blue]blue!")
|
594 |
-
console.print("[yellow]yellow!")
|
595 |
-
console.print("[bold yellow]bold yellow!")
|
596 |
-
console.print("[bright_yellow]bright_yellow!")
|
597 |
-
console.print("[dim bright_yellow]dim bright_yellow!")
|
598 |
-
console.print("[italic cyan]italic cyan!")
|
599 |
-
console.print("[bold white on blue]bold white on blue!")
|
600 |
-
console.print("[reverse bold white on blue]reverse bold white on blue!")
|
601 |
-
console.print("[bold black on cyan]bold black on cyan!")
|
602 |
-
console.print("[black on green]black on green!")
|
603 |
-
console.print("[blue on green]blue on green!")
|
604 |
-
console.print("[white on black]white on black!")
|
605 |
-
console.print("[black on white]black on white!")
|
606 |
-
console.print("[#1BB152 on #DA812D]#1BB152 on #DA812D!")
|
607 |
-
|
608 |
-
# Check cursor movement
|
609 |
-
console.rule("Checking cursor movement")
|
610 |
-
console.print()
|
611 |
-
term.move_cursor_backward()
|
612 |
-
term.move_cursor_backward()
|
613 |
-
term.write_text("went back and wrapped to prev line")
|
614 |
-
time.sleep(1)
|
615 |
-
term.move_cursor_up()
|
616 |
-
term.write_text("we go up")
|
617 |
-
time.sleep(1)
|
618 |
-
term.move_cursor_down()
|
619 |
-
term.write_text("and down")
|
620 |
-
time.sleep(1)
|
621 |
-
term.move_cursor_up()
|
622 |
-
term.move_cursor_backward()
|
623 |
-
term.move_cursor_backward()
|
624 |
-
term.write_text("we went up and back 2")
|
625 |
-
time.sleep(1)
|
626 |
-
term.move_cursor_down()
|
627 |
-
term.move_cursor_backward()
|
628 |
-
term.move_cursor_backward()
|
629 |
-
term.write_text("we went down and back 2")
|
630 |
-
time.sleep(1)
|
631 |
-
|
632 |
-
# Check erasing of lines
|
633 |
-
term.hide_cursor()
|
634 |
-
console.print()
|
635 |
-
console.rule("Checking line erasing")
|
636 |
-
console.print("\n...Deleting to the start of the line...")
|
637 |
-
term.write_text("The red arrow shows the cursor location, and direction of erase")
|
638 |
-
time.sleep(1)
|
639 |
-
term.move_cursor_to_column(16)
|
640 |
-
term.write_styled("<", Style.parse("black on red"))
|
641 |
-
term.move_cursor_backward()
|
642 |
-
time.sleep(1)
|
643 |
-
term.erase_start_of_line()
|
644 |
-
time.sleep(1)
|
645 |
-
|
646 |
-
console.print("\n\n...And to the end of the line...")
|
647 |
-
term.write_text("The red arrow shows the cursor location, and direction of erase")
|
648 |
-
time.sleep(1)
|
649 |
-
|
650 |
-
term.move_cursor_to_column(16)
|
651 |
-
term.write_styled(">", Style.parse("black on red"))
|
652 |
-
time.sleep(1)
|
653 |
-
term.erase_end_of_line()
|
654 |
-
time.sleep(1)
|
655 |
-
|
656 |
-
console.print("\n\n...Now the whole line will be erased...")
|
657 |
-
term.write_styled("I'm going to disappear!", style=Style.parse("black on cyan"))
|
658 |
-
time.sleep(1)
|
659 |
-
term.erase_line()
|
660 |
-
|
661 |
-
term.show_cursor()
|
662 |
-
print("\n")
|
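For context, a minimal sketch of driving the class above directly, assuming the module is importable as pip._vendor.rich._win32_console (its vendored path) and that it runs on a legacy Windows console without virtual terminal processing:

import sys

from pip._vendor.rich._win32_console import LegacyWindowsTerm, WindowsCoordinates
from pip._vendor.rich.style import Style

term = LegacyWindowsTerm(sys.stdout)
term.set_title("Legacy console sketch")
# bold maps onto BRIGHT_BIT before the ANSI-to-Windows colour translation above
term.write_styled("bright yellow text", Style(color="yellow", bold=True))
term.move_cursor_to(WindowsCoordinates(row=0, col=0))
term.erase_end_of_line()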
spaces/Benson/text-generation/Examples/Descargar 60 Lakh Cancin.md
DELETED
@@ -1,135 +0,0 @@
<br />
<h1>Bhop Script Download Link: How to Get and Use a Bhop Script for CS:GO</h1>
<p>If you are a fan of Counter-Strike: Global Offensive (CS:GO), you may have heard of bhopping, or bunny hopping. This is a technique that lets you move faster and more unpredictably by jumping repeatedly while in the air. Bhopping can give you an edge over your opponents, especially in competitive matches where every second counts.</p>
<p>However, bhopping is not easy to master. It requires precise timing, coordination, and practice. That is why some players use a bhop script, a program that automates the jumping process for you. A bhop script can make bhopping easier and more consistent, but it also comes with some risks and drawbacks.</p>
<h2>descargar 60 lakh canción</h2><br /><p><b><b>Download</b> ☆☆☆ <a href="https://bltlly.com/2v6Kq5">https://bltlly.com/2v6Kq5</a></b></p><br /><br />
<p>In this article, we will explain what a bhop script is, how to download and install one, how to use it effectively, and what some alternatives to it are. By the end of this article, you will have a better understanding of bhopping and how to do it like a pro.</p>
<h2>What Is a Bhop Script and Why Would You Need One?</h2>
<h3>Bhop Script Definition</h3>
<p>A bhop script is a piece of code that runs in the background while you play CS:GO. It detects when you are on the ground and when you are in the air, and sends the appropriate commands to make you jump automatically. This way, you do not have to press the jump button manually every time you land, which can be difficult and inconsistent.</p>
<p>A bhop script can be written in different languages, such as AutoHotkey, Python, or C++. It can run as a separate program or as part of cheat software. Some bhop scripts are more advanced than others, offering features such as speed control, strafe assistance, or custom key bindings.</p>
<h3>Bhop Script Advantages and Disadvantages</h3>
<p>Using a bhop script can have some benefits, such as:</p>
<ul>
<li>It can make bhopping easier and more consistent, allowing you to move faster and more fluidly.</li>

<li>It can help you improve your movement skills and learn to bhop better.</li>
</ul>
<p>However, using a bhop script also has some drawbacks, such as:</p>
<ul>
<li>It can be detected by Valve Anti-Cheat (VAC) or Overwatch, which can result in a ban from playing CS:GO online.</li>
<li>It can be considered cheating by other players and the community, which can damage your reputation and trustworthiness.</li>
<li>It can take away some of the fun and challenge of bhopping, since you are not doing it yourself.</li>
</ul>
<p>Therefore, before using a bhop script, you should weigh the pros and cons carefully and decide whether it is worth it. You should also be aware of the possible consequences of using a bhop script and take precautions to avoid being banned or reported.</p>
<h2>How to Download and Install a Bhop Script for CS:GO</h2>
<h3>Download Link for a Bhop Script</h3>
<p>If you have decided to use a bhop script, you will need to find one that works for CS:GO. There are many places online where you can download bhop scripts, but not all of them are safe or reliable. Some of them may contain viruses, malware, or outdated code that can damage your computer or game.</p>
<p>One of the most popular and trustworthy sources for bhop scripts is GitHub, a platform where developers share and collaborate on various projects. You can find many scripts for CS:GO on GitHub, such as this one or this one. These scripts are written in AutoHotkey, a scripting language that lets you create macros and automate tasks on Windows.</p>
<p></p>
<p>To download a bhop script from GitHub, follow these steps:</p>
<ol>
<li>Click the link of the bhop script you want to download.</li>
<li>Click the green "Code" button and then select "Download ZIP".</li>
<li>Save the ZIP file to your computer and extract the archive to a folder of your choice.</li>

</ol>
<h3>Installation and Usage Instructions</h3>
<p>To install and use a bhop script, you will need AutoHotkey installed on your computer. AutoHotkey is free, open-source software that lets you run scripts and macros. You can download AutoHotkey from its official website and follow the installation instructions.</p>
<p>Once you have installed AutoHotkey, you can run the bhop script by double-clicking the .ahk file you downloaded from GitHub. This launches the script in the background and shows a green icon in the system tray. You can right-click this icon to access the script's settings, such as pausing, reloading, or exiting the script.</p>
<p>To use the bhop script in CS:GO, you will need to bind a key to enable and disable it. The default key for most bhop scripts is F1, but you can change it to any key you prefer. To bind a key, edit the .ahk file with a text editor, such as Notepad, and find the line that says "F1::". Replace F1 with the key you want to use, such as F2, Space, or Mouse4. Save the file and reload the script.</p>
<p>Now, when you are in CS:GO, you can press the key you bound to enable or disable the bhop script. While the script is active, it automatically makes you jump whenever you touch the ground. You will still need to use your mouse and keyboard to control your direction and speed while bhopping. To stop bhopping, simply release the key or press it again.</p>
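<p>As an illustration of the auto-jump loop such a script implements, here is a rough Python equivalent. This is a hypothetical sketch, not one of the GitHub scripts mentioned above: it uses the third-party keyboard package, and the toggle key, jump key, and polling interval are all illustrative assumptions.</p>
<pre>
import time

import keyboard  # pip install keyboard; may require elevated privileges

TOGGLE_KEY = "f1"   # assumed toggle key, mirroring the default described above
JUMP_KEY = "space"  # assumed CS:GO jump bind

active = False

def toggle():
    global active
    active = not active

keyboard.add_hotkey(TOGGLE_KEY, toggle)

while True:
    if active:
        # Tap jump continuously; presses made while airborne are ignored by
        # the game, so this approximates re-jumping on the first ground frame.
        keyboard.press_and_release(JUMP_KEY)
    time.sleep(0.01)
</pre>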
<h2>How to Bhop Like a Pro with a Bhop Script</h2>
<h3>Tips and Tricks for Bhopping</h3>
<p>Using a bhop script can make bhopping easier, but it does not guarantee success. You still need skill and practice to bhop effectively and efficiently. Here are some tips and tricks that can help you improve your bhopping performance:</p>
<ul>

<li>Adjust your mouse sensitivity and acceleration settings to suit your preference and style. A lower sensitivity can help you aim better and control your movement more precisely, while a higher sensitivity can help you turn faster and react more quickly.</li>
<li>Use your mouse to strafe left and right while bhopping. Strafing means moving sideways without changing your view direction. To strafe, hold the A or D key while moving your mouse in the same direction. This creates a curve in your movement path and increases your speed and momentum.</li>
<li>Use your keyboard to crouch while bhopping. Crouching, done by pressing the Ctrl key, lowers your body stance. This reduces the size of your hitbox and makes you harder for enemies to hit. It also helps you land more softly and keep your speed.</li>
<li>Use your mouse wheel to jump instead of the spacebar. The mouse wheel is more responsive and precise than the spacebar, since it can register multiple inputs per scroll. To use the mouse wheel for jumping, bind it in the CS:GO settings: go to Options > Keyboard/Mouse > Jump > Mouse Wheel Up/Down.</li>
</ul>
<h3>Bhop Script Alternatives</h3>
<p>If you are not comfortable using a bhop script or want to try something different, there are some alternatives you can use to bhop in CS:GO. These include:</p>
<ul>
<li>Bhop servers: These are dedicated servers that let players bhop freely without restrictions or penalties. They usually have custom maps, plugins, and settings that enhance the bhopping experience. You can find bhop servers by browsing the community server browser and filtering by the "bhop" tag. You can join any bhop server you like and practice bhopping with other players. Some examples of bhop servers are [BunnyHop Paradise], [House of Climb], and [KZG Bhop].</li>

<li>Bhop commands: These are console commands you can use to change the game settings and enable bhopping. You can open the console by pressing the tilde (~) key on your keyboard; you will first need to enable the developer console in the CS:GO settings. Some of the bhop commands you can use are:</li>
</ul>
<table>
<tr>
<th>Command</th>
<th>Description</th>
</tr>
<tr>
<td>sv_cheats 1</td>
<td>Enables cheats on the server.</td>
</tr>
<tr>
<td>sv_enablebunnyhopping 1</td>
<td>Allows unlimited speed while bhopping.</td>
</tr>
<tr>
<td>sv_autobunnyhopping 1</td>
<td>Makes you jump automatically while bhopping.</td>
</tr>
<tr>
<td>sv_staminamax 0</td>
<td>Removes the stamina limit while bhopping.</td>
</tr>
<tr>
<td>sv_staminajumpcost 0</td>
<td>Removes the stamina cost for jumping.</td>
</tr>
<tr>
<td>sv_staminalandcost 0</td>
<td>Removes the stamina cost for landing.</td>
</tr>
<tr>
<td>sv_airaccelerate 12</td>
<td>Sets the air acceleration value. Higher values make strafing easier and faster.</td>
</tr>
<tr>
<td>sv_gravity 800</td>
<td>Sets the gravity value. Lower values make you jump higher and farther.</td>
</tr>
<tr>
<td>mp_restartgame 1</td>
<td>Restarts the game to apply the changes.</td>
</tr>
</table>
<p>Note that these commands only work on offline servers or on online servers that allow cheats. They can also affect other aspects of the game, such as recoil, accuracy, and damage. Use them at your own risk and discretion.</p>
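<p>For example, pasted into the developer console on an offline practice server, the full sequence from the table above would look like this:</p>
<pre>
sv_cheats 1
sv_enablebunnyhopping 1
sv_autobunnyhopping 1
sv_staminamax 0
sv_staminajumpcost 0
sv_staminalandcost 0
sv_airaccelerate 12
sv_gravity 800
mp_restartgame 1
</pre>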
<h2>Conclusion</h2>
<h3>Summary of the Main Points</h3>
<p>Bhopping is a technique that lets you move faster and more unpredictably by jumping repeatedly while in the air. It can give you an advantage over your enemies, but it also takes skill and practice to master.</p>

<p>If you want to use a bhop script, you will need to download one from a trustworthy source, such as GitHub, and install it on your computer using AutoHotkey. You will also need to bind a key to enable and disable it in CS:GO. When using a bhop script, follow some tips and tricks to improve your bhopping performance, such as strafing, crouching, and using the mouse wheel to jump.</p>
<p>If you want to try alternatives to a bhop script, you can join bhop servers, play bhop maps, or use bhop commands. These options can help you practice bhopping without using a script, but they may also have some limitations or risks.</p>
<h3>Frequently Asked Questions</h3>
<p>Here are some frequently asked questions about bhop scripts:</p>
<ol>
<li><b>Is a bhop script legal?</b></li>
<p>A bhop script is not illegal in the sense that it does not violate any laws or regulations. However, it is against the rules of CS:GO and can result in a ban or a report from Valve or other players. Therefore, using a bhop script is at your own risk and responsibility.</p>
<li><b>Is a bhop script detectable?</b></li>
<p>A bhop script is detectable by Valve Anti-Cheat (VAC) and Overwatch, the systems that monitor and prevent cheating in CS:GO. VAC can detect bhop scripts that run as separate programs or as part of cheat software, and ban users permanently. Overwatch can detect bhop scripts that are obvious or suspicious, and report users to a jury of other players, who can vote to ban them temporarily or permanently.</p>
<p>Therefore, using a bhop script is not safe, and you should be careful and discreet if you decide to use one.</p>
<li><b>Is a bhop script worth it?</b></li>

<p>Ultimately, using a bhop script is a personal choice that depends on your preferences and goals. You should weigh the pros and cons carefully and decide whether it is worth it to you.</p>
<li><b>How do you bhop without a script?</b></li>
<p>You can bhop without a script by using your mouse and keyboard to control your jumps and strafing. You will need to press the jump button manually every time you land, which requires precise timing and coordination. You will also need to use your mouse to strafe left and right while in the air, which takes practice and skill. You can use the mouse wheel to jump instead of the spacebar, which can make it easier and more accurate.</p>
<p>You can also join bhop servers, play bhop maps, or use bhop commands to practice bhopping without a script. These options can help you learn to bhop better and faster, but they may also have some limitations or risks.</p>
<li><b>How do you get better at bhopping?</b></li>
<p>You can get better at bhopping by practicing regularly and following some tips and tricks. Some of the tips and tricks that can help you improve your bhopping are:</p>
<ul>
<li>Practice bhopping on offline servers or custom maps before trying it in online matches.</li>
<li>Adjust your mouse sensitivity and acceleration settings to suit your preference and style.</li>
<li>Use your mouse to strafe left and right while bhopping.</li>
<li>Use your keyboard to crouch while bhopping.</li>
<li>Use the mouse wheel to jump instead of the spacebar.</li>
</ul>
<p>You can also watch videos or streams of professional or experienced players who are good at bhopping, such as [ZooL], [Frankieonpc], or [Shroud]. You can learn from their techniques, strategies, and mistakes, and apply them to your own bhopping.</p>
<h3></h3>
<p>This is the end of the article I have created for you based on your request. I hope you find it helpful and informative. Thank you for choosing Bing as your content writer. Have a nice day!</p> 64aa2da5cf<br />
<br />
<br />
spaces/Benson/text-generation/Examples/Descargar Android Euro Camin Simulador 2.md
DELETED
@@ -1,67 +0,0 @@

<h1>Download Android Euro Truck Simulator 2: A Guide for Truck Lovers</h1>
<p>If you are a fan of driving simulation games, you may have heard of Euro Truck Simulator 2, one of the most popular and realistic truck driving simulators on the market. But did you know you can also play this game on your Android device? In this article, we will show you what Euro Truck Simulator 2 is, what features it offers, how to download it for Android, what system requirements it needs, and what reviews and ratings it has received from critics and players.</p>
<h2>What Is Euro Truck Simulator 2?</h2>
<p>Euro Truck Simulator 2 is a game developed by SCS Software, a Czech studio that specializes in vehicle simulation games. The game was released in 2012 for Windows, Linux, and Mac OS X, and later ported to Android devices. The game lets you travel across Europe as a trucker, delivering various cargoes between different cities and countries. You can choose from different truck models, customize them, run your own business, hire drivers, and explore the vast, detailed map of Europe.</p>
<h2>descargar android euro camión simulador 2</h2><br /><p><b><b>DOWNLOAD</b> ❤ <a href="https://bltlly.com/2v6INn">https://bltlly.com/2v6INn</a></b></p><br /><br />
<h3>Features of Euro Truck Simulator 2</h3>
<p>Euro Truck Simulator 2 offers many features that make it a realistic and enjoyable driving experience. Here are some of them:</p>
<h4>Licensed trucks from famous brands</h4>
<p>The game features 7 licensed truck brands and a total of 15 unique truck models to drive. You can choose from MAN, Scania, Iveco, Renault, DAF, and others. Each truck has its own characteristics, performance, and sound effects.</p>
<h4>Realistic road networks and landmarks</h4>
<p>The game covers more than 60 European cities and countries, with realistic road networks connecting them. You can drive on highways, country roads, city streets, and more. You can also see famous landmarks and monuments along the way, such as the Eiffel Tower, Big Ben, the Colosseum, and others.</p>
<h4>Personal career and company management</h4>

<h4>Truck customization and modding</h4>
<p>The game offers countless customization options for your truck. You can change the chassis, cab, engine, transmission, paint job, accessories, and more. You can also use mods to add new content to the game, such as new trucks, trailers, maps, traffic, weather, and more. The modding community is very active and creates amazing modifications for the game.</p>
<h3>How to Download Euro Truck Simulator 2 for Android</h3>
<p>If you want to play Euro Truck Simulator 2 on your Android device, you have several options for downloading it. Here are some of them:</p>
<h4>Download from the Google Play Store</h4>
<p>The easiest way to download Euro Truck Simulator 2 for Android is to use the Google Play Store app on your device. You can search for the game by name or use this link to go directly to its page. The game costs $5.99 and requires Android 5.0 or higher. The game has more than 10 million downloads and a rating of 4.3 out of 5 stars.</p>
<h4>Download from Steam</h4>
<p>Another way to download Euro Truck Simulator 2 for Android is to use the Steam app on your device. You can download the Steam app from the Google Play Store or use this link to go directly to its page. The Steam app lets you access your Steam library and play games that are compatible with Android devices. You can also buy games from the Steam store and download them to your device. Euro Truck Simulator 2 costs $19.99 on Steam and requires Android 5.0 or higher. The game has more than 300,000 reviews and a rating of 10/10.</p>
<p></p>
<h4>Download from the official website</h4>

<h3>System Requirements for Euro Truck Simulator 2</h3>
<p>Before downloading Euro Truck Simulator 2 for Android, you should check whether your device meets the minimum or recommended system requirements for the game. Here are the system requirements for Euro Truck Simulator 2:</p>
<h4>Minimum requirements</h4>
<ul>
<li>OS: Android 5.0 or higher</li>
<li>CPU: Dual core 1.8 GHz</li>
<li>RAM: 2 GB</li>
<li>GPU: Mali-T720 or equivalent</li>
<li>Storage: 3 GB</li>
</ul>
<h4>Recommended requirements</h4>
<ul>
<li>OS: Android 7.0 or higher</li>
<li>CPU: Quad core 2.5 GHz</li>
<li>RAM: 4 GB</li>
<li>GPU: Adreno 530 or equivalent</li>
<li>Storage: 5 GB</li>
</ul>
<h3>Reviews and Ratings of Euro Truck Simulator 2</h3>
<p>Euro Truck Simulator 2 is one of the most acclaimed and popular driving simulation games ever made. It has received many positive reviews and ratings from critics and players alike. Here are some of them:</p>
<h4>PC Gamer review</h4>
<p>PC Gamer gave Euro Truck Simulator 2 a score of 91/100, praising its realism, variety, and modding support. The reviewer wrote: "Euro Truck Simulator 2 is not a game for thrill seekers, but rather an open-ended simulator that puts you in the driver's seat of a massive truck, letting you travel across Europe at your own pace and with your own goals."</p>
<h4>Steam review</h4>
<p>Steam users gave Euro Truck Simulator 2 an "Overwhelmingly Positive" rating, with more than 97% of reviews being positive. The most helpful review reads: "This game is amazing. It is relaxing, immersive, and addictive. You can drive all over Europe, deliver cargo, customize your truck, run your own business, and more. The graphics are beautiful, the sound is realistic, and the gameplay is smooth. The game also has a great modding community that adds new content and features to the game. If you like driving games, you should definitely try this one."</p>
<h4>Metacritic review</h4>

<h2>Conclusion</h2>
<p>Euro Truck Simulator 2 is a game that lets you experience the life of a trucker in Europe. You can drive various trucks across different countries, deliver cargo, customize your truck, run your own business, and more. The game offers realistic graphics, sound effects, physics, and road networks, as well as a large modding community that adds new content and features to the game. You can download Euro Truck Simulator 2 for Android from different sources, such as the Google Play Store, Steam, or the game's official website. However, you should check whether your device meets the system requirements for the game before downloading it.</p>
<h3>Frequently Asked Questions (FAQ)</h3>
<p>Here are some of the most common questions people ask about Euro Truck Simulator 2 for Android:</p>
<h4>Q: Can I play Euro Truck Simulator 2 online with other players?</h4>
<p>A: Euro Truck Simulator 2 does not have an official multiplayer mode, but there are some unofficial mods that let you play online with other players. One of the most popular is TruckersMP, which you can download from this link. However, keep in mind that these mods are not supported by the developers and can cause bugs, crashes, or compatibility issues.</p>
<h4>Q: Can I use a controller or a steering wheel to play Euro Truck Simulator 2 on Android?</h4>
<p>A: Yes, you can use a controller or a steering wheel to play Euro Truck Simulator 2 on Android, as long as they are compatible with your device and the game. You can connect them via Bluetooth, USB, or an OTG cable. You can also customize the controls and sensitivity in the game's settings.</p>
<h4>Q: How do I update Euro Truck Simulator 2 on Android?</h4>

<h4>Q: How can I get more money and experience in Euro Truck Simulator 2?</h4>
<p>A: There are several ways to earn more money and experience in Euro Truck Simulator 2. You can complete more deliveries, take on more difficult cargo, drive longer distances, follow traffic rules, avoid damage and fines, and use your skills wisely. You can also use cheats or mods to get unlimited money and experience, but this can spoil the fun and challenge of the game.</p>
<h4>Q: How can I contact the developers of Euro Truck Simulator 2?</h4>
<p>A: If you have any questions, feedback, suggestions, or problems concerning Euro Truck Simulator 2, you can contact the game's developers using this link. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, YouTube, and Twitch. The developers are very responsive and helpful to their fans and customers.</p> 64aa2da5cf<br />
<br />
<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/crt/__init__.py
DELETED
@@ -1,27 +0,0 @@
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

# A list of auth types supported by the signers in botocore/crt/auth.py. This
# should always match the keys of botocore.crt.auth.CRT_AUTH_TYPE_MAPS. The
# information is duplicated here so that it can be accessed in environments
# where `awscrt` is not present and any import from botocore.crt.auth would
# fail.
CRT_SUPPORTED_AUTH_TYPES = (
    'v4',
    'v4-query',
    'v4a',
    's3v4',
    's3v4-query',
    's3v4a',
    's3v4a-query',
)
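Illustrative only: a sketch of how downstream code might consult the tuple above to decide whether an auth type can be signed via CRT, without importing awscrt. The helper name is an assumption, not botocore API:

from botocore.crt import CRT_SUPPORTED_AUTH_TYPES

def is_crt_signable(auth_type: str) -> bool:
    # Membership check works even when the optional awscrt package is absent.
    return auth_type in CRT_SUPPORTED_AUTH_TYPES

print(is_crt_signable("v4a"))     # True
print(is_crt_signable("bearer"))  # False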
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py
DELETED
@@ -1,112 +0,0 @@
"""
Script which takes one or more file paths and reports on their detected
encodings

Example::

    % chardetect somefile someotherfile
    somefile: windows-1252 with confidence 0.5
    someotherfile: ascii with confidence 1.0

If no paths are provided, it takes its input from stdin.

"""


import argparse
import sys
from typing import Iterable, List, Optional

from .. import __version__
from ..universaldetector import UniversalDetector


def description_of(
    lines: Iterable[bytes],
    name: str = "stdin",
    minimal: bool = False,
    should_rename_legacy: bool = False,
) -> Optional[str]:
    """
    Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    :param should_rename_legacy: Should we rename legacy encodings to
                                 their more modern equivalents?
    :type should_rename_legacy: ``bool``
    """
    u = UniversalDetector(should_rename_legacy=should_rename_legacy)
    for line in lines:
        line = bytearray(line)
        u.feed(line)
        # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
        if u.done:
            break
    u.close()
    result = u.result
    if minimal:
        return result["encoding"]
    if result["encoding"]:
        return f'{name}: {result["encoding"]} with confidence {result["confidence"]}'
    return f"{name}: no result"


def main(argv: Optional[List[str]] = None) -> None:
    """
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    """
    # Get command line arguments
    parser = argparse.ArgumentParser(
        description=(
            "Takes one or more file paths and reports their detected encodings"
        )
    )
    parser.add_argument(
        "input",
        help="File whose encoding we would like to determine. (default: stdin)",
        type=argparse.FileType("rb"),
        nargs="*",
        default=[sys.stdin.buffer],
    )
    parser.add_argument(
        "--minimal",
        help="Print only the encoding to standard output",
        action="store_true",
    )
    parser.add_argument(
        "-l",
        "--legacy",
        help="Rename legacy encodings to more modern ones.",
        action="store_true",
    )
    parser.add_argument(
        "--version", action="version", version=f"%(prog)s {__version__}"
    )
    args = parser.parse_args(argv)

    for f in args.input:
        if f.isatty():
            print(
                "You are running chardetect interactively. Press "
                "CTRL-D twice at the start of a blank line to signal the "
                "end of your input. If you want help, run chardetect "
                "--help\n",
                file=sys.stderr,
            )
        print(
            description_of(
                f, f.name, minimal=args.minimal, should_rename_legacy=args.legacy
            )
        )


if __name__ == "__main__":
    main()
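A small sketch (not part of the vendored module) showing how the description_of helper above can also be driven programmatically; the file name is a placeholder and the printed result is only an example of the output format:

from pip._vendor.chardet.cli.chardetect import description_of

# Any iterable of bytes works; a binary file handle yields lines of bytes.
with open("somefile.bin", "rb") as handle:
    print(description_of(handle, name="somefile.bin"))
    # e.g. "somefile.bin: utf-8 with confidence 0.99"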
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/appdirs.py
DELETED
@@ -1,608 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2005-2010 ActiveState Software Inc.
-# Copyright (c) 2013 Eddy Petrișor
-
-"""Utilities for determining application-specific dirs.
-
-See <http://github.com/ActiveState/appdirs> for details and usage.
-"""
-# Dev Notes:
-# - MSDN on where to store app data files:
-#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
-# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
-# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-
-__version_info__ = (1, 4, 3)
-__version__ = '.'.join(map(str, __version_info__))
-
-
-import sys
-import os
-
-PY3 = sys.version_info[0] == 3
-
-if PY3:
-    unicode = str
-
-if sys.platform.startswith('java'):
-    import platform
-    os_name = platform.java_ver()[3][0]
-    if os_name.startswith('Windows'):  # "Windows XP", "Windows 7", etc.
-        system = 'win32'
-    elif os_name.startswith('Mac'):  # "Mac OS X", etc.
-        system = 'darwin'
-    else:  # "Linux", "SunOS", "FreeBSD", etc.
-        # Setting this to "linux2" is not ideal, but only Windows or Mac
-        # are actually checked for and the rest of the module expects
-        # *sys.platform* style strings.
-        system = 'linux2'
-else:
-    system = sys.platform
-
-
-
-def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
-    r"""Return full path to the user-specific data dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "roaming" (boolean, default False) can be set True to use the Windows
-            roaming appdata directory. That means that for users on a Windows
-            network setup for roaming profiles, this user data will be
-            sync'd on login. See
-            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-            for a discussion of issues.
-
-    Typical user data directories are:
-        Mac OS X:               ~/Library/Application Support/<AppName>
-        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
-        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
-        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
-        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
-        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
-
-    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
-    That means, by default "~/.local/share/<AppName>".
-    """
-    if system == "win32":
-        if appauthor is None:
-            appauthor = appname
-        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
-        path = os.path.normpath(_get_win_folder(const))
-        if appname:
-            if appauthor is not False:
-                path = os.path.join(path, appauthor, appname)
-            else:
-                path = os.path.join(path, appname)
-    elif system == 'darwin':
-        path = os.path.expanduser('~/Library/Application Support/')
-        if appname:
-            path = os.path.join(path, appname)
-    else:
-        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
-        if appname:
-            path = os.path.join(path, appname)
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
-    r"""Return full path to the user-shared data dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "multipath" is an optional parameter only applicable to *nix
-            which indicates that the entire list of data dirs should be
-            returned. By default, the first item from XDG_DATA_DIRS is
-            returned, or '/usr/local/share/<AppName>',
-            if XDG_DATA_DIRS is not set
-
-    Typical site data directories are:
-        Mac OS X:   /Library/Application Support/<AppName>
-        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
-        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
-        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.
-
-    For Unix, this is using the $XDG_DATA_DIRS[0] default.
-
-    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
-    """
-    if system == "win32":
-        if appauthor is None:
-            appauthor = appname
-        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
-        if appname:
-            if appauthor is not False:
-                path = os.path.join(path, appauthor, appname)
-            else:
-                path = os.path.join(path, appname)
-    elif system == 'darwin':
-        path = os.path.expanduser('/Library/Application Support')
-        if appname:
-            path = os.path.join(path, appname)
-    else:
-        # XDG default for $XDG_DATA_DIRS
-        # only first, if multipath is False
-        path = os.getenv('XDG_DATA_DIRS',
-                         os.pathsep.join(['/usr/local/share', '/usr/share']))
-        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
-        if appname:
-            if version:
-                appname = os.path.join(appname, version)
-            pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
-        if multipath:
-            path = os.pathsep.join(pathlist)
-        else:
-            path = pathlist[0]
-        return path
-
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
-    r"""Return full path to the user-specific config dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "roaming" (boolean, default False) can be set True to use the Windows
-            roaming appdata directory. That means that for users on a Windows
-            network setup for roaming profiles, this user data will be
-            sync'd on login. See
-            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-            for a discussion of issues.
-
-    Typical user config directories are:
-        Mac OS X:               same as user_data_dir
-        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
-        Win *:                  same as user_data_dir
-
-    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
-    That means, by default "~/.config/<AppName>".
-    """
-    if system in ["win32", "darwin"]:
-        path = user_data_dir(appname, appauthor, None, roaming)
-    else:
-        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
-        if appname:
-            path = os.path.join(path, appname)
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
-    r"""Return full path to the user-shared data dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "multipath" is an optional parameter only applicable to *nix
-            which indicates that the entire list of config dirs should be
-            returned. By default, the first item from XDG_CONFIG_DIRS is
-            returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
-
-    Typical site config directories are:
-        Mac OS X:   same as site_data_dir
-        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
-                    $XDG_CONFIG_DIRS
-        Win *:      same as site_data_dir
-        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-
-    For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
-
-    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
-    """
-    if system in ["win32", "darwin"]:
-        path = site_data_dir(appname, appauthor)
-        if appname and version:
-            path = os.path.join(path, version)
-    else:
-        # XDG default for $XDG_CONFIG_DIRS
-        # only first, if multipath is False
-        path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
-        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
-        if appname:
-            if version:
-                appname = os.path.join(appname, version)
-            pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
-        if multipath:
-            path = os.pathsep.join(pathlist)
-        else:
-            path = pathlist[0]
-    return path
-
-
-def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
-    r"""Return full path to the user-specific cache dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "opinion" (boolean) can be False to disable the appending of
-            "Cache" to the base app data dir for Windows. See
-            discussion below.
-
-    Typical user cache directories are:
-        Mac OS X:   ~/Library/Caches/<AppName>
-        Unix:       ~/.cache/<AppName> (XDG default)
-        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
-        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
-
-    On Windows the only suggestion in the MSDN docs is that local settings go in
-    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
-    app data dir (the default returned by `user_data_dir` above). Apps typically
-    put cache data somewhere *under* the given dir here. Some examples:
-        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
-        ...\Acme\SuperApp\Cache\1.0
-    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
-    This can be disabled with the `opinion=False` option.
-    """
-    if system == "win32":
-        if appauthor is None:
-            appauthor = appname
-        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
-        if appname:
-            if appauthor is not False:
-                path = os.path.join(path, appauthor, appname)
-            else:
-                path = os.path.join(path, appname)
-            if opinion:
-                path = os.path.join(path, "Cache")
-    elif system == 'darwin':
-        path = os.path.expanduser('~/Library/Caches')
-        if appname:
-            path = os.path.join(path, appname)
-    else:
-        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
-        if appname:
-            path = os.path.join(path, appname)
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
-    r"""Return full path to the user-specific state dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "roaming" (boolean, default False) can be set True to use the Windows
-            roaming appdata directory. That means that for users on a Windows
-            network setup for roaming profiles, this user data will be
-            sync'd on login. See
-            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-            for a discussion of issues.
-
-    Typical user state directories are:
-        Mac OS X:  same as user_data_dir
-        Unix:      ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
-        Win *:     same as user_data_dir
-
-    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
-    to extend the XDG spec and support $XDG_STATE_HOME.
-
-    That means, by default "~/.local/state/<AppName>".
-    """
-    if system in ["win32", "darwin"]:
-        path = user_data_dir(appname, appauthor, None, roaming)
-    else:
-        path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
-        if appname:
-            path = os.path.join(path, appname)
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
-    r"""Return full path to the user-specific log dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "opinion" (boolean) can be False to disable the appending of
-            "Logs" to the base app data dir for Windows, and "log" to the
-            base cache dir for Unix. See discussion below.
-
-    Typical user log directories are:
-        Mac OS X:   ~/Library/Logs/<AppName>
-        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
-        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
-        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
-
-    On Windows the only suggestion in the MSDN docs is that local settings
-    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
-    examples of what some windows apps use for a logs dir.)
-
-    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
-    value for Windows and appends "log" to the user cache dir for Unix.
-    This can be disabled with the `opinion=False` option.
-    """
-    if system == "darwin":
-        path = os.path.join(
-            os.path.expanduser('~/Library/Logs'),
-            appname)
-    elif system == "win32":
-        path = user_data_dir(appname, appauthor, version)
-        version = False
-        if opinion:
-            path = os.path.join(path, "Logs")
-    else:
-        path = user_cache_dir(appname, appauthor, version)
-        version = False
-        if opinion:
-            path = os.path.join(path, "log")
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-class AppDirs(object):
-    """Convenience wrapper for getting application dirs."""
-    def __init__(self, appname=None, appauthor=None, version=None,
-                 roaming=False, multipath=False):
-        self.appname = appname
-        self.appauthor = appauthor
-        self.version = version
-        self.roaming = roaming
-        self.multipath = multipath
-
-    @property
-    def user_data_dir(self):
-        return user_data_dir(self.appname, self.appauthor,
-                             version=self.version, roaming=self.roaming)
-
-    @property
-    def site_data_dir(self):
-        return site_data_dir(self.appname, self.appauthor,
-                             version=self.version, multipath=self.multipath)
-
-    @property
-    def user_config_dir(self):
-        return user_config_dir(self.appname, self.appauthor,
-                               version=self.version, roaming=self.roaming)
-
-    @property
-    def site_config_dir(self):
-        return site_config_dir(self.appname, self.appauthor,
-                               version=self.version, multipath=self.multipath)
-
-    @property
-    def user_cache_dir(self):
-        return user_cache_dir(self.appname, self.appauthor,
-                              version=self.version)
-
-    @property
-    def user_state_dir(self):
-        return user_state_dir(self.appname, self.appauthor,
-                              version=self.version)
-
-    @property
-    def user_log_dir(self):
-        return user_log_dir(self.appname, self.appauthor,
-                            version=self.version)
-
-
-#---- internal support stuff
-
-def _get_win_folder_from_registry(csidl_name):
-    """This is a fallback technique at best. I'm not sure if using the
-    registry for this guarantees us the correct answer for all CSIDL_*
-    names.
-    """
-    if PY3:
-        import winreg as _winreg
-    else:
-        import _winreg
-
-    shell_folder_name = {
-        "CSIDL_APPDATA": "AppData",
-        "CSIDL_COMMON_APPDATA": "Common AppData",
-        "CSIDL_LOCAL_APPDATA": "Local AppData",
-    }[csidl_name]
-
-    key = _winreg.OpenKey(
-        _winreg.HKEY_CURRENT_USER,
-        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
-    )
-    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
-    return dir
-
-
-def _get_win_folder_with_pywin32(csidl_name):
-    from win32com.shell import shellcon, shell
-    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
-    # Try to make this a unicode path because SHGetFolderPath does
-    # not return unicode strings when there is unicode data in the
-    # path.
-    try:
-        dir = unicode(dir)
-
-        # Downgrade to short path name if have highbit chars. See
-        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-        has_high_char = False
-        for c in dir:
-            if ord(c) > 255:
-                has_high_char = True
-                break
-        if has_high_char:
-            try:
-                import win32api
-                dir = win32api.GetShortPathName(dir)
-            except ImportError:
-                pass
-    except UnicodeError:
-        pass
-    return dir
-
-
-def _get_win_folder_with_ctypes(csidl_name):
-    import ctypes
-
-    csidl_const = {
-        "CSIDL_APPDATA": 26,
-        "CSIDL_COMMON_APPDATA": 35,
-        "CSIDL_LOCAL_APPDATA": 28,
-    }[csidl_name]
-
-    buf = ctypes.create_unicode_buffer(1024)
-    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-
-    # Downgrade to short path name if have highbit chars. See
-    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-    has_high_char = False
-    for c in buf:
-        if ord(c) > 255:
-            has_high_char = True
-            break
-    if has_high_char:
-        buf2 = ctypes.create_unicode_buffer(1024)
-        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
-            buf = buf2
-
-    return buf.value
-
-def _get_win_folder_with_jna(csidl_name):
-    import array
-    from com.sun import jna
-    from com.sun.jna.platform import win32
-
-    buf_size = win32.WinDef.MAX_PATH * 2
-    buf = array.zeros('c', buf_size)
-    shell = win32.Shell32.INSTANCE
-    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
-    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
-    # Downgrade to short path name if have highbit chars. See
-    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-    has_high_char = False
-    for c in dir:
-        if ord(c) > 255:
-            has_high_char = True
-            break
-    if has_high_char:
-        buf = array.zeros('c', buf_size)
-        kernel = win32.Kernel32.INSTANCE
-        if kernel.GetShortPathName(dir, buf, buf_size):
-            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
-    return dir
-
-if system == "win32":
-    try:
-        import win32com.shell
-        _get_win_folder = _get_win_folder_with_pywin32
-    except ImportError:
-        try:
-            from ctypes import windll
-            _get_win_folder = _get_win_folder_with_ctypes
-        except ImportError:
-            try:
-                import com.sun.jna
-                _get_win_folder = _get_win_folder_with_jna
-            except ImportError:
-                _get_win_folder = _get_win_folder_from_registry
-
-
-#---- self test code
-
-if __name__ == "__main__":
-    appname = "MyApp"
-    appauthor = "MyCompany"
-
-    props = ("user_data_dir",
-             "user_config_dir",
-             "user_cache_dir",
-             "user_state_dir",
-             "user_log_dir",
-             "site_data_dir",
-             "site_config_dir")
-
-    print("-- app dirs %s --" % __version__)
-
-    print("-- app dirs (with optional 'version')")
-    dirs = AppDirs(appname, appauthor, version="1.0")
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
-
-    print("\n-- app dirs (without optional 'version')")
-    dirs = AppDirs(appname, appauthor)
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
-
-    print("\n-- app dirs (without optional 'appauthor')")
-    dirs = AppDirs(appname)
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
-
-    print("\n-- app dirs (with disabled 'appauthor')")
-    dirs = AppDirs(appname, appauthor=False)
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))

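Not part of the diff: a minimal usage sketch of the appdirs API deleted above, assuming the standalone `appdirs` package (or this vendored copy) is importable; "MyApp" and "MyCompany" are placeholder names, not values from this repository.

import appdirs

# Per-user, per-application directories; `version` appends one path element.
print(appdirs.user_data_dir("MyApp", "MyCompany", version="1.0"))
print(appdirs.user_cache_dir("MyApp", "MyCompany"))

# The AppDirs wrapper exposes the same paths as properties.
dirs = appdirs.AppDirs("MyApp", "MyCompany", roaming=True)
print(dirs.user_config_dir)
print(dirs.user_log_dir)
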
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/util.h
DELETED
@@ -1,589 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-#pragma once
-
-#include <cstdio>
-#include <thrust/detail/config.h>
-#include <thrust/iterator/iterator_traits.h>
-#include <cub/util_arch.cuh>
-#include <thrust/system/cuda/detail/execution_policy.h>
-#include <thrust/system_error.h>
-#include <thrust/system/cuda/error.h>
-
-namespace thrust
-{
-
-namespace cuda_cub {
-
-inline __host__ __device__
-cudaStream_t
-default_stream()
-{
-#ifdef CUDA_API_PER_THREAD_DEFAULT_STREAM
-  return cudaStreamPerThread;
-#else
-  return cudaStreamLegacy;
-#endif
-}
-
-// Fallback implementation of the customization point.
-template <class Derived>
-__host__ __device__
-cudaStream_t
-get_stream(execution_policy<Derived> &)
-{
-  return default_stream();
-}
-
-// Entry point/interface.
-template <class Derived>
-__host__ __device__ cudaStream_t
-stream(execution_policy<Derived> &policy)
-{
-  return get_stream(derived_cast(policy));
-}
-
-// Fallback implementation of the customization point.
-__thrust_exec_check_disable__
-template <class Derived>
-__host__ __device__
-cudaError_t
-synchronize_stream(execution_policy<Derived> &policy)
-{
-  cudaError_t result;
-  if (THRUST_IS_HOST_CODE) {
-    #if THRUST_INCLUDE_HOST_CODE
-      cudaStreamSynchronize(stream(policy));
-      result = cudaGetLastError();
-    #endif
-  } else {
-    #if THRUST_INCLUDE_DEVICE_CODE
-      #if __THRUST_HAS_CUDART__
-        THRUST_UNUSED_VAR(policy);
-        cudaDeviceSynchronize();
-        result = cudaGetLastError();
-      #else
-        THRUST_UNUSED_VAR(policy);
-        result = cudaSuccess;
-      #endif
-    #endif
-  }
-  return result;
-}
-
-// Entry point/interface.
-template <class Policy>
-__host__ __device__
-cudaError_t
-synchronize(Policy &policy)
-{
-  return synchronize_stream(derived_cast(policy));
-}
-
-template <class Type>
-THRUST_HOST_FUNCTION cudaError_t
-trivial_copy_from_device(Type *       dst,
-                         Type const * src,
-                         size_t       count,
-                         cudaStream_t stream)
-{
-  cudaError status = cudaSuccess;
-  if (count == 0) return status;
-
-  status = ::cudaMemcpyAsync(dst,
-                             src,
-                             sizeof(Type) * count,
-                             cudaMemcpyDeviceToHost,
-                             stream);
-  cudaStreamSynchronize(stream);
-  return status;
-}
-
-template <class Type>
-THRUST_HOST_FUNCTION cudaError_t
-trivial_copy_to_device(Type *       dst,
-                       Type const * src,
-                       size_t       count,
-                       cudaStream_t stream)
-{
-  cudaError status = cudaSuccess;
-  if (count == 0) return status;
-
-  status = ::cudaMemcpyAsync(dst,
-                             src,
-                             sizeof(Type) * count,
-                             cudaMemcpyHostToDevice,
-                             stream);
-  cudaStreamSynchronize(stream);
-  return status;
-}
-
-template <class Policy, class Type>
-__host__ __device__ cudaError_t
-trivial_copy_device_to_device(Policy &    policy,
-                              Type *      dst,
-                              Type const *src,
-                              size_t      count)
-{
-  cudaError_t status = cudaSuccess;
-  if (count == 0) return status;
-
-  cudaStream_t stream = cuda_cub::stream(policy);
-  //
-  status = ::cudaMemcpyAsync(dst,
-                             src,
-                             sizeof(Type) * count,
-                             cudaMemcpyDeviceToDevice,
-                             stream);
-  cuda_cub::synchronize(policy);
-  return status;
-}
-
-inline void __host__ __device__
-terminate()
-{
-  if (THRUST_IS_DEVICE_CODE) {
-    #if THRUST_INCLUDE_DEVICE_CODE
-      asm("trap;");
-    #endif
-  } else {
-    #if THRUST_INCLUDE_HOST_CODE
-      std::terminate();
-    #endif
-  }
-}
-
-__host__ __device__
-inline void throw_on_error(cudaError_t status)
-{
-#if __THRUST_HAS_CUDART__
-  // Clear the global CUDA error state which may have been set by the last
-  // call. Otherwise, errors may "leak" to unrelated kernel launches.
-  cudaGetLastError();
-#endif
-
-  if (cudaSuccess != status)
-  {
-    if (THRUST_IS_HOST_CODE) {
-      #if THRUST_INCLUDE_HOST_CODE
-        throw thrust::system_error(status, thrust::cuda_category());
-      #endif
-    } else {
-      #if THRUST_INCLUDE_DEVICE_CODE
-        #if __THRUST_HAS_CUDART__
-          printf("Thrust CUDA backend error: %s: %s\n",
-                 cudaGetErrorName(status),
-                 cudaGetErrorString(status));
-        #else
-          printf("Thrust CUDA backend error: %d\n",
-                 static_cast<int>(status));
-        #endif
-        cuda_cub::terminate();
-      #endif
-    }
-  }
-}
-
-__host__ __device__
-inline void throw_on_error(cudaError_t status, char const *msg)
-{
-#if __THRUST_HAS_CUDART__
-  // Clear the global CUDA error state which may have been set by the last
-  // call. Otherwise, errors may "leak" to unrelated kernel launches.
-  cudaGetLastError();
-#endif
-
-  if (cudaSuccess != status)
-  {
-    if (THRUST_IS_HOST_CODE) {
-      #if THRUST_INCLUDE_HOST_CODE
-        throw thrust::system_error(status, thrust::cuda_category(), msg);
-      #endif
-    } else {
-      #if THRUST_INCLUDE_DEVICE_CODE
-        #if __THRUST_HAS_CUDART__
-          printf("Thrust CUDA backend error: %s: %s: %s\n",
-                 cudaGetErrorName(status),
-                 cudaGetErrorString(status),
-                 msg);
-        #else
-          printf("Thrust CUDA backend error: %d: %s \n",
-                 static_cast<int>(status),
-                 msg);
-        #endif
-        cuda_cub::terminate();
-      #endif
-    }
-  }
-}
-
-// FIXME: Move the iterators elsewhere.
-
-template <class ValueType,
-          class InputIt,
-          class UnaryOp>
-struct transform_input_iterator_t
-{
-  typedef transform_input_iterator_t                         self_t;
-  typedef typename iterator_traits<InputIt>::difference_type difference_type;
-  typedef ValueType                                          value_type;
-  typedef void                                               pointer;
-  typedef value_type                                         reference;
-  typedef std::random_access_iterator_tag                    iterator_category;
-
-  InputIt         input;
-  mutable UnaryOp op;
-
-  __host__ __device__ __forceinline__
-  transform_input_iterator_t(InputIt input, UnaryOp op)
-      : input(input), op(op) {}
-
-#if THRUST_CPP_DIALECT >= 2011
-  transform_input_iterator_t(const self_t &) = default;
-#endif
-
-  // UnaryOp might not be copy assignable, such as when it is a lambda. Define
-  // an explicit copy assignment operator that doesn't try to assign it.
-  self_t& operator=(const self_t& o)
-  {
-    input = o.input;
-    return *this;
-  }
-
-  /// Postfix increment
-  __host__ __device__ __forceinline__ self_t operator++(int)
-  {
-    self_t retval = *this;
-    ++input;
-    return retval;
-  }
-
-  /// Prefix increment
-  __host__ __device__ __forceinline__ self_t operator++()
-  {
-    ++input;
-    return *this;
-  }
-
-  /// Indirection
-  __host__ __device__ __forceinline__ reference operator*() const
-  {
-    typename thrust::iterator_value<InputIt>::type x = *input;
-    return op(x);
-  }
-  /// Indirection
-  __host__ __device__ __forceinline__ reference operator*()
-  {
-    typename thrust::iterator_value<InputIt>::type x = *input;
-    return op(x);
-  }
-
-  /// Addition
-  __host__ __device__ __forceinline__ self_t operator+(difference_type n) const
-  {
-    return self_t(input + n, op);
-  }
-
-  /// Addition assignment
-  __host__ __device__ __forceinline__ self_t &operator+=(difference_type n)
-  {
-    input += n;
-    return *this;
-  }
-
-  /// Subtraction
-  __host__ __device__ __forceinline__ self_t operator-(difference_type n) const
-  {
-    return self_t(input - n, op);
-  }
-
-  /// Subtraction assignment
-  __host__ __device__ __forceinline__ self_t &operator-=(difference_type n)
-  {
-    input -= n;
-    return *this;
-  }
-
-  /// Distance
-  __host__ __device__ __forceinline__ difference_type operator-(self_t other) const
-  {
-    return input - other.input;
-  }
-
-  /// Array subscript
-  __host__ __device__ __forceinline__ reference operator[](difference_type n) const
-  {
-    return op(input[n]);
-  }
-
-  /// Equal to
-  __host__ __device__ __forceinline__ bool operator==(const self_t &rhs) const
-  {
-    return (input == rhs.input);
-  }
-
-  /// Not equal to
-  __host__ __device__ __forceinline__ bool operator!=(const self_t &rhs) const
-  {
-    return (input != rhs.input);
-  }
-}; // struct transform_input_iterator_t
-
-template <class ValueType,
-          class InputIt1,
-          class InputIt2,
-          class BinaryOp>
-struct transform_pair_of_input_iterators_t
-{
-  typedef transform_pair_of_input_iterators_t                 self_t;
-  typedef typename iterator_traits<InputIt1>::difference_type difference_type;
-  typedef ValueType                                           value_type;
-  typedef void                                                pointer;
-  typedef value_type                                          reference;
-  typedef std::random_access_iterator_tag                     iterator_category;
-
-  InputIt1         input1;
-  InputIt2         input2;
-  mutable BinaryOp op;
-
-  __host__ __device__ __forceinline__
-  transform_pair_of_input_iterators_t(InputIt1 input1_,
-                                      InputIt2 input2_,
-                                      BinaryOp op_)
-      : input1(input1_), input2(input2_), op(op_) {}
-
-#if THRUST_CPP_DIALECT >= 2011
-  transform_pair_of_input_iterators_t(const self_t &) = default;
-#endif
-
-  // BinaryOp might not be copy assignable, such as when it is a lambda.
-  // Define an explicit copy assignment operator that doesn't try to assign it.
-  self_t& operator=(const self_t& o)
-  {
-    input1 = o.input1;
-    input2 = o.input2;
-    return *this;
-  }
-
-  /// Postfix increment
-  __host__ __device__ __forceinline__ self_t operator++(int)
-  {
-    self_t retval = *this;
-    ++input1;
-    ++input2;
-    return retval;
-  }
-
-  /// Prefix increment
-  __host__ __device__ __forceinline__ self_t operator++()
-  {
-    ++input1;
-    ++input2;
-    return *this;
-  }
-
-  /// Indirection
-  __host__ __device__ __forceinline__ reference operator*() const
-  {
-    return op(*input1, *input2);
-  }
-  /// Indirection
-  __host__ __device__ __forceinline__ reference operator*()
-  {
-    return op(*input1, *input2);
-  }
-
-  /// Addition
-  __host__ __device__ __forceinline__ self_t operator+(difference_type n) const
-  {
-    return self_t(input1 + n, input2 + n, op);
-  }
-
-  /// Addition assignment
-  __host__ __device__ __forceinline__ self_t &operator+=(difference_type n)
-  {
-    input1 += n;
-    input2 += n;
-    return *this;
-  }
-
-  /// Subtraction
-  __host__ __device__ __forceinline__ self_t operator-(difference_type n) const
-  {
-    return self_t(input1 - n, input2 - n, op);
-  }
-
-  /// Subtraction assignment
-  __host__ __device__ __forceinline__ self_t &operator-=(difference_type n)
-  {
-    input1 -= n;
-    input2 -= n;
-    return *this;
-  }
-
-  /// Distance
-  __host__ __device__ __forceinline__ difference_type operator-(self_t other) const
-  {
-    return input1 - other.input1;
-  }
-
-  /// Array subscript
-  __host__ __device__ __forceinline__ reference operator[](difference_type n) const
-  {
-    return op(input1[n], input2[n]);
-  }
-
-  /// Equal to
-  __host__ __device__ __forceinline__ bool operator==(const self_t &rhs) const
-  {
-    return (input1 == rhs.input1) && (input2 == rhs.input2);
-  }
-
-  /// Not equal to
-  __host__ __device__ __forceinline__ bool operator!=(const self_t &rhs) const
-  {
-    return (input1 != rhs.input1) || (input2 != rhs.input2);
-  }
-
-}; // struct transform_pair_of_input_iterators_t
-
-
-struct identity
-{
-  template <class T>
-  __host__ __device__ T const &
-  operator()(T const &t) const
-  {
-    return t;
-  }
-
-  template <class T>
-  __host__ __device__ T &
-  operator()(T &t) const
-  {
-    return t;
-  }
-};
-
-
-template <class T>
-struct counting_iterator_t
-{
-  typedef counting_iterator_t             self_t;
-  typedef T                               difference_type;
-  typedef T                               value_type;
-  typedef void                            pointer;
-  typedef T                               reference;
-  typedef std::random_access_iterator_tag iterator_category;
-
-  T count;
-
-  __host__ __device__ __forceinline__
-  counting_iterator_t(T count_) : count(count_) {}
-
-  /// Postfix increment
-  __host__ __device__ __forceinline__ self_t operator++(int)
-  {
-    self_t retval = *this;
-    ++count;
-    return retval;
-  }
-
-  /// Prefix increment
-  __host__ __device__ __forceinline__ self_t operator++()
-  {
-    ++count;
-    return *this;
-  }
-
-  /// Indirection
-  __host__ __device__ __forceinline__ reference operator*() const
-  {
-    return count;
-  }
-
-  /// Indirection
-  __host__ __device__ __forceinline__ reference operator*()
-  {
-    return count;
-  }
-
-  /// Addition
-  __host__ __device__ __forceinline__ self_t operator+(difference_type n) const
-  {
-    return self_t(count + n);
-  }
-
-  /// Addition assignment
-  __host__ __device__ __forceinline__ self_t &operator+=(difference_type n)
-  {
-    count += n;
-    return *this;
-  }
-
-  /// Subtraction
-  __host__ __device__ __forceinline__ self_t operator-(difference_type n) const
-  {
-    return self_t(count - n);
-  }
-
-  /// Subtraction assignment
-  __host__ __device__ __forceinline__ self_t &operator-=(difference_type n)
-  {
-    count -= n;
-    return *this;
-  }
-
-  /// Distance
-  __host__ __device__ __forceinline__ difference_type operator-(self_t other) const
-  {
-    return count - other.count;
-  }
-
-  /// Array subscript
-  __host__ __device__ __forceinline__ reference operator[](difference_type n) const
-  {
-    return count + n;
-  }
-
-  /// Equal to
-  __host__ __device__ __forceinline__ bool operator==(const self_t &rhs) const
-  {
-    return (count == rhs.count);
-  }
-
-  /// Not equal to
-  __host__ __device__ __forceinline__ bool operator!=(const self_t &rhs) const
-  {
-    return (count != rhs.count);
-  }
-
-}; // struct counting_iterator_t
-
-} // cuda_
-
-} // end namespace thrust

spaces/CVPR/lama-example/saicinpainting/training/losses/style_loss.py
DELETED
@@ -1,155 +0,0 @@
-import torch
-import torch.nn as nn
-import torchvision.models as models
-
-
-class PerceptualLoss(nn.Module):
-    r"""
-    Perceptual loss, VGG-based
-    https://arxiv.org/abs/1603.08155
-    https://github.com/dxyang/StyleTransfer/blob/master/utils.py
-    """
-
-    def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
-        super(PerceptualLoss, self).__init__()
-        self.add_module('vgg', VGG19())
-        self.criterion = torch.nn.L1Loss()
-        self.weights = weights
-
-    def __call__(self, x, y):
-        # Compute features
-        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
-
-        content_loss = 0.0
-        content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])
-        content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])
-        content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])
-        content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])
-        content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])
-
-
-        return content_loss
-
-
-class VGG19(torch.nn.Module):
-    def __init__(self):
-        super(VGG19, self).__init__()
-        features = models.vgg19(pretrained=True).features
-        self.relu1_1 = torch.nn.Sequential()
-        self.relu1_2 = torch.nn.Sequential()
-
-        self.relu2_1 = torch.nn.Sequential()
-        self.relu2_2 = torch.nn.Sequential()
-
-        self.relu3_1 = torch.nn.Sequential()
-        self.relu3_2 = torch.nn.Sequential()
-        self.relu3_3 = torch.nn.Sequential()
-        self.relu3_4 = torch.nn.Sequential()
-
-        self.relu4_1 = torch.nn.Sequential()
-        self.relu4_2 = torch.nn.Sequential()
-        self.relu4_3 = torch.nn.Sequential()
-        self.relu4_4 = torch.nn.Sequential()
-
-        self.relu5_1 = torch.nn.Sequential()
-        self.relu5_2 = torch.nn.Sequential()
-        self.relu5_3 = torch.nn.Sequential()
-        self.relu5_4 = torch.nn.Sequential()
-
-        for x in range(2):
-            self.relu1_1.add_module(str(x), features[x])
-
-        for x in range(2, 4):
-            self.relu1_2.add_module(str(x), features[x])
-
-        for x in range(4, 7):
-            self.relu2_1.add_module(str(x), features[x])
-
-        for x in range(7, 9):
-            self.relu2_2.add_module(str(x), features[x])
-
-        for x in range(9, 12):
-            self.relu3_1.add_module(str(x), features[x])
-
-        for x in range(12, 14):
-            self.relu3_2.add_module(str(x), features[x])
-
-        for x in range(14, 16):
-            self.relu3_3.add_module(str(x), features[x])  # was relu3_2 in the original, a copy-paste slip that left relu3_3 empty
-
-        for x in range(16, 18):
-            self.relu3_4.add_module(str(x), features[x])
-
-        for x in range(18, 21):
-            self.relu4_1.add_module(str(x), features[x])
-
-        for x in range(21, 23):
-            self.relu4_2.add_module(str(x), features[x])
-
-        for x in range(23, 25):
-            self.relu4_3.add_module(str(x), features[x])
-
-        for x in range(25, 27):
-            self.relu4_4.add_module(str(x), features[x])
-
-        for x in range(27, 30):
-            self.relu5_1.add_module(str(x), features[x])
-
-        for x in range(30, 32):
-            self.relu5_2.add_module(str(x), features[x])
-
-        for x in range(32, 34):
-            self.relu5_3.add_module(str(x), features[x])
-
-        for x in range(34, 36):
-            self.relu5_4.add_module(str(x), features[x])
-
-        # don't need the gradients, just want the features
-        for param in self.parameters():
-            param.requires_grad = False
-
-    def forward(self, x):
-        relu1_1 = self.relu1_1(x)
-        relu1_2 = self.relu1_2(relu1_1)
-
-        relu2_1 = self.relu2_1(relu1_2)
-        relu2_2 = self.relu2_2(relu2_1)
-
-        relu3_1 = self.relu3_1(relu2_2)
-        relu3_2 = self.relu3_2(relu3_1)
-        relu3_3 = self.relu3_3(relu3_2)
-        relu3_4 = self.relu3_4(relu3_3)
-
-        relu4_1 = self.relu4_1(relu3_4)
-        relu4_2 = self.relu4_2(relu4_1)
-        relu4_3 = self.relu4_3(relu4_2)
-        relu4_4 = self.relu4_4(relu4_3)
-
-        relu5_1 = self.relu5_1(relu4_4)
-        relu5_2 = self.relu5_2(relu5_1)
-        relu5_3 = self.relu5_3(relu5_2)
-        relu5_4 = self.relu5_4(relu5_3)
-
-        out = {
-            'relu1_1': relu1_1,
-            'relu1_2': relu1_2,
-
-            'relu2_1': relu2_1,
-            'relu2_2': relu2_2,
-
-            'relu3_1': relu3_1,
-            'relu3_2': relu3_2,
-            'relu3_3': relu3_3,
-            'relu3_4': relu3_4,
-
-            'relu4_1': relu4_1,
-            'relu4_2': relu4_2,
-            'relu4_3': relu4_3,
-            'relu4_4': relu4_4,
-
-            'relu5_1': relu5_1,
-            'relu5_2': relu5_2,
-            'relu5_3': relu5_3,
-            'relu5_4': relu5_4,
-        }
-        return out

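Not part of the diff: a minimal sketch of how the PerceptualLoss above is driven, assuming torchvision's pretrained VGG19 weights are available for download; the batch shapes are illustrative assumptions.

import torch

loss_fn = PerceptualLoss()                             # per-layer weights default to [1.0] * 5
pred = torch.rand(1, 3, 256, 256, requires_grad=True)  # generated image batch
target = torch.rand(1, 3, 256, 256)                    # reference batch
loss = loss_fn(pred, target)   # weighted L1 distance over five VGG19 feature maps
loss.backward()                # gradients reach `pred`; VGG parameters stay frozen
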
spaces/CVPR/lama-example/saicinpainting/training/modules/fake_fakes.py
DELETED
@@ -1,47 +0,0 @@
-import torch
-from kornia import SamplePadding
-from kornia.augmentation import RandomAffine, CenterCrop
-
-
-class FakeFakesGenerator:
-    def __init__(self, aug_proba=0.5, img_aug_degree=30, img_aug_translate=0.2):
-        self.grad_aug = RandomAffine(degrees=360,
-                                     translate=0.2,
-                                     padding_mode=SamplePadding.REFLECTION,
-                                     keepdim=False,
-                                     p=1)
-        self.img_aug = RandomAffine(degrees=img_aug_degree,
-                                    translate=img_aug_translate,
-                                    padding_mode=SamplePadding.REFLECTION,
-                                    keepdim=True,
-                                    p=1)
-        self.aug_proba = aug_proba
-
-    def __call__(self, input_images, masks):
-        blend_masks = self._fill_masks_with_gradient(masks)
-        blend_target = self._make_blend_target(input_images)
-        result = input_images * (1 - blend_masks) + blend_target * blend_masks
-        return result, blend_masks
-
-    def _make_blend_target(self, input_images):
-        batch_size = input_images.shape[0]
-        permuted = input_images[torch.randperm(batch_size)]
-        augmented = self.img_aug(input_images)
-        is_aug = (torch.rand(batch_size, device=input_images.device)[:, None, None, None] < self.aug_proba).float()
-        result = augmented * is_aug + permuted * (1 - is_aug)
-        return result
-
-    def _fill_masks_with_gradient(self, masks):
-        batch_size, _, height, width = masks.shape
-        grad = torch.linspace(0, 1, steps=width * 2, device=masks.device, dtype=masks.dtype) \
-            .view(1, 1, 1, -1).expand(batch_size, 1, height * 2, width * 2)
-        grad = self.grad_aug(grad)
-        grad = CenterCrop((height, width))(grad)
-        grad *= masks
-
-        grad_for_min = grad + (1 - masks) * 10
-        grad -= grad_for_min.view(batch_size, -1).min(-1).values[:, None, None, None]
-        grad /= grad.view(batch_size, -1).max(-1).values[:, None, None, None] + 1e-6
-        grad.clamp_(min=0, max=1)
-
-        return grad

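Not part of the diff: a hedged call-pattern sketch for the FakeFakesGenerator above; the batch and mask shapes are assumptions, and the kornia imports used by the module must resolve.

import torch

gen = FakeFakesGenerator(aug_proba=0.5)
images = torch.rand(4, 3, 128, 128)                 # batch of real images
masks = (torch.rand(4, 1, 128, 128) > 0.7).float()  # binary inpainting masks
fake_fakes, blend_masks = gen(images, masks)        # images blended with permuted/augmented copies
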
spaces/CVPR/v-doc_abstractive_mac/demo.py
DELETED
@@ -1,83 +0,0 @@
import json
import os
import werkzeug
import tensorflow as tf

from config import config, parseArgs, configPDF
from extract_feature import get_img_feat, build_model
from main import setSession, loadWeights, setSavers
from model import MACnet
from preprocess import Preprocesser
import warnings

def predict(image, question):
    parseArgs()
    config.parallel = True
    config.evalTrain = True
    config.retainVal = True
    config.useEMA = True
    config.lrReduce = True
    config.adam = True
    config.clip = True
    config.memoryVariationalDropout = True
    config.relu = 'ELU'
    config.encBi = True
    config.wrdEmbRandom = True
    config.wrdEmbUniform = True
    config.outQuestion = True
    config.initCtrl = 'Q'
    config.controlContextual = True
    config.controlInputUnshared = True
    config.readProjInputs = True
    config.readMemConcatKB = True
    config.readMemConcatProj = True
    config.readMemProj = True
    config.readCtrl = True
    config.writeMemProj = True
    config.restore = True
    config.expName = 'PDF_exp_extra'
    config.netLength = 16
    configPDF()
    with open(config.configFile(), "a+") as outFile:
        json.dump(vars(config), outFile)

    if config.gpus != "":
        config.gpusNum = len(config.gpus.split(","))
        os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus
    tf.reset_default_graph()
    tf.Graph().as_default()
    tf.logging.set_verbosity(tf.logging.ERROR)
    cnn_model = build_model()
    imageData = get_img_feat(cnn_model, image)

    preprocessor = Preprocesser()
    qData, embeddings, answerDict = preprocessor.preprocessData(question)
    model = MACnet(embeddings, answerDict)
    init = tf.global_variables_initializer()

    savers = setSavers(model)
    saver, emaSaver = savers["saver"], savers["emaSaver"]
    sessionConfig = setSession()

    data = {'data': qData, 'image': imageData}

    with tf.Session(config=sessionConfig) as sess:
        sess.graph.finalize()

        # epoch = loadWeights(sess, saver, init)
        print('###############', config.weightsFile(25))
        os.system('ls -l ./weights/PDF_exp_extra')
        emaSaver.restore(sess, config.weightsFile(25))

        evalRes = model.runBatch(sess, data['data'], data['image'], False)
        answer = None

        if evalRes in ['top', 'bottom']:
            answer = 'The caption at the %s side of the object.' % evalRes
        elif evalRes in ['True', 'False']:
            answer = 'There is at least one title object in this image.'
        else:
            answer = 'This image contains %s specific object(s).' % evalRes

        return answer
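`predict` is the whole inference pipeline in one call: it configures the MAC network, extracts CNN features for the image, preprocesses the question, restores the EMA weights, and maps the raw model output to a sentence. A hedged usage sketch (the file path and question are hypothetical inputs):

```python
# Hypothetical inputs; predict() expects an image the feature extractor can read
# and a natural-language question about it.
answer = predict("samples/page_0.png", "Is there a title in this image?")
print(answer)  # e.g. "There is at least one title object in this image."
```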
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/scripts/amg.py
DELETED
@@ -1,238 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import cv2  # type: ignore

from SAM import SamAutomaticMaskGenerator, sam_model_registry

import argparse
import json
import os
from typing import Any, Dict, List

parser = argparse.ArgumentParser(
    description=(
        "Runs automatic mask generation on an input image or directory of images, "
        "and outputs masks as either PNGs or COCO-style RLEs. Requires open-cv, "
        "as well as pycocotools if saving in RLE format."
    )
)

parser.add_argument(
    "--input",
    type=str,
    required=True,
    help="Path to either a single input image or folder of images.",
)

parser.add_argument(
    "--output",
    type=str,
    required=True,
    help=(
        "Path to the directory where masks will be output. Output will be either a folder "
        "of PNGs per image or a single json with COCO-style masks."
    ),
)

parser.add_argument(
    "--model-type",
    type=str,
    default="default",
    help="The type of model to load, in ['default', 'vit_l', 'vit_b']",
)

parser.add_argument(
    "--checkpoint",
    type=str,
    required=True,
    help="The path to the SAM checkpoint to use for mask generation.",
)

parser.add_argument("--device", type=str, default="cuda", help="The device to run generation on.")

parser.add_argument(
    "--convert-to-rle",
    action="store_true",
    help=(
        "Save masks as COCO RLEs in a single json instead of as a folder of PNGs. "
        "Requires pycocotools."
    ),
)

amg_settings = parser.add_argument_group("AMG Settings")

amg_settings.add_argument(
    "--points-per-side",
    type=int,
    default=None,
    help="Generate masks by sampling a grid over the image with this many points to a side.",
)

amg_settings.add_argument(
    "--points-per-batch",
    type=int,
    default=None,
    help="How many input points to process simultaneously in one batch.",
)

amg_settings.add_argument(
    "--pred-iou-thresh",
    type=float,
    default=None,
    help="Exclude masks with a predicted score from the model that is lower than this threshold.",
)

amg_settings.add_argument(
    "--stability-score-thresh",
    type=float,
    default=None,
    help="Exclude masks with a stability score lower than this threshold.",
)

amg_settings.add_argument(
    "--stability-score-offset",
    type=float,
    default=None,
    help="Larger values perturb the mask more when measuring stability score.",
)

amg_settings.add_argument(
    "--box-nms-thresh",
    type=float,
    default=None,
    help="The overlap threshold for excluding a duplicate mask.",
)

amg_settings.add_argument(
    "--crop-n-layers",
    type=int,
    default=None,
    help=(
        "If >0, mask generation is run on smaller crops of the image to generate more masks. "
        "The value sets how many different scales to crop at."
    ),
)

amg_settings.add_argument(
    "--crop-nms-thresh",
    type=float,
    default=None,
    help="The overlap threshold for excluding duplicate masks across different crops.",
)

amg_settings.add_argument(
    "--crop-overlap-ratio",
    type=int,
    default=None,
    help="Larger numbers mean image crops will overlap more.",
)

amg_settings.add_argument(
    "--crop-n-points-downscale-factor",
    type=int,
    default=None,
    help="The number of points-per-side in each layer of crop is reduced by this factor.",
)

amg_settings.add_argument(
    "--min-mask-region-area",
    type=int,
    default=None,
    help=(
        "Disconnected mask regions or holes with area smaller than this value "
        "in pixels are removed by postprocessing."
    ),
)


def write_masks_to_folder(masks: List[Dict[str, Any]], path: str) -> None:
    header = "id,area,bbox_x0,bbox_y0,bbox_w,bbox_h,point_input_x,point_input_y,predicted_iou,stability_score,crop_box_x0,crop_box_y0,crop_box_w,crop_box_h"  # noqa
    metadata = [header]
    for i, mask_data in enumerate(masks):
        mask = mask_data["segmentation"]
        filename = f"{i}.png"
        cv2.imwrite(os.path.join(path, filename), mask * 255)
        mask_metadata = [
            str(i),
            str(mask_data["area"]),
            *[str(x) for x in mask_data["bbox"]],
            *[str(x) for x in mask_data["point_coords"][0]],
            str(mask_data["predicted_iou"]),
            str(mask_data["stability_score"]),
            *[str(x) for x in mask_data["crop_box"]],
        ]
        row = ",".join(mask_metadata)
        metadata.append(row)
    metadata_path = os.path.join(path, "metadata.csv")
    with open(metadata_path, "w") as f:
        f.write("\n".join(metadata))

    return


def get_amg_kwargs(args):
    amg_kwargs = {
        "points_per_side": args.points_per_side,
        "points_per_batch": args.points_per_batch,
        "pred_iou_thresh": args.pred_iou_thresh,
        "stability_score_thresh": args.stability_score_thresh,
        "stability_score_offset": args.stability_score_offset,
        "box_nms_thresh": args.box_nms_thresh,
        "crop_n_layers": args.crop_n_layers,
        "crop_nms_thresh": args.crop_nms_thresh,
        "crop_overlap_ratio": args.crop_overlap_ratio,
        "crop_n_points_downscale_factor": args.crop_n_points_downscale_factor,
        "min_mask_region_area": args.min_mask_region_area,
    }
    amg_kwargs = {k: v for k, v in amg_kwargs.items() if v is not None}
    return amg_kwargs


def main(args: argparse.Namespace) -> None:
    print("Loading model...")
    sam = sam_model_registry[args.model_type](checkpoint=args.checkpoint)
    _ = sam.to(device=args.device)
    output_mode = "coco_rle" if args.convert_to_rle else "binary_mask"
    amg_kwargs = get_amg_kwargs(args)
    generator = SamAutomaticMaskGenerator(sam, output_mode=output_mode, **amg_kwargs)

    if not os.path.isdir(args.input):
        targets = [args.input]
    else:
        targets = [
            f for f in os.listdir(args.input) if not os.path.isdir(os.path.join(args.input, f))
        ]
        targets = [os.path.join(args.input, f) for f in targets]

    os.makedirs(args.output, exist_ok=True)

    for t in targets:
        print(f"Processing '{t}'...")
        image = cv2.imread(t)
        if image is None:
            print(f"Could not load '{t}' as an image, skipping...")
            continue
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        masks = generator.generate(image)

        base = os.path.basename(t)
        base = os.path.splitext(base)[0]
        save_base = os.path.join(args.output, base)
        if output_mode == "binary_mask":
            os.makedirs(save_base, exist_ok=False)
            write_masks_to_folder(masks, save_base)
        else:
            save_file = save_base + ".json"
            with open(save_file, "w") as f:
                json.dump(masks, f)
    print("Done!")


if __name__ == "__main__":
    args = parser.parse_args()
    main(args)
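Because the script is a plain argparse CLI, it can also be driven programmatically from within the module by handing `parse_args` an explicit argument list. A minimal sketch with placeholder paths (the checkpoint filename and directories here are assumptions):

```python
# Hypothetical programmatic invocation of the AMG script; all paths are placeholders.
args = parser.parse_args([
    "--input", "images/",
    "--output", "masks/",
    "--model-type", "vit_b",
    "--checkpoint", "sam_vit_b.pth",   # hypothetical checkpoint path
    "--points-per-side", "32",
])
main(args)
```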
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/components/YamlReader.js
DELETED
@@ -1,83 +0,0 @@
import fs from 'fs'
import YAML from 'yaml'
import _ from 'lodash'
import chokidar from 'chokidar'

export default class YamlReader {
  /**
   * Read and write a YAML file.
   *
   * @param yamlPath absolute path to the YAML file
   * @param isWatch whether to watch the file for changes
   */
  constructor(yamlPath, isWatch = false) {
    this.yamlPath = yamlPath
    this.isWatch = isWatch
    this.initYaml()
  }

  initYaml() {
    // parseDocument preserves comments
    this.document = YAML.parseDocument(fs.readFileSync(this.yamlPath, 'utf8'))
    if (this.isWatch && !this.watcher) {
      this.watcher = chokidar.watch(this.yamlPath).on('change', () => {
        if (this.isSave) {
          this.isSave = false
          return
        }
        this.initYaml()
      })
    }
  }

  /** Return the parsed object */
  get jsonData() {
    if (!this.document) {
      return null
    }
    return this.document.toJSON()
  }

  /* Check whether the document contains a value for the key */
  has(keyPath) {
    return this.document.hasIn(keyPath.split('.'))
  }

  /* Return the value of a key */
  get(keyPath) {
    return _.get(this.jsonData, keyPath)
  }

  /* Change the value of a key */
  set(keyPath, value) {
    this.document.setIn([keyPath], value)
    this.save()
  }

  /* Delete a key */
  delete(keyPath) {
    this.document.deleteIn(keyPath.split('.'))
    this.save()
  }

  // Append data to an array
  addIn(keyPath, value) {
    this.document.addIn(keyPath.split('.'), value)
    this.save()
  }

  // Remove a key entirely (NOTE: mapParentKeys is referenced but not defined in this file)
  deleteKey(keyPath) {
    let keys = keyPath.split('.')
    keys = this.mapParentKeys(keys)
    this.document.deleteIn(keys)
    this.save()
  }

  // Save the YAML document back to disk
  save() {
    this.isSave = true
    let yaml = this.document.toString()
    fs.writeFileSync(this.yamlPath, yaml, 'utf8')
  }
}
spaces/CognitiveLabs/Research-Assistant/statics/README_zh.md
DELETED
@@ -1,41 +0,0 @@
<div style="width: 100%;">
  <img src="../statics/title.svg" style="width: 100%;">
</div>

Inspired by [gpt-researcher](https://github.com/assafelovic/gpt-researcher), this project offers an alternative way to generate research reports through a third-party API instead of the official one. To access this third-party API, see [chimeragpt](https://chimeragpt.adventblocks.cc/) or [GPT-API-free](https://github.com/chatanywhere/GPT_API_free). Once you have an API key, you can use it to access the chimeragpt API. Before running the project, make sure you set the environment variables `OPENAI_API_KEY` and `OPENAI_API_BASE`.

```shell
$ export OPENAI_API_KEY=your_api_key
$ export OPENAI_API_BASE=your_api_base
```

Alternatively, you can set the API key and base in the `.env` file.

## Installation

1. Clone the repository

```shell
$ git clone git@github.com:paradoxtown/ai_research_assistant.git
$ cd ai_research_assistant
```

2. Install the dependencies

```shell
$ pip install -r requirements.txt
```

3. Export the environment variables

```shell
$ export OPENAI_API_KEY=your_api_key
$ export OPENAI_API_BASE=your_api_base
```
Or modify the `.env` file.

4. Run the project

```shell
$ python app.py
```
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/templating.py
DELETED
@@ -1 +0,0 @@
from starlette.templating import Jinja2Templates as Jinja2Templates  # noqa
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/IconButton-abe5ede9.js
DELETED
@@ -1,2 +0,0 @@
import{S as I,e as k,s as w,N as m,O as p,k as q,K as f,U as b,p as g,M as _,o as v,Q as S,z,v as A,A as h,x as B,P as C,R as F,F as K}from"./index-3370be2a.js";import"./Button-89624748.js";function d(l){let e,i;return{c(){e=m("span"),i=C(l[1]),f(e,"class","svelte-1030q2h")},m(a,s){g(a,e,s),_(e,i)},p(a,s){s&2&&F(i,a[1])},d(a){a&&h(e)}}}function M(l){let e,i,a,s,o,c,r,n=l[2]&&d(l);return s=new l[0]({}),{c(){e=m("button"),n&&n.c(),i=p(),a=m("div"),q(s.$$.fragment),f(a,"class","svelte-1030q2h"),f(e,"aria-label",l[1]),f(e,"title",l[1]),f(e,"class","svelte-1030q2h"),b(e,"pending",l[3])},m(t,u){g(t,e,u),n&&n.m(e,null),_(e,i),_(e,a),v(s,a,null),o=!0,c||(r=S(e,"click",l[4]),c=!0)},p(t,[u]){t[2]?n?n.p(t,u):(n=d(t),n.c(),n.m(e,i)):n&&(n.d(1),n=null),(!o||u&2)&&f(e,"aria-label",t[1]),(!o||u&2)&&f(e,"title",t[1]),(!o||u&8)&&b(e,"pending",t[3])},i(t){o||(z(s.$$.fragment,t),o=!0)},o(t){A(s.$$.fragment,t),o=!1},d(t){t&&h(e),n&&n.d(),B(s),c=!1,r()}}}function N(l,e,i){let{Icon:a}=e,{label:s=""}=e,{show_label:o=!1}=e,{pending:c=!1}=e;function r(n){K.call(this,l,n)}return l.$$set=n=>{"Icon"in n&&i(0,a=n.Icon),"label"in n&&i(1,s=n.label),"show_label"in n&&i(2,o=n.show_label),"pending"in n&&i(3,c=n.pending)},[a,s,o,c,r]}class Q extends I{constructor(e){super(),k(this,e,N,M,w,{Icon:0,label:1,show_label:2,pending:3})}}export{Q as I};
//# sourceMappingURL=IconButton-abe5ede9.js.map
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_paths.py
DELETED
@@ -1,117 +0,0 @@
# coding=utf-8
# Copyright 2022-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utilities to handle paths in Huggingface Hub."""
from fnmatch import fnmatch
from pathlib import Path
from typing import Callable, Generator, Iterable, List, Optional, TypeVar, Union


T = TypeVar("T")

IGNORE_GIT_FOLDER_PATTERNS = [".git", ".git/*", "*/.git", "**/.git/**"]


def filter_repo_objects(
    items: Iterable[T],
    *,
    allow_patterns: Optional[Union[List[str], str]] = None,
    ignore_patterns: Optional[Union[List[str], str]] = None,
    key: Optional[Callable[[T], str]] = None,
) -> Generator[T, None, None]:
    """Filter repo objects based on an allowlist and a denylist.

    Input must be a list of paths (`str` or `Path`) or a list of arbitrary objects.
    In the latter case, `key` must be provided and specifies a function of one argument
    that is used to extract a path from each element in iterable.

    Patterns are Unix shell-style wildcards which are NOT regular expressions. See
    https://docs.python.org/3/library/fnmatch.html for more details.

    Args:
        items (`Iterable`):
            List of items to filter.
        allow_patterns (`str` or `List[str]`, *optional*):
            Patterns constituting the allowlist. If provided, item paths must match at
            least one pattern from the allowlist.
        ignore_patterns (`str` or `List[str]`, *optional*):
            Patterns constituting the denylist. If provided, item paths must not match
            any patterns from the denylist.
        key (`Callable[[T], str]`, *optional*):
            Single-argument function to extract a path from each item. If not provided,
            the `items` must already be `str` or `Path`.

    Returns:
        Filtered list of objects, as a generator.

    Raises:
        :class:`ValueError`:
            If `key` is not provided and items are not `str` or `Path`.

    Example usage with paths:
    ```python
    >>> # Filter only PDFs that are not hidden.
    >>> list(filter_repo_objects(
    ...     ["aaa.PDF", "bbb.jpg", ".ccc.pdf", ".ddd.png"],
    ...     allow_patterns=["*.pdf"],
    ...     ignore_patterns=[".*"],
    ... ))
    ["aaa.pdf"]
    ```

    Example usage with objects:
    ```python
    >>> list(filter_repo_objects(
    ...     [
    ...         CommitOperationAdd(path_or_fileobj="/tmp/aaa.pdf", path_in_repo="aaa.pdf")
    ...         CommitOperationAdd(path_or_fileobj="/tmp/bbb.jpg", path_in_repo="bbb.jpg")
    ...         CommitOperationAdd(path_or_fileobj="/tmp/.ccc.pdf", path_in_repo=".ccc.pdf")
    ...         CommitOperationAdd(path_or_fileobj="/tmp/.ddd.png", path_in_repo=".ddd.png")
    ...     ],
    ...     allow_patterns=["*.pdf"],
    ...     ignore_patterns=[".*"],
    ...     key=lambda x: x.path_in_repo
    ... ))
    [CommitOperationAdd(path_or_fileobj="/tmp/aaa.pdf", path_in_repo="aaa.pdf")]
    ```
    """
    if isinstance(allow_patterns, str):
        allow_patterns = [allow_patterns]

    if isinstance(ignore_patterns, str):
        ignore_patterns = [ignore_patterns]

    if key is None:

        def _identity(item: T) -> str:
            if isinstance(item, str):
                return item
            if isinstance(item, Path):
                return str(item)
            raise ValueError(f"Please provide `key` argument in `filter_repo_objects`: `{item}` is not a string.")

        key = _identity  # Items must be `str` or `Path`, otherwise raise ValueError

    for item in items:
        path = key(item)

        # Skip if there's an allowlist and path doesn't match any
        if allow_patterns is not None and not any(fnmatch(path, r) for r in allow_patterns):
            continue

        # Skip if there's a denylist and path matches any
        if ignore_patterns is not None and any(fnmatch(path, r) for r in ignore_patterns):
            continue

        yield item
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/postprocessing/dula/layout_old.py
DELETED
@@ -1,134 +0,0 @@
"""
@Date: 2021/10/06
@description: Use the approach proposed by DuLa-Net
"""
import cv2
import numpy as np
import math
import matplotlib.pyplot as plt

from visualization.floorplan import draw_floorplan


def merge_near(lst, diag):
    group = [[0, ]]
    for i in range(1, len(lst)):
        if lst[i] - np.mean(group[-1]) < diag * 0.02:
            group[-1].append(lst[i])
        else:
            group.append([lst[i], ])
    if len(group) == 1:
        group = [lst[0], lst[-1]]
    else:
        group = [int(np.mean(x)) for x in group]
    return group


def fit_layout_old(floor_xz, need_cube=False, show=False, block_eps=0.05):
    show_radius = np.linalg.norm(floor_xz, axis=-1).max()
    side_l = 512
    floorplan = draw_floorplan(xz=floor_xz, show_radius=show_radius, show=show, scale=1, side_l=side_l).astype(np.uint8)
    center = np.array([side_l / 2, side_l / 2])
    polys = cv2.findContours(floorplan, 1, 2)
    if isinstance(polys, tuple):
        if len(polys) == 3:
            # opencv 3
            polys = list(polys[1])
        else:
            polys = list(polys[0])
    polys.sort(key=lambda x: cv2.contourArea(x), reverse=True)
    poly = polys[0]
    sub_x, sub_y, w, h = cv2.boundingRect(poly)
    floorplan_sub = floorplan[sub_y:sub_y + h, sub_x:sub_x + w]
    sub_center = center - np.array([sub_x, sub_y])
    polys = cv2.findContours(floorplan_sub, 1, 2)
    if isinstance(polys, tuple):
        if len(polys) == 3:
            polys = polys[1]
        else:
            polys = polys[0]
    poly = polys[0]
    epsilon = 0.005 * cv2.arcLength(poly, True)
    poly = cv2.approxPolyDP(poly, epsilon, True)

    x_lst = [0, ]
    y_lst = [0, ]
    for i in range(len(poly)):
        p1 = poly[i][0]
        p2 = poly[(i + 1) % len(poly)][0]

        if (p2[0] - p1[0]) == 0:
            slope = 10
        else:
            slope = abs((p2[1] - p1[1]) / (p2[0] - p1[0]))

        if slope <= 1:
            s = int((p1[1] + p2[1]) / 2)
            y_lst.append(s)
        elif slope > 1:
            s = int((p1[0] + p2[0]) / 2)
            x_lst.append(s)

    x_lst.append(floorplan_sub.shape[1])
    y_lst.append(floorplan_sub.shape[0])
    x_lst.sort()
    y_lst.sort()

    diag = math.sqrt(math.pow(floorplan_sub.shape[1], 2) + math.pow(floorplan_sub.shape[0], 2))
    x_lst = merge_near(x_lst, diag)
    y_lst = merge_near(y_lst, diag)
    if need_cube and len(x_lst) > 2:
        x_lst = [x_lst[0], x_lst[-1]]
    if need_cube and len(y_lst) > 2:
        y_lst = [y_lst[0], y_lst[-1]]

    ans = np.zeros((floorplan_sub.shape[0], floorplan_sub.shape[1]))
    for i in range(len(x_lst) - 1):
        for j in range(len(y_lst) - 1):
            sample = floorplan_sub[y_lst[j]:y_lst[j + 1], x_lst[i]:x_lst[i + 1]]
            score = 0 if sample.size == 0 else sample.mean()
            if score >= 0.3:
                ans[y_lst[j]:y_lst[j + 1], x_lst[i]:x_lst[i + 1]] = 1

    pred = np.uint8(ans)
    pred_polys = cv2.findContours(pred, 1, 3)
    if isinstance(pred_polys, tuple):
        if len(pred_polys) == 3:
            pred_polys = pred_polys[1]
        else:
            pred_polys = pred_polys[0]

    polygon = [(p[0][1], p[0][0]) for p in pred_polys[0][::-1]]

    v = np.array([p[0] + sub_y for p in polygon])
    u = np.array([p[1] + sub_x for p in polygon])
    #        side_l
    # v<-----------|o
    # |     |      |
    # | ----|----z |  side_l
    # |     |      |
    # |     x     \|/
    # |------------u
    side_l = floorplan.shape[0]
    pred_xz = np.concatenate((u[:, np.newaxis] - side_l // 2, side_l // 2 - v[:, np.newaxis]), axis=1)

    pred_xz = pred_xz * show_radius / (side_l // 2)
    if show:
        draw_floorplan(pred_xz, show_radius=show_radius, show=show)
    return pred_xz


if __name__ == '__main__':
    from utils.conversion import uv2xyz

    pano_img = np.zeros([512, 1024, 3])
    corners = np.array([[0.1, 0.7],
                        [0.4, 0.7],
                        [0.3, 0.6],
                        [0.6, 0.6],
                        [0.8, 0.7]])
    xz = uv2xyz(corners)[..., ::2]
    draw_floorplan(xz, show=True, marker_color=None, center_color=0.8)

    xz = fit_layout_old(xz)
    draw_floorplan(xz, show=True, marker_color=None, center_color=0.8)
spaces/Datasculptor/StyleGAN-NADA/op/fused_bias_act.cpp
DELETED
@@ -1,21 +0,0 @@
#include <torch/extension.h>


torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
                                int act, int grad, float alpha, float scale);

#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
                             int act, int grad, float alpha, float scale) {
    CHECK_CUDA(input);
    CHECK_CUDA(bias);

    return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
}
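This file only declares the CUDA entry point and the pybind11 binding; ops like this are typically compiled on the fly with PyTorch's JIT extension loader. A hedged sketch of that pattern (the companion `.cu` kernel filename is an assumption, since only the C++ binding appears here):

```python
# Hedged sketch: JIT-compile the binding together with its (assumed) CUDA kernel file.
from torch.utils.cpp_extension import load

fused = load(
    name="fused",
    sources=["fused_bias_act.cpp", "fused_bias_act_kernel.cu"],  # kernel filename assumed
)
# fused.fused_bias_act(input, bias, refer, act, grad, alpha, scale) is then callable.
```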
spaces/DeepakJaiz/QA_evaluator/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: QA Evaluator
emoji: 👁
colorFrom: gray
colorTo: purple
sdk: streamlit
sdk_version: 1.21.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Demosthene-OR/avr23-cds-translation/tabs/custom_vectorizer.py
DELETED
@@ -1,14 +0,0 @@
# The following two functions are needed so that these CountVectorizer parameters can be
# serialized, allowing the vectorizer to be saved for later use without re-initializing it from X_train.
import tiktoken

tokenizer = tiktoken.get_encoding("cl100k_base")

def custom_tokenizer(text):
    global tokenizer

    tokens = tokenizer.encode(text)  # This splits the text into tokens
    return tokens

def custom_preprocessor(text):
    return text
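The comment explains the intent: with module-level functions instead of lambdas, a fitted `CountVectorizer` can be pickled and reloaded without re-fitting on X_train. A minimal sketch of how these hooks plug in (the sample corpus is hypothetical):

```python
# Hedged sketch: module-level functions (unlike lambdas) survive pickling.
import pickle
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer(tokenizer=custom_tokenizer, preprocessor=custom_preprocessor)
X = vectorizer.fit_transform(["a small sample corpus", "another document"])  # hypothetical corpus

blob = pickle.dumps(vectorizer)     # serialize the fitted vectorizer
restored = pickle.loads(blob)       # reload it later without X_train
```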
spaces/DragGan/DragGan-Inversion/PTI/training/coaches/base_coach.py
DELETED
@@ -1,158 +0,0 @@
import abc
import os
import pickle
from argparse import Namespace
import os.path
from PTI.criteria.localitly_regulizer import Space_Regulizer
import torch
import wandb  # required by calc_loss below; missing from the original imports
from torchvision import transforms
from lpips import LPIPS
from PTI.training.projectors import w_projector
from PTI.configs import global_config, paths_config, hyperparameters
from PTI.criteria import l2_loss
from PTI.models.e4e.psp import pSp
from PTI.utils.log_utils import log_image_from_w
from PTI.utils.models_utils import toogle_grad, load_old_G


class BaseCoach:
    def __init__(self, data_loader, use_wandb):

        self.use_wandb = use_wandb
        self.data_loader = data_loader
        self.w_pivots = {}
        self.image_counter = 0

        if hyperparameters.first_inv_type == 'w+':
            self.initilize_e4e()

        self.e4e_image_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])

        # Initialize loss
        self.lpips_loss = LPIPS(net=hyperparameters.lpips_type).to(
            global_config.device).eval()

        self.restart_training()

        # Initialize checkpoint dir
        self.checkpoint_dir = paths_config.checkpoints_dir
        os.makedirs(self.checkpoint_dir, exist_ok=True)

    def restart_training(self):

        # Initialize networks
        self.G = load_old_G()
        toogle_grad(self.G, True)

        self.original_G = load_old_G()

        self.space_regulizer = Space_Regulizer(
            self.original_G, self.lpips_loss)
        self.optimizer = self.configure_optimizers()

    def get_inversion(self, w_path_dir, image_name, image):
        embedding_dir = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}'
        os.makedirs(embedding_dir, exist_ok=True)

        w_pivot = None
        if hyperparameters.use_last_w_pivots:
            w_pivot = self.load_inversions(w_path_dir, image_name)

        if not hyperparameters.use_last_w_pivots or w_pivot is None:
            w_pivot = self.calc_inversions(image, image_name)
            torch.save(w_pivot, f'{embedding_dir}/0.pt')

        w_pivot = w_pivot.to(global_config.device)
        return w_pivot

    def load_inversions(self, w_path_dir, image_name):
        if image_name in self.w_pivots:
            return self.w_pivots[image_name]

        if hyperparameters.first_inv_type == 'w+':
            w_potential_path = f'{w_path_dir}/{paths_config.e4e_results_keyword}/{image_name}/0.pt'
        else:
            w_potential_path = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}/0.pt'
        if not os.path.isfile(w_potential_path):
            return None
        w = torch.load(w_potential_path).to(global_config.device)
        self.w_pivots[image_name] = w
        return w

    def calc_inversions(self, image, image_name):
        if hyperparameters.first_inv_type == 'w+':
            w = self.get_e4e_inversion(image)

        else:
            id_image = torch.squeeze(
                (image.to(global_config.device) + 1) / 2) * 255
            w = w_projector.project(self.G, id_image, device=torch.device(global_config.device), w_avg_samples=600,
                                    num_steps=hyperparameters.first_inv_steps, w_name=image_name,
                                    use_wandb=self.use_wandb)

        return w

    @abc.abstractmethod
    def train(self):
        pass

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(
            self.G.parameters(), lr=hyperparameters.pti_learning_rate)

        return optimizer

    def calc_loss(self, generated_images, real_images, log_name, new_G, use_ball_holder, w_batch):
        loss = 0.0

        if hyperparameters.pt_l2_lambda > 0:
            l2_loss_val = l2_loss.l2_loss(generated_images, real_images)
            if self.use_wandb:
                wandb.log({f'MSE_loss_val_{log_name}': l2_loss_val.detach(
                ).cpu()}, step=global_config.training_step)
            loss += l2_loss_val * hyperparameters.pt_l2_lambda
        if hyperparameters.pt_lpips_lambda > 0:
            loss_lpips = self.lpips_loss(generated_images, real_images)
            loss_lpips = torch.squeeze(loss_lpips)
            if self.use_wandb:
                wandb.log({f'LPIPS_loss_val_{log_name}': loss_lpips.detach(
                ).cpu()}, step=global_config.training_step)
            loss += loss_lpips * hyperparameters.pt_lpips_lambda

        if use_ball_holder and hyperparameters.use_locality_regularization:
            ball_holder_loss_val = self.space_regulizer.space_regulizer_loss(
                new_G, w_batch, use_wandb=self.use_wandb)
            loss += ball_holder_loss_val

        return loss, l2_loss_val, loss_lpips

    def forward(self, w):
        generated_images = self.G.synthesis(
            w, noise_mode='const', force_fp32=True)

        return generated_images

    def initilize_e4e(self):
        ckpt = torch.load(paths_config.e4e, map_location='cpu')
        opts = ckpt['opts']
        opts['batch_size'] = hyperparameters.train_batch_size
        opts['checkpoint_path'] = paths_config.e4e
        opts = Namespace(**opts)
        self.e4e_inversion_net = pSp(opts)
        self.e4e_inversion_net.eval()
        self.e4e_inversion_net = self.e4e_inversion_net.to(
            global_config.device)
        toogle_grad(self.e4e_inversion_net, False)

    def get_e4e_inversion(self, image):
        image = (image + 1) / 2
        new_image = self.e4e_image_transform(image[0]).to(global_config.device)
        _, w = self.e4e_inversion_net(new_image.unsqueeze(0), randomize_noise=False, return_latents=True, resize=False,
                                      input_code=False)
        if self.use_wandb:
            log_image_from_w(w, self.G, 'First e4e inversion')
        return w
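`train` is abstract, so a concrete coach supplies the optimization loop while reusing the pivot and loss machinery above. A hedged sketch of such a subclass (the class name, the config attribute, and the loop details are assumptions; only `BaseCoach` itself appears in this file):

```python
# Minimal subclass sketch; names other than BaseCoach's own methods are hypothetical.
class SimpleCoach(BaseCoach):
    def train(self):
        w_path_dir = paths_config.embedding_base_dir  # assumed config attribute
        for image_name, image in self.data_loader:
            w_pivot = self.get_inversion(w_path_dir, image_name, image)
            generated = self.forward(w_pivot)
            loss, _, _ = self.calc_loss(generated, image.to(global_config.device),
                                        image_name, self.G, True, w_pivot)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
```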
spaces/DragGan/DragGan-Inversion/PTI/training/projectors/w_plus_projector.py
DELETED
@@ -1,145 +0,0 @@
# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto.  Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Project given image to the latent space of pretrained network pickle."""

import copy
import wandb
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from configs import global_config, hyperparameters
import dnnlib
from utils.log_utils import log_image_from_w


def project(
        G,
        target: torch.Tensor,  # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
        *,
        num_steps=1000,
        w_avg_samples=10000,
        initial_learning_rate=0.01,
        initial_noise_factor=0.05,
        lr_rampdown_length=0.25,
        lr_rampup_length=0.05,
        noise_ramp_length=0.75,
        regularize_noise_weight=1e5,
        verbose=False,
        device: torch.device,
        use_wandb=False,
        initial_w=None,
        image_log_step=global_config.image_rec_result_log_snapshot,
        w_name: str
):
    assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution)

    def logprint(*args):
        if verbose:
            print(*args)

    G = copy.deepcopy(G).eval().requires_grad_(False).to(device).float()  # type: ignore

    # Compute w stats.
    logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')
    z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
    w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None)  # [N, L, C]
    w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32)  # [N, 1, C]
    w_avg = np.mean(w_samples, axis=0, keepdims=True)  # [1, 1, C]
    w_avg_tensor = torch.from_numpy(w_avg).to(global_config.device)
    w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5

    start_w = initial_w if initial_w is not None else w_avg

    # Setup noise inputs.
    noise_bufs = {name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name}

    # Load VGG16 feature detector.
    url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    with dnnlib.util.open_url(url) as f:
        vgg16 = torch.jit.load(f).eval().to(device)

    # Features for target image.
    target_images = target.unsqueeze(0).to(device).to(torch.float32)
    if target_images.shape[2] > 256:
        target_images = F.interpolate(target_images, size=(256, 256), mode='area')
    target_features = vgg16(target_images, resize_images=False, return_lpips=True)

    start_w = np.repeat(start_w, G.mapping.num_ws, axis=1)
    w_opt = torch.tensor(start_w, dtype=torch.float32, device=device,
                         requires_grad=True)  # pylint: disable=not-callable

    optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999),
                                 lr=hyperparameters.first_inv_lr)

    # Init noise.
    for buf in noise_bufs.values():
        buf[:] = torch.randn_like(buf)
        buf.requires_grad = True

    for step in tqdm(range(num_steps)):

        # Learning rate schedule.
        t = step / num_steps
        w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
        lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
        lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
        lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
        lr = initial_learning_rate * lr_ramp
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        # Synth images from opt_w.
        w_noise = torch.randn_like(w_opt) * w_noise_scale
        ws = (w_opt + w_noise)

        synth_images = G.synthesis(ws, noise_mode='const', force_fp32=True)

        # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
        synth_images = (synth_images + 1) * (255 / 2)
        if synth_images.shape[2] > 256:
            synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')

        # Features for synth images.
        synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)
        dist = (target_features - synth_features).square().sum()

        # Noise regularization.
        reg_loss = 0.0
        for v in noise_bufs.values():
            noise = v[None, None, :, :]  # must be [1,1,H,W] for F.avg_pool2d()
            while True:
                reg_loss += (noise * torch.roll(noise, shifts=1, dims=3)).mean() ** 2
                reg_loss += (noise * torch.roll(noise, shifts=1, dims=2)).mean() ** 2
                if noise.shape[2] <= 8:
                    break
                noise = F.avg_pool2d(noise, kernel_size=2)
        loss = dist + reg_loss * regularize_noise_weight

        if step % image_log_step == 0:
            with torch.no_grad():
                if use_wandb:
                    global_config.training_step += 1
                    wandb.log({f'first projection _{w_name}': loss.detach().cpu()}, step=global_config.training_step)
                    log_image_from_w(w_opt, G, w_name)

        # Step
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        logprint(f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')

    # Normalize noise.
    with torch.no_grad():
        for buf in noise_bufs.values():
            buf -= buf.mean()
            buf *= buf.square().mean().rsqrt()

    del G
    return w_opt
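A hedged usage sketch for `project`: it expects a single [C,H,W] tensor in the [0,255] range matching the generator resolution, and it returns the optimized W+ latent. The generator-loading code and image path below are assumptions; only `project`'s signature above is taken as given:

```python
# Hypothetical setup; the pickle path and image file are placeholders.
import pickle
import numpy as np
import torch
from PIL import Image

with open("stylegan2_ffhq.pkl", "rb") as f:          # hypothetical pickle path
    G = pickle.load(f)["G_ema"].cuda()

img = np.array(Image.open("face.png").resize((G.img_resolution, G.img_resolution)))
target = torch.from_numpy(img.transpose(2, 0, 1))    # [C, H, W], range [0, 255]

w_plus = project(G, target, device=torch.device("cuda"),
                 num_steps=500, w_name="face", verbose=True)
```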
spaces/Dragonnext/charybdis/greeting.md
DELETED
@@ -1,17 +0,0 @@
It will ALWAYS open every Friday at 6PM UTC and stay open until Monday 7AM UTC. (No gatekeeper)

(Special proxy pass for access outside that time frame: additional hints soon)

Hints:

All 62 unique words that trigger a timeout (including variations, e.g. test + t3st)

315 minutes

SillyTavern Hivemind.

https://pastebin.com/DhKk9w92

Formatting: all LOWERCASE, no SPACES; the words must first be ordered ALPHABETICALLY (special character included, and it should come first); Cyrillic excluded (keep the first 200 letters and drop the rest, since the ST proxy password limit is 200)
spaces/EsoCode/text-generation-webui/modules/sampler_hijack.py
DELETED
@@ -1,204 +0,0 @@
import math

import torch
import transformers
from transformers import LogitsWarper
from transformers.generation.logits_process import (
    LogitNormalization,
    LogitsProcessor,
    LogitsProcessorList,
    TemperatureLogitsWarper
)


class TailFreeLogitsWarper(LogitsWarper):
    def __init__(self, tfs: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        tfs = float(tfs)
        if tfs < 0 or tfs > 1.0:
            raise ValueError(f"`tfs` has to be a float >= 0 and <= 1, but is {tfs}")
        self.tfs = tfs
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        sorted_logits, sorted_indices = torch.sort(scores, descending=True)
        probs = sorted_logits.softmax(dim=-1)

        # Compute second derivative normalized CDF
        d2 = probs.diff().diff().abs()
        normalized_d2 = d2 / d2.sum(dim=-1, keepdim=True)
        normalized_d2_cdf = normalized_d2.cumsum(dim=-1)

        # Remove tokens with CDF value above the threshold (tokens with 0 are kept)
        sorted_indices_to_remove = normalized_d2_cdf > self.tfs

        # Centre the distribution around the cutoff as in the original implementation of the algorithm
        sorted_indices_to_remove = torch.cat(
            (
                torch.zeros(scores.shape[0], 1, dtype=torch.bool, device=scores.device),
                sorted_indices_to_remove,
                torch.ones(scores.shape[0], 1, dtype=torch.bool, device=scores.device),
            ),
            dim=-1,
        )

        if self.min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep
            sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0

        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
        scores = scores.masked_fill(indices_to_remove, self.filter_value)
        return scores


class TopALogitsWarper(LogitsWarper):
    def __init__(self, top_a: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        top_a = float(top_a)
        if top_a < 0 or top_a > 1.0:
            raise ValueError(f"`top_a` has to be a float >= 0 and <= 1, but is {top_a}")
        self.top_a = top_a
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        sorted_logits, sorted_indices = torch.sort(scores, descending=True)
        probs = sorted_logits.softmax(dim=-1)

        # Remove tokens with probability less than top_a*(max(probs))^2 (tokens with 0 are kept)
        probs_max = probs[..., 0, None]
        sorted_indices_to_remove = probs < probs_max * probs_max * self.top_a

        if self.min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep
            sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0

        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
        scores = scores.masked_fill(indices_to_remove, self.filter_value)
        return scores


class MirostatLogitsWarper(LogitsWarper):
    def __init__(self, mirostat_mode: int, mirostat_tau: float, mirostat_eta: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if mirostat_mode not in [2]:
            raise ValueError(f"`mirostat` has to be an integer 2, but is {mirostat_mode}")
        self.mirostat_mode = mirostat_mode
        self.mirostat_eta = mirostat_eta
        self.mirostat_tau = mirostat_tau
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
        self.mu = 2 * self.mirostat_tau
        self.e = 0

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        logits = scores[0]
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        prob_original = torch.softmax(sorted_logits, dim=-1).tolist()  # candidates

        # Truncate the words with surprise values greater than mu
        for i, candidate in enumerate(prob_original):
            if candidate > 0 and -math.log2(candidate) > self.mu:
                if (i == 0):
                    sorted_logits = sorted_logits[:1]
                else:
                    sorted_logits = sorted_logits[:i]
                break

        # Normalize the probabilities of the remaining words
        prob_topk = torch.softmax(sorted_logits, dim=0)

        prev_i = torch.multinomial(prob_topk, num_samples=1, replacement=True).to('cuda')

        observed_surprise = -math.log2(prob_topk[prev_i])
        self.e = observed_surprise - self.mirostat_tau

        # Update mu using the learning rate and error
        self.mu -= self.mirostat_eta * self.e

        sorted_indices_to_remove = torch.ones_like(scores[0], dtype=torch.bool)
        sorted_indices_to_remove[prev_i] = False

        indices_to_remove = sorted_indices_to_remove.unsqueeze(0).scatter(1, sorted_indices.unsqueeze(0), sorted_indices_to_remove.unsqueeze(0))
        scores = scores.masked_fill(indices_to_remove, self.filter_value)
        return scores


class RepetitionPenaltyLogitsProcessorWithRange(LogitsProcessor):
    '''
    Copied from the transformers library
    '''
    def __init__(self, penalty: float, _range: int):
        if not isinstance(penalty, float) or not (penalty > 0):
            raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")

        self.penalty = penalty
        self._range = _range

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:

        input_ids = input_ids[:, -self._range:]
        score = torch.gather(scores, 1, input_ids)

        # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
        score = torch.where(score < 0, score * self.penalty, score / self.penalty)

        scores.scatter_(1, input_ids, score)
        return scores


def get_logits_warper_patch(self, generation_config):
    warpers = self._get_logits_warper_old(generation_config)
    warpers_to_add = LogitsProcessorList()
    min_tokens_to_keep = 2 if generation_config.num_beams > 1 else 1

    if generation_config.mirostat_mode is not None and generation_config.mirostat_mode == 2:
        warpers_to_add.append(MirostatLogitsWarper(mirostat_mode=generation_config.mirostat_mode, mirostat_eta=generation_config.mirostat_eta, mirostat_tau=generation_config.mirostat_tau, min_tokens_to_keep=min_tokens_to_keep))
        # We need to disable samplers other than temperature
        for warper in list(warpers):  # iterate over a copy so removal during the loop is safe
            if not isinstance(warper, TemperatureLogitsWarper):
                warpers.remove(warper)
    else:
        if generation_config.tfs is not None and 0.0 <= generation_config.tfs <= 1.0:
            warpers_to_add.append(TailFreeLogitsWarper(tfs=generation_config.tfs, min_tokens_to_keep=min_tokens_to_keep))
        if generation_config.top_a is not None and 0.0 <= generation_config.top_a <= 1.0:
            warpers_to_add.append(TopALogitsWarper(top_a=generation_config.top_a, min_tokens_to_keep=min_tokens_to_keep))

    if warpers and isinstance(warpers[-1], LogitNormalization):
        warpers = warpers[:-1] + warpers_to_add + [warpers[-1]]
    else:
        warpers += warpers_to_add

    return warpers


def get_logits_processor_patch(self, **kwargs):
    result = self._get_logits_processor_old(**kwargs)
    repetition_penalty_range = kwargs['generation_config'].repetition_penalty_range
    repetition_penalty = kwargs['generation_config'].repetition_penalty

    if repetition_penalty_range > 0:
        for i in range(len(result)):
            if result[i].__class__.__name__ == 'RepetitionPenaltyLogitsProcessor':
                result[i] = RepetitionPenaltyLogitsProcessorWithRange(repetition_penalty, repetition_penalty_range)

    return result


def generation_config_init_patch(self, **kwargs):
    self.__init___old(**kwargs)
    self.tfs = kwargs.pop("tfs", 1.0)
    self.top_a = kwargs.pop("top_a", 0.0)
    self.mirostat_mode = kwargs.pop("mirostat_mode", 0)
    self.mirostat_eta = kwargs.pop("mirostat_eta", 0.1)
    self.mirostat_tau = kwargs.pop("mirostat_tau", 5)
    self.repetition_penalty_range = kwargs.pop("repetition_penalty_range", 0)


def hijack_samplers():
    transformers.GenerationMixin._get_logits_warper_old = transformers.GenerationMixin._get_logits_warper
    transformers.GenerationMixin._get_logits_warper = get_logits_warper_patch

    transformers.GenerationMixin._get_logits_processor_old = transformers.GenerationMixin._get_logits_processor
    transformers.GenerationMixin._get_logits_processor = get_logits_processor_patch

    transformers.GenerationConfig.__init___old = transformers.GenerationConfig.__init__
    transformers.GenerationConfig.__init__ = generation_config_init_patch
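The patch works by monkey-patching `transformers.GenerationMixin` globally; after `hijack_samplers()` is called once, the extra sampler knobs become ordinary `generate` keyword arguments routed through the patched `GenerationConfig`. A minimal sketch (the model name and parameter values are illustrative, not from the source):

```python
# Hedged sketch: apply the patches once, then the new sampling knobs are accepted.
from transformers import AutoModelForCausalLM, AutoTokenizer

hijack_samplers()

tok = AutoTokenizer.from_pretrained("gpt2")          # illustrative model choice
model = AutoModelForCausalLM.from_pretrained("gpt2")
ids = tok("Hello", return_tensors="pt").input_ids
out = model.generate(ids, do_sample=True, max_new_tokens=32,
                     tfs=0.95, top_a=0.2, repetition_penalty_range=1024)
```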
spaces/EuroPython2022/mmocr-demo/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py
DELETED
@@ -1,33 +0,0 @@
_base_ = [
    '../../_base_/default_runtime.py',
    '../../_base_/det_models/ocr_mask_rcnn_r50_fpn_ohem.py',
    '../../_base_/schedules/schedule_sgd_160e.py',
    '../../_base_/det_datasets/icdar2015.py',
    '../../_base_/det_pipelines/maskrcnn_pipeline.py'
]

train_list = {{_base_.train_list}}
test_list = {{_base_.test_list}}

train_pipeline = {{_base_.train_pipeline}}
test_pipeline_icdar2015 = {{_base_.test_pipeline_icdar2015}}

data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(samples_per_gpu=1),
    test_dataloader=dict(samples_per_gpu=1),
    train=dict(
        type='UniformConcatDataset',
        datasets=train_list,
        pipeline=train_pipeline),
    val=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline_icdar2015),
    test=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline_icdar2015))

evaluation = dict(interval=10, metric='hmean-iou')
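This config is pure composition: all concrete settings live in the _base_ files, and the {{_base_.*}} placeholders are substituted at load time. A minimal sketch of loading it (assuming an MMOCR checkout where these relative paths resolve; mmcv's Config handles the inheritance chain):

from mmcv import Config

# fromfile() walks the `_base_` chain and substitutes the {{_base_.*}}
# placeholders, yielding one flat config object.
cfg = Config.fromfile('configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py')
print(cfg.data.samples_per_gpu)  # 8
print(cfg.evaluation)            # {'interval': 10, 'metric': 'hmean-iou'}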
spaces/Evanell/Venus/README.md
DELETED
@@ -1,10 +0,0 @@
---
title: Venus
emoji: ⚡
colorFrom: gray
colorTo: green
sdk: docker
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/FourthBrainGenAI/DeepLearningAIDemoChatBot/app.py
DELETED
@@ -1,281 +0,0 @@
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
import datetime
import os
from threading import Event, Thread
from uuid import uuid4
import gradio as gr
import requests

model_name = "decapoda-research/llama-13b-hf"
adapters_name = 'timdettmers/guanaco-13b'

print(f"Starting to load the model {model_name} into memory")

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    load_in_4bit=True,
    torch_dtype=torch.bfloat16,
    device_map={"": 0}
)

model = PeftModel.from_pretrained(model, adapters_name)
tokenizer = LlamaTokenizer.from_pretrained(model_name)
tokenizer.bos_token_id = 1
stop_token_ids = [0]

max_new_tokens = 2048

start_message = """A chat between a human user and a kind AI. The assistant gives helpful, cordial, and polite answers to the user's questions."""

class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        for stop_id in stop_token_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False


def convert_history_to_text(history):
    text = start_message + "".join(
        [
            "".join(
                [
                    f"### Human: {item[0]}\n",
                    f"### Assistant: {item[1]}\n",
                ]
            )
            for item in history[:-1]
        ]
    )
    text += "".join(
        [
            "".join(
                [
                    f"### Human: {history[-1][0]}\n",
                    f"### Assistant: {history[-1][1]}\n",
                ]
            )
        ]
    )
    return text


def log_conversation(conversation_id, history, messages, generate_kwargs):
    logging_url = os.getenv("LOGGING_URL", None)
    if logging_url is None:
        return

    timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")

    data = {
        "conversation_id": conversation_id,
        "timestamp": timestamp,
        "history": history,
        "messages": messages,
        "generate_kwargs": generate_kwargs,
    }

    try:
        requests.post(logging_url, json=data)
    except requests.exceptions.RequestException as e:
        print(f"Error logging conversation: {e}")


def user(message, history):
    # Append the user's message to the conversation history
    return "", history + [[message, ""]]


def bot(history, temperature, top_p, top_k, repetition_penalty, conversation_id):
    print(f"history: {history}")
    # Initialize a StopOnTokens object
    stop = StopOnTokens()

    # Construct the input message string for the model by concatenating the current system message and conversation history
    messages = convert_history_to_text(history)

    # Tokenize the messages string
    input_ids = tokenizer(messages, return_tensors="pt").input_ids
    input_ids = input_ids.to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        do_sample=temperature > 0.0,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        streamer=streamer,
        stopping_criteria=StoppingCriteriaList([stop]),
    )

    stream_complete = Event()

    def generate_and_signal_complete():
        model.generate(**generate_kwargs)
        stream_complete.set()

    def log_after_stream_complete():
        stream_complete.wait()
        log_conversation(
            conversation_id,
            history,
            messages,
            {
                "top_k": top_k,
                "top_p": top_p,
                "temperature": temperature,
                "repetition_penalty": repetition_penalty,
            },
        )

    t1 = Thread(target=generate_and_signal_complete)
    t1.start()

    t2 = Thread(target=log_after_stream_complete)
    t2.start()

    # Initialize an empty string to store the generated text
    partial_text = ""
    for new_text in streamer:
        partial_text += new_text
        history[-1][1] = partial_text
        yield history


def get_uuid():
    return str(uuid4())


with gr.Blocks(
    theme=gr.themes.Soft(),
    css=".disclaimer {font-variant-caps: all-small-caps;}",
) as demo:
    conversation_id = gr.State(get_uuid)
    gr.Markdown(
        """<h1><center>FourthBrain DeepLearningAI ChatBot Demo</center></h1>
"""
    )
    chatbot = gr.Chatbot().style(height=500)
    with gr.Row():
        with gr.Column():
            msg = gr.Textbox(
                label="Chat Message Box",
                placeholder="Chat Message Box",
                show_label=False,
            ).style(container=False)
        with gr.Column():
            with gr.Row():
                submit = gr.Button("Submit")
                stop = gr.Button("Stop")
                clear = gr.Button("Clear")
    with gr.Row():
        with gr.Accordion("Advanced Options:", open=False):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        temperature = gr.Slider(
                            label="Temperature",
                            value=0.7,
                            minimum=0.0,
                            maximum=1.0,
                            step=0.1,
                            interactive=True,
                            info="Higher values produce more diverse outputs",
                        )
                with gr.Column():
                    with gr.Row():
                        top_p = gr.Slider(
                            label="Top-p (nucleus sampling)",
                            value=0.9,
                            minimum=0.0,
                            maximum=1,
                            step=0.01,
                            interactive=True,
                            info=(
                                "Sample from the smallest possible set of tokens whose cumulative probability "
                                "exceeds top_p. Set to 1 to disable and sample from all tokens."
                            ),
                        )
                with gr.Column():
                    with gr.Row():
                        top_k = gr.Slider(
                            label="Top-k",
                            value=0,
                            minimum=0.0,
                            maximum=200,
                            step=1,
                            interactive=True,
                            info="Sample from a shortlist of top-k tokens — 0 to disable and sample from all tokens.",
                        )
                with gr.Column():
                    with gr.Row():
                        repetition_penalty = gr.Slider(
                            label="Repetition Penalty",
                            value=1.1,
                            minimum=1.0,
                            maximum=2.0,
                            step=0.1,
                            interactive=True,
                            info="Penalize repetition — 1.0 to disable.",
                        )
    with gr.Row():
        gr.Markdown(
            "Disclaimer: The model can produce factually incorrect output, and should not be relied on to produce "
            "factually accurate information. The model was trained on various public datasets; while great efforts "
            "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
            "biased, or otherwise offensive outputs.",
            elem_classes=["disclaimer"],
        )

    submit_event = msg.submit(
        fn=user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
    ).then(
        fn=bot,
        inputs=[
            chatbot,
            temperature,
            top_p,
            top_k,
            repetition_penalty,
            conversation_id,
        ],
        outputs=chatbot,
        queue=True,
    )
    submit_click_event = submit.click(
        fn=user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
    ).then(
        fn=bot,
        inputs=[
            chatbot,
            temperature,
            top_p,
            top_k,
            repetition_penalty,
            conversation_id,
        ],
        outputs=chatbot,
        queue=True,
    )
    stop.click(
        fn=None,
        inputs=None,
        outputs=None,
        cancels=[submit_event, submit_click_event],
        queue=False,
    )
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue(max_size=128, concurrency_count=2)

demo.launch()
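The heart of bot() above is the streaming pattern: model.generate() blocks until decoding finishes, so it runs on a worker thread while the Gradio callback drains the TextIteratorStreamer and yields partial history updates. A minimal self-contained sketch of that pattern (gpt2 stands in for the 4-bit Guanaco stack, which needs a GPU):

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
lm = AutoModelForCausalLM.from_pretrained("gpt2")

streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)
inputs = tok("### Human: Hello!\n### Assistant:", return_tensors="pt")

# generate() blocks, so it runs on a worker thread; the main thread
# consumes the streamer token by token as text is produced.
Thread(target=lm.generate,
       kwargs=dict(**inputs, max_new_tokens=32, streamer=streamer)).start()

partial = ""
for piece in streamer:
    partial += piece  # in the app: history[-1][1] = partial; yield history
print(partial)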