Commit 9334e8e
Parent(s): cb9f647
Update parquet files (step 105 of 121)
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- spaces/1gistliPinn/ChatGPT4/Examples/Cyberflex E-gate Driver Download Win7 REPACK.md +0 -44
- spaces/1gistliPinn/ChatGPT4/Examples/Download Jumpstart For Wireless Windows 7.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/4k video downloader review is it safe and reliable? - Reddit.md +0 -117
- spaces/1phancelerku/anime-remove-background/Become a Pilot in the Indian Air Force A Cut Above Game for PC and Mobile.md +0 -86
- spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_fastdeploy_stable_diffusion_inpaint_legacy.py +0 -477
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/variational_autoencoder/modules.py +0 -1064
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/contperceptual.py +0 -123
- spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/text/text_encoder.py +0 -272
- spaces/AIGText/GlyphControl/ldm/models/diffusion/dpm_solver/__init__.py +0 -1
- spaces/ALSv/FSW/roop/capturer.py +0 -22
- spaces/AchyuthGamer/Free-Accounts-Generator/minecraft/css/style.css +0 -80
- spaces/Adapting/TrendFlow/mypages/__init__.py +0 -2
- spaces/AgentVerse/agentVerse/agentverse/memory_manipulator/reflection.py +0 -330
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/csvscenario-plugin.js +0 -18
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/dialog-quest/DataMethods.js +0 -25
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/checkbox/Factory.js +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/OverCell.js +0 -30
- spaces/AlexZou/Deploy_Restoration/net/IntmdSequential.py +0 -19
- spaces/Alichuan/VITS-Umamusume-voice-synthesizer/modules.py +0 -387
- spaces/Alpaca233/SadTalker/src/audio2exp_models/audio2exp.py +0 -41
- spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/docs/eval.md +0 -31
- spaces/Amjadd/BookGPT/README.md +0 -13
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/unet3d-cond.md +0 -13
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/one_step_unet.py +0 -24
- spaces/Andy1621/uniformer_image_detection/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py +0 -50
- spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_ssd300_coco.py +0 -8
- spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/reppoints_detector.py +0 -22
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/ui.py +0 -251
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test.sh +0 -10
- spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/utils/zoom_in_utils.py +0 -75
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/compatibility_tags.py +0 -165
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__init__.py +0 -4
- spaces/Awesimo/jojogan/e4e/models/encoders/__init__.py +0 -0
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/CONTRIBUTING.md +0 -68
- spaces/Benson/text-generation/Examples/Car Simulator 9.md +0 -113
- spaces/Benson/text-generation/Examples/Descargar Carx Street 0.9.1.md +0 -77
- spaces/Benson/text-generation/Examples/Descargar Gratis De Taxi Driver Juego.md +0 -60
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/_legacy.py +0 -121
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/version.py +0 -504
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/archive_util.py +0 -213
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/data_loading.md +0 -77
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/malloc_and_free.h +0 -104
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/logical.h +0 -44
- spaces/CVPR/regionclip-demo/detectron2/layers/csrc/deformable/deform_conv.h +0 -377
- spaces/CVPR/regionclip-demo/detectron2/structures/tsv_file.py +0 -352
- spaces/CVPR/transfiner/configs/common/coco_schedule.py +0 -47
- spaces/Cahlil/Speech-Recognition-with-Speaker-Segmentation/README.md +0 -12
- spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/inference.py +0 -98
- spaces/ChandraMohanNayal/AutoGPT/ui/utils.py +0 -31
- spaces/CognitiveLabs/GPT-auto-webscraping/chains/output_format/templates.py +0 -30
spaces/1gistliPinn/ChatGPT4/Examples/Cyberflex E-gate Driver Download Win7 REPACK.md
DELETED
@@ -1,44 +0,0 @@
<h2>cyberflex e-gate driver download win7</h2><br /><p><b><b>Download Zip</b> ••• <a href="https://imgfil.com/2uy16N">https://imgfil.com/2uy16N</a></b></p><br /><br />
<br />
8 bit not working
when i try to update my ubuntu it says operation not possible
s0u][ight: Please pastebin the full output of "sudo apt-get update".
paste?
!pastebin | s0u][ight
s0u][ight: For posting multi-line texts into the channel, please use | To post!screenshots use |!pastebinit to paste directly from command line | Make sure you give us the URL for your paste - see also the channel topic.
ok
i did an apt-get update and it started saying that
s0u][ight: Please remove all "ppa.launchpad.net/n/n/" from your sources.list.
s0u][ight: (Any instances of "ppa.launchpad.net/n/n/" in sources.list.d will also need to be removed).
s0u][ight, ppa's must be disabled in the /etc/apt/sources.list.d file
BluesKaj: That is incorrect. ppas are currently permitted for apt-get, and apt-get update is not even intended to read that file.
Jordan_U, nevermind
BluesKaj: s0u][ight: Please pastebin the entire contents of /etc/apt/sources.list and /etc/apt/sources.list.d/.
did that
removed all ppa lines
now apt-get update
Jordan_U, well, my mistake on ppa, it won't hurt anything at this time
BluesKaj: It's not a mistake. s0u][ight: 4fefd39f24<br />
<br />
<br />
<p></p>
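The fix the helpers describe in this log — removing `ppa.launchpad.net` entries from `/etc/apt/sources.list` (and from `sources.list.d/`) — can also be scripted. Below is a minimal, hedged sketch in Python: it writes a patched copy rather than editing the live file, and the output filename is made up for illustration.

```python
from pathlib import Path

# Standard Ubuntu location of the APT source list discussed in the log.
src = Path("/etc/apt/sources.list")
lines = src.read_text().splitlines()

# Comment out any uncommented line that references a Launchpad PPA.
patched = [
    f"# {line}" if "ppa.launchpad.net" in line and not line.lstrip().startswith("#") else line
    for line in lines
]

# Hypothetical output path; review the result before replacing the real file.
Path("sources.list.patched").write_text("\n".join(patched) + "\n")
```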
spaces/1gistliPinn/ChatGPT4/Examples/Download Jumpstart For Wireless Windows 7.md
DELETED
@@ -1,6 +0,0 @@
<h2>download jumpstart for wireless windows 7</h2><br /><p><b><b>Download File</b> ⭐ <a href="https://imgfil.com/2uy11z">https://imgfil.com/2uy11z</a></b></p><br /><br />
2K, XP, 2K3, VISTA, WIN7/32bits. Download. Jumpstart Wireless Intermediate Driver. Others. TL-WN7200ND_V1_Utility.zip. 1.0.0.46. 2008-09-25. 22.51 MB. 4d29de3e1b<br />
<br />
<br />
<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/4k video downloader review is it safe and reliable? - Reddit.md
DELETED
@@ -1,117 +0,0 @@
<h1>How to Download 4K Videos from YouTube and Reddit</h1>
<p>If you are a fan of high-quality videos, you might have noticed that many YouTube and Reddit videos are available in 4K resolution. 4K videos offer stunning clarity, detail, and color that can enhance your viewing experience. However, watching 4K videos online requires a fast and stable internet connection, which is not always available. Moreover, you might want to save some 4K videos offline for personal use, such as editing, sharing, or watching later. In this article, we will show you how to download 4K videos from YouTube and Reddit using some of the best 4K video downloader software for PC and Mac.</p>
<h2>4k download youtube reddit</h2><br /><p><b><b>Download</b> ❤ <a href="https://urlin.us/2uSS9u">https://urlin.us/2uSS9u</a></b></p><br /><br />
<h2>What is 4K Video and Why You Need It</h2>
<p>4K video is a video format that has a resolution of 3840 x 2160 pixels, which is four times as many pixels as the standard HD resolution of 1920 x 1080 pixels. This means that 4K video has more pixels per inch (PPI), which results in sharper and clearer images. You can see more details, textures, and colors in 4K video than in HD video.</p>
<h3>The Benefits of 4K Video Quality</h3>
<p>There are many benefits of watching and downloading 4K videos, such as:</p>
<ul>
<li>You can enjoy a more immersive and realistic viewing experience, especially on large screens or VR devices.</li>
<li>You can capture more details and nuances in your videos, which can improve your content creation and editing skills.</li>
<li>You can future-proof your videos, as 4K is becoming the new standard for video quality.</li>
</ul>
<h3>The Challenges of 4K Video Downloading</h3>
<p>However, there are also some challenges that you might face when downloading 4K videos from YouTube and Reddit, such as:</p>
<ul>
<li>Not all videos are available in 4K resolution, especially older or low-budget ones.</li>
<li>Downloading 4K videos requires more time, storage space, and bandwidth than downloading HD videos.</li>
<li>Playing 4K videos requires a compatible device, player, and monitor that can support the high resolution.</li>
<li>Downloading copyrighted videos without permission or fair use can violate the law and ethical principles.</li>
</ul>
<h2>The Best 4K Video Downloader Software for PC and Mac</h2>
<p>To overcome these challenges, you need a reliable and powerful 4K video downloader software that can help you download YouTube and Reddit videos in 4K resolution with ease. Here are some of the best options that we recommend:</p>
<p>4k video downloader safe reddit<br />
4k video downloader error reddit<br />
4k video downloader proxy reddit<br />
4k video downloader alternative reddit<br />
4k video downloader license key reddit<br />
4k video downloader activation key reddit<br />
4k video downloader crack reddit<br />
4k video downloader premium reddit<br />
4k video downloader review reddit<br />
4k video downloader youtube-dl reddit<br />
best way to download 4k youtube videos reddit<br />
how to download 4k youtube videos reddit<br />
best 4k youtube downloader reddit<br />
free 4k youtube downloader reddit<br />
youtube-dl 4k reddit<br />
youtube-dl download 4k reddit<br />
youtube-dl best quality reddit<br />
youtube-dl mkv reddit<br />
youtube-dl proxy reddit<br />
youtube-dl alternative reddit<br />
download blocked youtube videos reddit<br />
download youtube videos in full hd reddit<br />
download youtube videos in 1080p reddit<br />
download youtube videos in mkv format reddit<br />
download youtube videos with subtitles reddit<br />
download youtube playlists in 4k reddit<br />
download youtube screensavers in 4k reddit<br />
download youtube stock footage in 4k reddit<br />
download youtube backgrounds in 4k reddit<br />
download youtube music videos in 4k reddit<br />
download youtube documentaries in 4k reddit<br />
download youtube movies in 4k reddit<br />
download youtube trailers in 4k reddit<br />
download youtube live streams in 4k reddit<br />
download youtube vr videos in 4k reddit<br />
download youtube hdr videos in 4k reddit<br />
download youtube 360 videos in 4k reddit<br />
download youtube slow motion videos in 4k reddit<br />
download youtube timelapse videos in 4k reddit<br />
download youtube hyperlapse videos in 4k reddit</p>
<h3>4K Video Downloader</h3>
<p><a href="">[10](https://www.4kdownload.com/)</a></p>
<p>As the name suggests, this software is designed to download 4K videos from YouTube and other popular video sites. It has a simple and user-friendly interface that allows you to download videos, playlists, channels, and subtitles in various formats and quality options. It also supports 3D and 360-degree videos, smart mode, and cross-platform compatibility. You can download up to 24 videos in a batch with the free version, or unlimited videos with the premium version.</p>
<h3>Fucosoft Video Converter</h3>
<p><a href="">[9](https://www.fucosoft.com/video-converter.html)</a></p>
<p>This software is not only a video converter, but also a video downloader that can download 4K videos from YouTube, Reddit, Facebook, Instagram, Twitter, and more. It can also convert downloaded videos to various formats, such as MP4, MOV, AVI, MKV, etc., and edit them with built-in tools. You can download multiple videos at once with high speed and quality.</p>
<h3>Wondershare AllMyTube</h3>
<p><a href="">[8](https://videoconverter.wondershare.com/allmytube-video-downloader.html)</a></p>
<p>This software is another versatile video downloader that can download 4K videos from YouTube and over 10,000 other video sites. It can also convert downloaded videos to different formats and devices, such as iPhone, iPad, Android, etc. It has a one-click download mode, a browser extension, a video player, and a video library. You can download up to 10 videos at the same time with the free trial version, or unlimited videos with the full version.</p>
<h3>Gihosoft TubeGet</h3>
<p><a href="">[7](https://www.gihosoft.com/free-youtube-downloader.html)</a></p>
<p>This software is a simple and effective video downloader that can download 4K videos from YouTube and other sites. It can also extract audio from videos and save them as MP3 files. It has a clean and intuitive interface that lets you download videos by copying and pasting URLs or using the browser add-on. You can download up to five videos per day with the free version, or unlimited videos with the pro version.</p>
<h3>Leawo CleverGet</h3>
<p><a href="">[6](https://www.leawo.org/cleverget/)</a></p>
<p>This software is a smart and powerful video downloader that can download 4K videos from YouTube and more than 1000 other sites. It can also convert downloaded videos to various formats and resolutions, such as 1080P, 720P, etc. It has a multi-threading technology that accelerates the download speed and ensures the video quality. You can download unlimited videos with the free version, but you need to upgrade to the premium version to remove the watermark.</p>
<h3>AmoyShare AnyUTube</h3>
<p><a href="">[5](https://www.amoyshare.com/anyutube/)</a></p>
<p>This software is a dedicated YouTube video downloader that can download 4K videos from YouTube in MP4 or MP3 format. It can also download YouTube playlists, channels, subtitles, and lyrics. It has a user-friendly interface that allows you to search for videos by keywords or URLs. You can download up to 14 videos per day with the free version, or unlimited videos with the paid version.</p>
<h2>How to Use 4K Video Downloader Software to Download YouTube and Reddit Videos</h2>
<p>Although each 4K video downloader software has its own features and functions, they generally share the same steps for downloading YouTube and Reddit videos in 4K resolution. Here are the common steps that you need to follow (see the command-line sketch after this article for a scripted equivalent):</p>
<h3>Step 1: Copy the Video URL</h3>
<p>The first step is to copy the URL of the video that you want to download from YouTube or Reddit. You can do this by right-clicking on the video and selecting "Copy video URL" or "Copy link address". Alternatively, you can copy the URL from the address bar of your browser.</p>
<h3>Step 2: Paste the URL into the Software</h3>
<p>The next step is to paste the URL into the software that you have installed on your PC or Mac. You can do this by clicking on the "Paste URL" button or using the keyboard shortcut Ctrl+V (Windows) or Command+V (Mac). The software will automatically analyze the URL and show you the available download options.</p>
<h3>Step 3: Choose the Output Format and Quality</h3>
<p>The third step is to choose the output format and quality that you want for your downloaded video. You can choose from different formats, such as MP4, MKV, AVI, etc., depending on your preference and device compatibility. You can also choose the quality level, such as 4K, 1080P, 720P, etc., depending on your internet speed and storage space. You can also choose to download the audio only or the subtitles if available.</p>
<h3>Step 4: Start the Download Process</h3>
<p>The final step is to start the download process by clicking on the "Download" button or using the keyboard shortcut Enter. The software will begin to download the video in the background and show you the progress and speed. You can pause, resume, or cancel the download at any time. Once the download is completed, you can find the video in the output folder or the video library of the software.</p>
<h2>Tips and Tricks for 4K Video Downloading and Playback</h2>
<p>To make the most of your 4K video downloading and playback experience, here are some tips and tricks that you should keep in mind:</p>
<h3>Check the Internet Speed and Bandwidth</h3>
<p>Downloading 4K videos requires a fast and stable internet connection, as they are much larger than HD videos. You should check your internet speed and bandwidth before downloading 4K videos, and avoid downloading multiple videos at the same time or using other applications that consume internet resources. You can use online tools such as <a href="">[9](https://www.speedtest.net/)</a> to test your internet speed and performance.</p>
<h3>Adjust the Encoder Settings and Bitrates</h3>
<p>Downloading 4K videos also requires a lot of storage space, as they are much heavier than HD videos. You should check your storage space before downloading 4K videos, and delete or transfer some files if necessary. You can also adjust the encoder settings and bitrates of your downloaded videos to reduce their file size without compromising their quality. You can use online tools such as <a href="">[8](https://www.onlineconverter.com/video)</a> to compress and convert your videos online.</p>
<h3>Use a Compatible Device and Player</h3>
<p>Playing 4K videos requires a compatible device, player, and monitor that can support the high resolution. You should check your device specifications before playing 4K videos, and upgrade your hardware or software if needed. You can also use a dedicated 4K video player that can optimize the playback quality and performance of your videos. Some of the best 4K video players are <a href="">[7](https://www.vlc.de/en/)</a>, <a href="">[6](https://www.kmplayer.com/)</a>, <a href="">[5](https://www.5kplayer.com/)</a>, <a href="">[4](https://potplayer.daum.net/)</a>, and <a href="">[3](https://mpv.io/)</a>.</p>
<h3>Respect the Copyright Laws and Fair Use Policy</h3>
<p>Downloading 4K videos from YouTube and Reddit is not illegal, as long as you follow the fair use policy and respect the rights of the original creators. You should not download or use any videos that are protected by copyright or have restricted permissions. You should also not download or use any videos for commercial purposes or without giving proper credit to the sources. You should also not download or use any videos that contain illegal, harmful, or offensive content. You should always respect the terms and conditions of YouTube and Reddit, and the laws and regulations of your country.</p>
<h2>Conclusion</h2>
<p>Downloading 4K videos from YouTube and Reddit can be a great way to enjoy high-quality videos offline or for personal use. However, you need to have the right tools and techniques to download and play 4K videos smoothly and safely. In this article, we have introduced some of the best 4K video downloader software for PC and Mac, and how to use them to download YouTube and Reddit videos in 4K resolution. We have also shared some tips and tricks for 4K video downloading and playback, and how to respect the copyright laws and fair use policy. We hope that this article has helped you learn more about 4K video downloading and how to do it properly.</p>
<h2>FAQs</h2>
<p>Here are some of the frequently asked questions about 4K video downloading:</p>
<h3>Q: What is the difference between 4K and UHD?</h3>
<p>A: 4K and UHD are often used interchangeably, but they are not exactly the same. 4K refers to the resolution of 4096 x 2160 pixels, which is used in digital cinema and professional video production. UHD refers to the resolution of 3840 x 2160 pixels, which is used in consumer TVs and monitors. Both resolutions have the same aspect ratio of 16:9, but 4K has slightly more pixels than UHD.</p>
<h3>Q: How much storage space does a 4K video take?</h3>
<p>A: The storage space of a 4K video depends on several factors, such as the length, format, codec, bitrate, and compression of the video. However, a rough estimate is that a one-minute 4K video can take up to 375 MB of storage space. Therefore, a one-hour 4K video can take up to 22.5 GB of storage space.</p>
<h3>Q: How fast is the internet speed required for downloading or streaming 4K videos?</h3>
<p>A: The internet speed required for downloading or streaming 4K videos also depends on several factors, such as the source, quality, platform, and network of the video. However, a general recommendation is that you need at least 25 Mbps of internet speed for downloading or streaming 4K videos smoothly.</p>
<h3>Q: Can I download 4K videos from YouTube or Reddit on my mobile device?</h3>
<p>A: Yes, you can download 4K videos from YouTube or Reddit on your mobile device, but you need to use a third-party app or website that can support 4K video downloading. Some of the popular apps or websites are <a href="">[2](https://www.videoder.com/)</a>, <a href="">[1](https://ytmp3.cc/en13/)</a>, <a href="">[0](https://www.tubemate.net/)</a>, etc. However, you should be careful about the security and legality of these apps or websites, as they might contain malware or violate the terms and conditions of YouTube or Reddit.</p>
<h3>Q: How can I play 4K videos on my PC or Mac?</h3>
<p>A: To play 4K videos on your PC or Mac, you need to have a compatible device, player, and monitor that can support the high resolution. You can check your device specifications and upgrade your hardware or software if needed. You can also use a dedicated 4K video player that can optimize the playback quality and performance of your videos. Some of the best 4K video players are <a href="">[7](https://www.vlc.de/en/)</a>, <a href="">[6](https://www.kmplayer.com/)</a>, <a href="">[5](https://www.5kplayer.com/)</a>, <a href="">[4](https://potplayer.daum.net/)</a>, and <a href="">[3](https://mpv.io/)</a>.</p> 197e85843d<br />
<br />
<br />
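The step-by-step GUI flow in the article above (copy the URL, paste it, pick a format, start the download) has a command-line counterpart. As a hedged sketch, here is how a comparable 4K download could be done with the Python API of yt-dlp, a maintained fork of the youtube-dl tool that appears in the article's keyword list; the video URL is a placeholder, and merging separate video and audio streams requires ffmpeg on the PATH. The same copyright caveats discussed in the article apply.

```python
from yt_dlp import YoutubeDL

options = {
    # Prefer a video stream up to 2160p (4K) plus the best audio, merged into one MKV.
    "format": "bestvideo[height<=2160]+bestaudio/best",
    "merge_output_format": "mkv",
    "outtmpl": "%(title)s.%(ext)s",  # name the output file after the video title
}

with YoutubeDL(options) as ydl:
    ydl.download(["https://www.youtube.com/watch?v=VIDEO_ID"])  # placeholder URL
```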
spaces/1phancelerku/anime-remove-background/Become a Pilot in the Indian Air Force A Cut Above Game for PC and Mobile.md
DELETED
@@ -1,86 +0,0 @@
<br />
<h1>Indian Air Force: A Cut Above Game Download</h1>
<p>If you have ever dreamed of becoming a fighter pilot in the Indian Air Force, then you might want to check out this game. Indian Air Force: A Cut Above is a free air combat mobile game that lets you experience the thrill and challenge of flying various IAF aircraft and weapons. The game was developed by Threye Interactive, a Delhi-based game studio, in collaboration with the Indian Air Force. It was officially launched on July 31, 2019 by the Air Chief Marshal B.S. Dhanoa for Android and iOS devices. The game aims to attract and inspire the youth to join the IAF and serve the nation.</p>
<h2>Features of the game</h2>
<p>Indian Air Force: A Cut Above has three game modes: Training, Single Player, and Free Flight. In Training mode, you can learn the basics of flying, landing, combat, and rescue operations. In Single Player mode, you can take on various missions that test your skills and courage. You can also customize your pilot avatar and choose from different IAF aircraft such as MiG-21, Mirage-2000, Su-30MKI, Rafale, Tejas, Apache, and Chinook. In Free Flight mode, you can explore the skies in any aircraft you want.</p>
<h2>indian air force a cut above game download</h2><br /><p><b><b>Download File</b> ✸ <a href="https://jinyurl.com/2uNQQj">https://jinyurl.com/2uNQQj</a></b></p><br /><br />
<p>The game features realistic and engaging gameplay and graphics that simulate the actual scenarios faced by IAF pilots. You can experience flying in different weather conditions, terrains, and altitudes. You can also use different weapons such as guns, missiles, rockets, and bombs to destroy enemy targets. The game also has a character modelled after Wing Commander Abhinandan Varthaman, who shot down a Pakistani F-16 in February 2019. You can even sport his signature gunslinger moustache in the game.</p>
<h2>Review of the game</h2>
<p>Indian Air Force: A Cut Above is a fun and exciting game that gives you a glimpse of what it takes to be an IAF warrior. The game has many pros such as:</p>
<ul>
<li>It is free to download and play</li>
<li>It has a variety of modes, missions, aircraft, and weapons to choose from</li>
<li>It has realistic and immersive gameplay and graphics</li>
<li>It has educational and motivational value for aspiring IAF pilots</li>
</ul>
<p>However, the game also has some cons such as:</p>
<p>How to play Indian Air Force: A Cut Above on PC<br />
Indian Air Force: A Cut Above app store review<br />
Indian Air Force: A Cut Above gameplay and features<br />
Indian Air Force: A Cut Above apk file download<br />
Indian Air Force: A Cut Above best fighter jets and weapons<br />
Indian Air Force: A Cut Above solo and PvP missions<br />
Indian Air Force: A Cut Above tips and tricks for beginners<br />
Indian Air Force: A Cut Above official trailer and launch date<br />
Indian Air Force: A Cut Above system requirements and compatibility<br />
Indian Air Force: A Cut Above mod apk unlimited money<br />
Indian Air Force: A Cut Above online test and recruitment<br />
Indian Air Force: A Cut Above digital india initiative<br />
Indian Air Force: A Cut Above latest updates and news<br />
Indian Air Force: A Cut Above cheats and hacks<br />
Indian Air Force: A Cut Above bluestacks emulator download<br />
Indian Air Force: A Cut Above ios and android devices<br />
Indian Air Force: A Cut Above customer support and feedback<br />
Indian Air Force: A Cut Above ratings and rankings<br />
Indian Air Force: A Cut Above achievements and rewards<br />
Indian Air Force: A Cut Above realistic graphics and sound effects<br />
Indian Air Force: A Cut Above history and background<br />
Indian Air Force: A Cut Above offline mode and data usage<br />
Indian Air Force: A Cut Above installation and setup guide<br />
Indian Air Force: A Cut Above comparison with other air combat games<br />
Indian Air Force: A Cut Above pros and cons of playing<br />
Indian Air Force: A Cut Above free download link and size<br />
Indian Air Force: A Cut Above fun facts and trivia<br />
Indian Air Force: A Cut Above user reviews and testimonials<br />
Indian Air Force: A Cut Above challenges and difficulty levels<br />
Indian Air Force: A Cut Above screenshots and videos<br />
Indian Air Force: A Cut Above frequently asked questions (FAQs)<br />
Indian Air Force: A Cut Above community and forums<br />
Indian Air Force: A Cut Above bugs and issues report<br />
Indian Air Force: A Cut Above fan art and memes<br />
Indian Air Force: A Cut Above developer interview and insights</p>
<ul>
<li>It requires internet connection and permissions to access your photos, files, and phone calls</li>
<li>It does not support auto-rotate or notch displays</li>
<li>It has clunky controls and audio effects</li>
<li>It does not have multiplayer or online features yet</li>
</ul>
<p>Compared to other similar games such as Ace Combat or H.A.W.X., Indian Air Force: A Cut Above is more arcade-style than simulation-style. It does not have complex controls or physics, but it does have more variety and authenticity in terms of IAF scenarios and equipment.</p>
<p>Some tips and tricks to play the game better are:</p>
<ul>
<li>Complete the training mode before attempting the missions</li>
<li>Use the radar and target lock features to locate and track enemies</li>
<li>Avoid flying too low or too high to avoid crashing or stalling</li>
<li>Use different weapons for different targets depending on their range and type</li>
<li>Refuel your aircraft when needed by flying close to a tanker plane</li>
</ul>
<h2>Conclusion</h2>
<p>Indian Air Force: A Cut Above is a great game for anyone who loves air combat and adventure. It is a unique and innovative game that showcases the glory and valor of the Indian Air Force. It is also a game that can inspire and educate the youth about the IAF and its role in defending the nation.</p>
<p>If you want to download and play the game, you can visit the official website of the Indian Air Force or the Google Play Store or the App Store. The game is compatible with Android 5.0 and above and iOS 9.0 and above devices. The game size is about 300 MB and it requires about 1 GB of free space on your device.</p>
<p>So, what are you waiting for? Download Indian Air Force: A Cut Above today and experience the thrill of flying in the sky. You will not regret it!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about the game:</p>
<ol>
<li>Is Indian Air Force: A Cut Above an official game of the IAF?</li>
<p>Yes, it is an official game of the IAF that was developed in collaboration with Threye Interactive, a Delhi-based game studio.</p>
<li>Is Indian Air Force: A Cut Above a free game?</li>
<p>Yes, it is a free game that does not have any in-app purchases or ads. However, it does require internet connection and permissions to access your photos, files, and phone calls.</p>
<li>Is Indian Air Force: A Cut Above a realistic game?</li>
<p>It is a realistic game in terms of the scenarios, aircraft, and weapons that are used by the IAF. However, it is not a simulation game that has complex controls or physics. It is more of an arcade-style game that is easy to play and enjoy.</p>
<li>Is Indian Air Force: A Cut Above a multiplayer game?</li>
<p>No, it is not a multiplayer game yet. It only has single-player modes such as Training, Single Player, and Free Flight. However, the developers have said that they are working on adding multiplayer and online features in the future.</p>
<li>How can I contact the developers of Indian Air Force: A Cut Above?</li>
<p>You can contact the developers of Indian Air Force: A Cut Above by visiting their website www.threye.com or by emailing them at [email protected]. You can also follow them on Facebook, Twitter, Instagram, and YouTube for updates and news.</p>
</ol></p> 401be4b1e0<br />
<br />
<br />
spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_fastdeploy_stable_diffusion_inpaint_legacy.py
DELETED
@@ -1,477 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import inspect
|
17 |
-
from typing import Callable, List, Optional, Union
|
18 |
-
|
19 |
-
import numpy as np
|
20 |
-
import paddle
|
21 |
-
import PIL
|
22 |
-
|
23 |
-
from paddlenlp.transformers import CLIPFeatureExtractor, CLIPTokenizer
|
24 |
-
|
25 |
-
from ...fastdeploy_utils import FastDeployRuntimeModel
|
26 |
-
from ...pipeline_utils import DiffusionPipeline
|
27 |
-
from ...schedulers import (
|
28 |
-
DDIMScheduler,
|
29 |
-
DPMSolverMultistepScheduler,
|
30 |
-
EulerAncestralDiscreteScheduler,
|
31 |
-
EulerDiscreteScheduler,
|
32 |
-
LMSDiscreteScheduler,
|
33 |
-
PNDMScheduler,
|
34 |
-
)
|
35 |
-
from ...utils import PIL_INTERPOLATION, logging
|
36 |
-
from . import StableDiffusionPipelineOutput
|
37 |
-
|
38 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
39 |
-
|
40 |
-
|
41 |
-
def preprocess_image(image):
|
42 |
-
w, h = image.size
|
43 |
-
w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
|
44 |
-
image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
|
45 |
-
image = np.array(image).astype(np.float32) / 255.0
|
46 |
-
image = image[None].transpose(0, 3, 1, 2)
|
47 |
-
return 2.0 * image - 1.0
|
48 |
-
|
49 |
-
|
50 |
-
def preprocess_mask(mask, scale_factor=8):
|
51 |
-
mask = mask.convert("L")
|
52 |
-
w, h = mask.size
|
53 |
-
w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
|
54 |
-
mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
|
55 |
-
mask = np.array(mask).astype(np.float32) / 255.0
|
56 |
-
mask = np.tile(mask, (4, 1, 1))
|
57 |
-
mask = mask[None].transpose(0, 1, 2, 3) # what does this step do?
|
58 |
-
mask = 1 - mask # repaint white, keep black
|
59 |
-
return mask
|
60 |
-
|
61 |
-
|
62 |
-
class FastDeployStableDiffusionInpaintPipelineLegacy(DiffusionPipeline):
|
63 |
-
r"""
|
64 |
-
Pipeline for text-guided image inpainting legacy using Stable Diffusion.
|
65 |
-
|
66 |
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
67 |
-
library implements for all the pipelines (such as downloading or saving etc.)
|
68 |
-
|
69 |
-
Args:
|
70 |
-
vae_encoder ([`FastDeployRuntimeModel`]):
|
71 |
-
Variational Auto-Encoder (VAE) Model to encode images to latent representations.
|
72 |
-
vae_decoder ([`FastDeployRuntimeModel`]):
|
73 |
-
Variational Auto-Encoder (VAE) Model to decode images from latent representations.
|
74 |
-
text_encoder ([`FastDeployRuntimeModel`]):
|
75 |
-
Frozen text-encoder. Stable Diffusion uses the text portion of
|
76 |
-
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
77 |
-
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
78 |
-
tokenizer (`CLIPTokenizer`):
|
79 |
-
Tokenizer of class
|
80 |
-
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
81 |
-
unet ([`FastDeployRuntimeModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
82 |
-
scheduler ([`SchedulerMixin`]):
|
83 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
84 |
-
[`DDIMScheduler`], [`LMSDiscreteScheduler`], [`PNDMScheduler`], [`EulerDiscreteScheduler`], [`EulerAncestralDiscreteScheduler`]
|
85 |
-
or [`DPMSolverMultistepScheduler`].
|
86 |
-
safety_checker ([`FastDeployRuntimeModel`]):
|
87 |
-
Classification module that estimates whether generated images could be considered offensive or harmful.
|
88 |
-
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
89 |
-
feature_extractor ([`CLIPFeatureExtractor`]):
|
90 |
-
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
91 |
-
"""
|
92 |
-
_optional_components = ["safety_checker", "feature_extractor"]
|
93 |
-
|
94 |
-
def __init__(
|
95 |
-
self,
|
96 |
-
vae_encoder: FastDeployRuntimeModel,
|
97 |
-
vae_decoder: FastDeployRuntimeModel,
|
98 |
-
text_encoder: FastDeployRuntimeModel,
|
99 |
-
tokenizer: CLIPTokenizer,
|
100 |
-
unet: FastDeployRuntimeModel,
|
101 |
-
scheduler: Union[
|
102 |
-
DDIMScheduler,
|
103 |
-
PNDMScheduler,
|
104 |
-
LMSDiscreteScheduler,
|
105 |
-
EulerDiscreteScheduler,
|
106 |
-
EulerAncestralDiscreteScheduler,
|
107 |
-
DPMSolverMultistepScheduler,
|
108 |
-
],
|
109 |
-
safety_checker: FastDeployRuntimeModel,
|
110 |
-
feature_extractor: CLIPFeatureExtractor,
|
111 |
-
requires_safety_checker: bool = True,
|
112 |
-
):
|
113 |
-
super().__init__()
|
114 |
-
|
115 |
-
if safety_checker is None and requires_safety_checker:
|
116 |
-
logger.warning(
|
117 |
-
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
118 |
-
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
119 |
-
" results in services or applications open to the public. PaddleNLP team, diffusers team and Hugging Face"
|
120 |
-
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
121 |
-
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
122 |
-
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
123 |
-
)
|
124 |
-
if safety_checker is not None and feature_extractor is None:
|
125 |
-
raise ValueError(
|
126 |
-
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
127 |
-
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
128 |
-
)
|
129 |
-
|
130 |
-
self.register_modules(
|
131 |
-
vae_encoder=vae_encoder,
|
132 |
-
vae_decoder=vae_decoder,
|
133 |
-
text_encoder=text_encoder,
|
134 |
-
tokenizer=tokenizer,
|
135 |
-
unet=unet,
|
136 |
-
scheduler=scheduler,
|
137 |
-
safety_checker=safety_checker,
|
138 |
-
feature_extractor=feature_extractor,
|
139 |
-
)
|
140 |
-
|
141 |
-
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
142 |
-
|
143 |
-
def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
|
144 |
-
r"""
|
145 |
-
Encodes the prompt into text encoder hidden states.
|
146 |
-
|
147 |
-
Args:
|
148 |
-
prompt (`str` or `list(int)`):
|
149 |
-
prompt to be encoded
|
150 |
-
num_images_per_prompt (`int`):
|
151 |
-
number of images that should be generated per prompt
|
152 |
-
do_classifier_free_guidance (`bool`):
|
153 |
-
whether to use classifier free guidance or not
|
154 |
-
negative_prompt (`str` or `List[str]`):
|
155 |
-
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
156 |
-
if `guidance_scale` is less than `1`).
|
157 |
-
"""
|
158 |
-
batch_size = len(prompt) if isinstance(prompt, list) else 1
|
159 |
-
|
160 |
-
# get prompt text embeddings
|
161 |
-
text_inputs = self.tokenizer(
|
162 |
-
prompt,
|
163 |
-
padding="max_length",
|
164 |
-
max_length=self.tokenizer.model_max_length,
|
165 |
-
truncation=True,
|
166 |
-
return_tensors="np",
|
167 |
-
)
|
168 |
-
text_input_ids = text_inputs.input_ids
|
169 |
-
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="np").input_ids
|
170 |
-
|
171 |
-
if not np.array_equal(text_input_ids, untruncated_ids):
|
172 |
-
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
|
173 |
-
logger.warning(
|
174 |
-
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
175 |
-
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
176 |
-
)
|
177 |
-
|
178 |
-
text_embeddings = self.text_encoder(input_ids=text_input_ids.astype(np.int64))[0]
|
179 |
-
text_embeddings = np.repeat(text_embeddings, num_images_per_prompt, axis=0)
|
180 |
-
|
181 |
-
# get unconditional embeddings for classifier free guidance
|
182 |
-
if do_classifier_free_guidance:
|
183 |
-
uncond_tokens: List[str]
|
184 |
-
if negative_prompt is None:
|
185 |
-
uncond_tokens = [""] * batch_size
|
186 |
-
elif type(prompt) is not type(negative_prompt):
|
187 |
-
raise TypeError(
|
188 |
-
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
189 |
-
f" {type(prompt)}."
|
190 |
-
)
|
191 |
-
elif isinstance(negative_prompt, str):
|
192 |
-
uncond_tokens = [negative_prompt] * batch_size
|
193 |
-
elif batch_size != len(negative_prompt):
|
194 |
-
raise ValueError(
|
195 |
-
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
196 |
-
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
197 |
-
" the batch size of `prompt`."
|
198 |
-
)
|
199 |
-
else:
|
200 |
-
uncond_tokens = negative_prompt
|
201 |
-
|
202 |
-
max_length = text_input_ids.shape[-1]
|
203 |
-
uncond_input = self.tokenizer(
|
204 |
-
uncond_tokens,
|
205 |
-
padding="max_length",
|
206 |
-
max_length=max_length,
|
207 |
-
truncation=True,
|
208 |
-
return_tensors="np",
|
209 |
-
)
|
210 |
-
uncond_embeddings = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int64))[0]
|
211 |
-
uncond_embeddings = np.repeat(uncond_embeddings, num_images_per_prompt, axis=0)
|
212 |
-
|
213 |
-
# For classifier free guidance, we need to do two forward passes.
|
214 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
215 |
-
# to avoid doing two forward passes
|
216 |
-
text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
|
217 |
-
|
218 |
-
return text_embeddings
|
219 |
-
|
220 |
-
def run_safety_checker(self, image, dtype):
|
221 |
-
if self.safety_checker is not None:
|
222 |
-
safety_checker_input = self.feature_extractor(
|
223 |
-
self.numpy_to_pil(image), return_tensors="np"
|
224 |
-
).pixel_values.astype(dtype)
|
225 |
-
# There will throw an error if use safety_checker batchsize>1
|
226 |
-
images, has_nsfw_concept = [], []
|
227 |
-
for i in range(image.shape[0]):
|
228 |
-
image_i, has_nsfw_concept_i = self.safety_checker(
|
229 |
-
clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
|
230 |
-
)
|
231 |
-
images.append(image_i)
|
232 |
-
has_nsfw_concept.append(has_nsfw_concept_i[0])
|
233 |
-
image = np.concatenate(images)
|
234 |
-
else:
|
235 |
-
has_nsfw_concept = None
|
236 |
-
return image, has_nsfw_concept
|
237 |
-
|
238 |
-
def decode_latents(self, latents):
|
239 |
-
latents = 1 / 0.18215 * latents
|
240 |
-
image = np.concatenate(
|
241 |
-
[self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
|
242 |
-
)
|
243 |
-
image = np.clip(image / 2 + 0.5, 0, 1)
|
244 |
-
image = image.transpose([0, 2, 3, 1])
|
245 |
-
return image
|
246 |
-
|
247 |
-
def prepare_extra_step_kwargs(self, eta):
|
248 |
-
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
249 |
-
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
250 |
-
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
251 |
-
# and should be between [0, 1]
|
252 |
-
|
253 |
-
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
254 |
-
extra_step_kwargs = {}
|
255 |
-
if accepts_eta:
|
256 |
-
extra_step_kwargs["eta"] = eta
|
257 |
-
|
258 |
-
return extra_step_kwargs
|
259 |
-
|
260 |
-
def check_inputs(self, prompt, strength, callback_steps):
|
261 |
-
if not isinstance(prompt, str) and not isinstance(prompt, list):
|
262 |
-
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
263 |
-
|
264 |
-
if strength < 0 or strength > 1:
|
265 |
-
raise ValueError(f"The value of strength should in [1.0, 1.0] but is {strength}")
|
266 |
-
|
267 |
-
if (callback_steps is None) or (
|
268 |
-
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
269 |
-
):
|
270 |
-
raise ValueError(
|
271 |
-
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
272 |
-
f" {type(callback_steps)}."
|
273 |
-
)
|
274 |
-
|
275 |
-
def get_timesteps(self, num_inference_steps, strength):
|
276 |
-
# get the original timestep using init_timestep
|
277 |
-
offset = self.scheduler.config.get("steps_offset", 0)
|
278 |
-
init_timestep = int(num_inference_steps * strength) + offset
|
279 |
-
init_timestep = min(init_timestep, num_inference_steps)
|
280 |
-
|
281 |
-
t_start = max(num_inference_steps - init_timestep + offset, 0)
|
282 |
-
timesteps = self.scheduler.timesteps[t_start:]
|
283 |
-
|
284 |
-
return timesteps, num_inference_steps - t_start
|
285 |
-
|
286 |
-
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, generator=None, noise=None):
|
287 |
-
if generator is None:
|
288 |
-
generator = np.random
|
289 |
-
|
290 |
-
image = image.astype(dtype)
|
291 |
-
init_latents = self.vae_encoder(sample=image)[0]
|
292 |
-
init_latents = 0.18215 * init_latents
|
293 |
-
init_latents = paddle.to_tensor(init_latents)
|
294 |
-
|
295 |
-
# Expand init_latents for batch_size and num_images_per_prompt
|
296 |
-
init_latents = paddle.concat([init_latents] * batch_size * num_images_per_prompt, axis=0)
|
297 |
-
init_latents_orig = paddle.to_tensor(init_latents)
|
298 |
-
|
299 |
-
# add noise to latents using the timesteps
|
300 |
-
if noise is None:
|
301 |
-
noise = paddle.to_tensor(generator.randn(*init_latents.shape).astype(dtype))
|
302 |
-
elif list(noise.shape) != list(init_latents.shape):
|
303 |
-
raise ValueError(f"Unexpected noise shape, got {noise.shape}, expected {init_latents.shape}")
|
304 |
-
elif isinstance(noise, np.ndarray):
|
305 |
-
noise = paddle.to_tensor(noise, dtype=dtype)
|
306 |
-
|
307 |
-
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
308 |
-
latents = init_latents
|
309 |
-
return latents, init_latents_orig, noise
|
310 |
-
|
311 |
-
def __call__(
|
312 |
-
self,
|
313 |
-
prompt: Union[str, List[str]],
|
314 |
-
image: Union[np.ndarray, PIL.Image.Image] = None,
|
315 |
-
mask_image: Union[np.ndarray, PIL.Image.Image] = None,
|
316 |
-
strength: float = 0.8,
|
317 |
-
num_inference_steps: Optional[int] = 50,
|
318 |
-
guidance_scale: Optional[float] = 7.5,
|
319 |
-
negative_prompt: Optional[Union[str, List[str]]] = None,
|
320 |
-
num_images_per_prompt: Optional[int] = 1,
|
321 |
-
eta: Optional[float] = 0.0,
|
322 |
-
generator: Optional[np.random.RandomState] = None,
|
323 |
-
noise: Optional[np.ndarray] = None,
|
324 |
-
output_type: Optional[str] = "pil",
|
325 |
-
return_dict: bool = True,
|
326 |
-
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
|
327 |
-
callback_steps: Optional[int] = 1,
|
328 |
-
):
|
329 |
-
r"""
|
330 |
-
Function invoked when calling the pipeline for generation.
|
331 |
-
|
332 |
-
Args:
|
333 |
-
prompt (`str` or `List[str]`):
|
334 |
-
The prompt or prompts to guide the image generation.
|
335 |
-
image (`nd.ndarray` or `PIL.Image.Image`):
|
336 |
-
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
337 |
-
process. This is the image whose masked region will be inpainted.
|
338 |
-
mask_image (`nd.ndarray` or `PIL.Image.Image`):
|
339 |
-
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
340 |
-
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
|
341 |
-
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
|
342 |
-
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.uu
|
343 |
-
strength (`float`, *optional*, defaults to 0.8):
|
344 |
-
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
|
345 |
-
`image` will be used as a starting point, adding more noise to it the larger the `strength`. The
|
346 |
-
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
|
347 |
-
noise will be maximum and the denoising process will run for the full number of iterations specified in
|
348 |
-
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
349 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
350 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
351 |
-
expense of slower inference. This parameter will be modulated by `strength`.
|
352 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
353 |
-
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
354 |
-
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
355 |
-
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
356 |
-
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
357 |
-
usually at the expense of lower image quality.
|
358 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
359 |
-
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
360 |
-
if `guidance_scale` is less than `1`).
|
361 |
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
362 |
-
The number of images to generate per prompt.
|
363 |
-
eta (`float`, *optional*, defaults to 0.0):
|
364 |
-
Corresponds to parameter eta (?) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
365 |
-
[`schedulers.DDIMScheduler`], will be ignored for others.
|
366 |
-
generator (`np.random.RandomState`, *optional*):
|
367 |
-
A np.random.RandomState to make generation deterministic.
|
368 |
-
noise (`np.ndarray`, *optional*):
|
369 |
-
Pre-generated noise tensor, sampled from a Gaussian distribution, to be used as inputs for image
|
370 |
-
generation. If not provided, a noise tensor will ge generated by sampling using the supplied random
|
371 |
-
`generator`.
|
372 |
-
output_type (`str`, *optional*, defaults to `"pil"`):
|
373 |
-
The output format of the generate image. Choose between
|
374 |
-
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
375 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
376 |
-
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
377 |
-
plain tuple.
|
378 |
-
callback (`Callable`, *optional*):
|
379 |
-
A function that will be called every `callback_steps` steps during inference. The function will be
|
380 |
-
called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
|
381 |
-
callback_steps (`int`, *optional*, defaults to 1):
|
382 |
-
-                The frequency at which the `callback` function will be called. If not specified, the callback will be
-                called at every step.
-
-        Returns:
-            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
-            When returning a tuple, the first element is a list with the generated images, and the second element is a
-            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
-            (nsfw) content, according to the `safety_checker`.
-        """
-        # 1. Check inputs
-        self.check_inputs(prompt, strength, callback_steps)
-
-        # 2. Define call parameters
-        batch_size = 1 if isinstance(prompt, str) else len(prompt)
-        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-        # corresponds to doing no classifier free guidance.
-        do_classifier_free_guidance = guidance_scale > 1.0
-
-        # 3. Encode input prompt
-        text_embeddings = self._encode_prompt(
-            prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
-        )
-
-        # 4. Preprocess image and mask
-        if isinstance(image, PIL.Image.Image):
-            image = preprocess_image(image)
-
-        if isinstance(mask_image, PIL.Image.Image):
-            mask_image = preprocess_mask(mask_image)
-
-        # 5. Set timesteps
-        self.scheduler.set_timesteps(num_inference_steps)
-        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)
-        latent_timestep = timesteps[:1].tile([batch_size * num_images_per_prompt])
-
-        # 6. Prepare latent variables
-        # encode the init image into latents and scale the latents
-        latents, init_latents_orig, noise = self.prepare_latents(
-            image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, generator, noise
-        )
-
-        # 7. Prepare mask latent
-        mask = paddle.to_tensor(mask_image, dtype=latents.dtype)
-        mask = paddle.concat([mask] * batch_size * num_images_per_prompt)
-
-        # 8. Prepare extra step kwargs.
-        extra_step_kwargs = self.prepare_extra_step_kwargs(eta)
-
-        # 9. Denoising loop
-        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
-        with self.progress_bar(total=num_inference_steps) as progress_bar:
-            text_embeddings = paddle.to_tensor(text_embeddings, dtype="float32")
-            for i, t in enumerate(timesteps):
-                # expand the latents if we are doing classifier free guidance
-                latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
-                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
-                # predict the noise residual
-                noise_pred = self.unet.zero_copy_infer(
-                    sample=latent_model_input, timestep=t, encoder_hidden_states=text_embeddings
-                )[0]
-                # perform guidance
-                if do_classifier_free_guidance:
-                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-                # compute the previous noisy sample x_t -> x_t-1
-                scheduler_output = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)
-                latents = scheduler_output.prev_sample
-                init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, t)
-
-                latents = (init_latents_proper * mask) + (latents * (1 - mask))
-
-                # call the callback, if provided
-                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
-                    progress_bar.update()
-                    if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
-
-        # 10. Post-processing
-        image = self.decode_latents(latents.numpy())
-
-        # 11. Run safety checker
-        image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype)
-
-        # 12. Convert to PIL
-        if output_type == "pil":
-            image = self.numpy_to_pil(image)
-
-        if not return_dict:
-            return (image, has_nsfw_concept)
-
-        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
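At each denoising step above, two small tensor operations carry most of the inpainting logic: classifier-free guidance on the noise prediction, and a masked blend that keeps the re-noised original latents wherever the mask is 1. A minimal NumPy sketch of just those two formulas, with illustrative names and toy shapes (not the pipeline's actual API):

    import numpy as np

    def classifier_free_guidance(noise_pred_uncond, noise_pred_text, guidance_scale):
        # same formula as in the loop above (guidance weight w of Imagen eq. 2)
        return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

    def masked_blend(renoised_init_latents, denoised_latents, mask):
        # keep the re-noised original where mask == 1, the new sample elsewhere
        return renoised_init_latents * mask + denoised_latents * (1 - mask)

    shape = (1, 4, 64, 64)  # toy latent shape
    uncond, text = np.random.randn(*shape), np.random.randn(*shape)
    pred = classifier_free_guidance(uncond, text, guidance_scale=7.5)
    mask = (np.random.rand(*shape) > 0.5).astype(np.float32)
    latents = masked_blend(np.random.randn(*shape), np.random.randn(*shape), mask)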
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/variational_autoencoder/modules.py
DELETED
@@ -1,1064 +0,0 @@
-# pytorch_diffusion + derived encoder decoder
-import math
-import torch
-import torch.nn as nn
-import numpy as np
-from einops import rearrange
-
-from audioldm.utils import instantiate_from_config
-from audioldm.latent_diffusion.attention import LinearAttention
-
-def get_timestep_embedding(timesteps, embedding_dim):
-    """
-    This matches the implementation in Denoising Diffusion Probabilistic Models:
-    From Fairseq.
-    Build sinusoidal embeddings.
-    This matches the implementation in tensor2tensor, but differs slightly
-    from the description in Section 3.5 of "Attention Is All You Need".
-    """
-    assert len(timesteps.shape) == 1
-
-    half_dim = embedding_dim // 2
-    emb = math.log(10000) / (half_dim - 1)
-    emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
-    emb = emb.to(device=timesteps.device)
-    emb = timesteps.float()[:, None] * emb[None, :]
-    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
-    if embedding_dim % 2 == 1:  # zero pad
-        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
-    return emb
-
-def nonlinearity(x):
-    # swish
-    return x * torch.sigmoid(x)
-
-
-def Normalize(in_channels, num_groups=32):
-    return torch.nn.GroupNorm(
-        num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True
-    )
-
-
-class Upsample(nn.Module):
-    def __init__(self, in_channels, with_conv):
-        super().__init__()
-        self.with_conv = with_conv
-        if self.with_conv:
-            self.conv = torch.nn.Conv2d(
-                in_channels, in_channels, kernel_size=3, stride=1, padding=1
-            )
-
-    def forward(self, x):
-        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
-        if self.with_conv:
-            x = self.conv(x)
-        return x
-
-
-class UpsampleTimeStride4(nn.Module):
-    def __init__(self, in_channels, with_conv):
-        super().__init__()
-        self.with_conv = with_conv
-        if self.with_conv:
-            self.conv = torch.nn.Conv2d(
-                in_channels, in_channels, kernel_size=5, stride=1, padding=2
-            )
-
-    def forward(self, x):
-        x = torch.nn.functional.interpolate(x, scale_factor=(4.0, 2.0), mode="nearest")
-        if self.with_conv:
-            x = self.conv(x)
-        return x
-
-
-class Downsample(nn.Module):
-    def __init__(self, in_channels, with_conv):
-        super().__init__()
-        self.with_conv = with_conv
-        if self.with_conv:
-            # Do time downsampling here
-            # no asymmetric padding in torch conv, must do it ourselves
-            self.conv = torch.nn.Conv2d(
-                in_channels, in_channels, kernel_size=3, stride=2, padding=0
-            )
-
-    def forward(self, x):
-        if self.with_conv:
-            pad = (0, 1, 0, 1)
-            x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
-            x = self.conv(x)
-        else:
-            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
-        return x
-
-
-class DownsampleTimeStride4(nn.Module):
-    def __init__(self, in_channels, with_conv):
-        super().__init__()
-        self.with_conv = with_conv
-        if self.with_conv:
-            # Do time downsampling here
-            # no asymmetric padding in torch conv, must do it ourselves
-            self.conv = torch.nn.Conv2d(
-                in_channels, in_channels, kernel_size=5, stride=(4, 2), padding=1
-            )
-
-    def forward(self, x):
-        if self.with_conv:
-            pad = (0, 1, 0, 1)
-            x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
-            x = self.conv(x)
-        else:
-            x = torch.nn.functional.avg_pool2d(x, kernel_size=(4, 2), stride=(4, 2))
-        return x
-
-
-class ResnetBlock(nn.Module):
-    def __init__(
-        self,
-        *,
-        in_channels,
-        out_channels=None,
-        conv_shortcut=False,
-        dropout,
-        temb_channels=512,
-    ):
-        super().__init__()
-        self.in_channels = in_channels
-        out_channels = in_channels if out_channels is None else out_channels
-        self.out_channels = out_channels
-        self.use_conv_shortcut = conv_shortcut
-
-        self.norm1 = Normalize(in_channels)
-        self.conv1 = torch.nn.Conv2d(
-            in_channels, out_channels, kernel_size=3, stride=1, padding=1
-        )
-        if temb_channels > 0:
-            self.temb_proj = torch.nn.Linear(temb_channels, out_channels)
-        self.norm2 = Normalize(out_channels)
-        self.dropout = torch.nn.Dropout(dropout)
-        self.conv2 = torch.nn.Conv2d(
-            out_channels, out_channels, kernel_size=3, stride=1, padding=1
-        )
-        if self.in_channels != self.out_channels:
-            if self.use_conv_shortcut:
-                self.conv_shortcut = torch.nn.Conv2d(
-                    in_channels, out_channels, kernel_size=3, stride=1, padding=1
-                )
-            else:
-                self.nin_shortcut = torch.nn.Conv2d(
-                    in_channels, out_channels, kernel_size=1, stride=1, padding=0
-                )
-
-    def forward(self, x, temb):
-        h = x
-        h = self.norm1(h)
-        h = nonlinearity(h)
-        h = self.conv1(h)
-
-        if temb is not None:
-            h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
-
-        h = self.norm2(h)
-        h = nonlinearity(h)
-        h = self.dropout(h)
-        h = self.conv2(h)
-
-        if self.in_channels != self.out_channels:
-            if self.use_conv_shortcut:
-                x = self.conv_shortcut(x)
-            else:
-                x = self.nin_shortcut(x)
-
-        return x + h
-
-
-class LinAttnBlock(LinearAttention):
-    """to match AttnBlock usage"""
-
-    def __init__(self, in_channels):
-        super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
-
-
-class AttnBlock(nn.Module):
-    def __init__(self, in_channels):
-        super().__init__()
-        self.in_channels = in_channels
-
-        self.norm = Normalize(in_channels)
-        self.q = torch.nn.Conv2d(
-            in_channels, in_channels, kernel_size=1, stride=1, padding=0
-        )
-        self.k = torch.nn.Conv2d(
-            in_channels, in_channels, kernel_size=1, stride=1, padding=0
-        )
-        self.v = torch.nn.Conv2d(
-            in_channels, in_channels, kernel_size=1, stride=1, padding=0
-        )
-        self.proj_out = torch.nn.Conv2d(
-            in_channels, in_channels, kernel_size=1, stride=1, padding=0
-        )
-
-    def forward(self, x):
-        h_ = x
-        h_ = self.norm(h_)
-        q = self.q(h_)
-        k = self.k(h_)
-        v = self.v(h_)
-
-        # compute attention
-        b, c, h, w = q.shape
-        q = q.reshape(b, c, h * w).contiguous()
-        q = q.permute(0, 2, 1).contiguous()  # b,hw,c
-        k = k.reshape(b, c, h * w).contiguous()  # b,c,hw
-        w_ = torch.bmm(q, k).contiguous()  # b,hw,hw  w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
-        w_ = w_ * (int(c) ** (-0.5))
-        w_ = torch.nn.functional.softmax(w_, dim=2)
-
-        # attend to values
-        v = v.reshape(b, c, h * w).contiguous()
-        w_ = w_.permute(0, 2, 1).contiguous()  # b,hw,hw (first hw of k, second of q)
-        h_ = torch.bmm(
-            v, w_
-        ).contiguous()  # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
-        h_ = h_.reshape(b, c, h, w).contiguous()
-
-        h_ = self.proj_out(h_)
-
-        return x + h_
-
-
-def make_attn(in_channels, attn_type="vanilla"):
-    assert attn_type in ["vanilla", "linear", "none"], f"attn_type {attn_type} unknown"
-    # print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
-    if attn_type == "vanilla":
-        return AttnBlock(in_channels)
-    elif attn_type == "none":
-        return nn.Identity(in_channels)
-    else:
-        return LinAttnBlock(in_channels)
-
-
-class Model(nn.Module):
-    def __init__(
-        self,
-        *,
-        ch,
-        out_ch,
-        ch_mult=(1, 2, 4, 8),
-        num_res_blocks,
-        attn_resolutions,
-        dropout=0.0,
-        resamp_with_conv=True,
-        in_channels,
-        resolution,
-        use_timestep=True,
-        use_linear_attn=False,
-        attn_type="vanilla",
-    ):
-        super().__init__()
-        if use_linear_attn:
-            attn_type = "linear"
-        self.ch = ch
-        self.temb_ch = self.ch * 4
-        self.num_resolutions = len(ch_mult)
-        self.num_res_blocks = num_res_blocks
-        self.resolution = resolution
-        self.in_channels = in_channels
-
-        self.use_timestep = use_timestep
-        if self.use_timestep:
-            # timestep embedding
-            self.temb = nn.Module()
-            self.temb.dense = nn.ModuleList(
-                [
-                    torch.nn.Linear(self.ch, self.temb_ch),
-                    torch.nn.Linear(self.temb_ch, self.temb_ch),
-                ]
-            )
-
-        # downsampling
-        self.conv_in = torch.nn.Conv2d(
-            in_channels, self.ch, kernel_size=3, stride=1, padding=1
-        )
-
-        curr_res = resolution
-        in_ch_mult = (1,) + tuple(ch_mult)
-        self.down = nn.ModuleList()
-        for i_level in range(self.num_resolutions):
-            block = nn.ModuleList()
-            attn = nn.ModuleList()
-            block_in = ch * in_ch_mult[i_level]
-            block_out = ch * ch_mult[i_level]
-            for i_block in range(self.num_res_blocks):
-                block.append(
-                    ResnetBlock(
-                        in_channels=block_in,
-                        out_channels=block_out,
-                        temb_channels=self.temb_ch,
-                        dropout=dropout,
-                    )
-                )
-                block_in = block_out
-                if curr_res in attn_resolutions:
-                    attn.append(make_attn(block_in, attn_type=attn_type))
-            down = nn.Module()
-            down.block = block
-            down.attn = attn
-            if i_level != self.num_resolutions - 1:
-                down.downsample = Downsample(block_in, resamp_with_conv)
-                curr_res = curr_res // 2
-            self.down.append(down)
-
-        # middle
-        self.mid = nn.Module()
-        self.mid.block_1 = ResnetBlock(
-            in_channels=block_in,
-            out_channels=block_in,
-            temb_channels=self.temb_ch,
-            dropout=dropout,
-        )
-        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
-        self.mid.block_2 = ResnetBlock(
-            in_channels=block_in,
-            out_channels=block_in,
-            temb_channels=self.temb_ch,
-            dropout=dropout,
-        )
-
-        # upsampling
-        self.up = nn.ModuleList()
-        for i_level in reversed(range(self.num_resolutions)):
-            block = nn.ModuleList()
-            attn = nn.ModuleList()
-            block_out = ch * ch_mult[i_level]
-            skip_in = ch * ch_mult[i_level]
-            for i_block in range(self.num_res_blocks + 1):
-                if i_block == self.num_res_blocks:
-                    skip_in = ch * in_ch_mult[i_level]
-                block.append(
-                    ResnetBlock(
-                        in_channels=block_in + skip_in,
-                        out_channels=block_out,
-                        temb_channels=self.temb_ch,
-                        dropout=dropout,
-                    )
-                )
-                block_in = block_out
-                if curr_res in attn_resolutions:
-                    attn.append(make_attn(block_in, attn_type=attn_type))
-            up = nn.Module()
-            up.block = block
-            up.attn = attn
-            if i_level != 0:
-                up.upsample = Upsample(block_in, resamp_with_conv)
-                curr_res = curr_res * 2
-            self.up.insert(0, up)  # prepend to get consistent order
-
-        # end
-        self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(
-            block_in, out_ch, kernel_size=3, stride=1, padding=1
-        )
-
-    def forward(self, x, t=None, context=None):
-        # assert x.shape[2] == x.shape[3] == self.resolution
-        if context is not None:
-            # assume aligned context, cat along channel axis
-            x = torch.cat((x, context), dim=1)
-        if self.use_timestep:
-            # timestep embedding
-            assert t is not None
-            temb = get_timestep_embedding(t, self.ch)
-            temb = self.temb.dense[0](temb)
-            temb = nonlinearity(temb)
-            temb = self.temb.dense[1](temb)
-        else:
-            temb = None
-
-        # downsampling
-        hs = [self.conv_in(x)]
-        for i_level in range(self.num_resolutions):
-            for i_block in range(self.num_res_blocks):
-                h = self.down[i_level].block[i_block](hs[-1], temb)
-                if len(self.down[i_level].attn) > 0:
-                    h = self.down[i_level].attn[i_block](h)
-                hs.append(h)
-            if i_level != self.num_resolutions - 1:
-                hs.append(self.down[i_level].downsample(hs[-1]))
-
-        # middle
-        h = hs[-1]
-        h = self.mid.block_1(h, temb)
-        h = self.mid.attn_1(h)
-        h = self.mid.block_2(h, temb)
-
-        # upsampling
-        for i_level in reversed(range(self.num_resolutions)):
-            for i_block in range(self.num_res_blocks + 1):
-                h = self.up[i_level].block[i_block](
-                    torch.cat([h, hs.pop()], dim=1), temb
-                )
-                if len(self.up[i_level].attn) > 0:
-                    h = self.up[i_level].attn[i_block](h)
-            if i_level != 0:
-                h = self.up[i_level].upsample(h)
-
-        # end
-        h = self.norm_out(h)
-        h = nonlinearity(h)
-        h = self.conv_out(h)
-        return h
-
-    def get_last_layer(self):
-        return self.conv_out.weight
-
-
-class Encoder(nn.Module):
-    def __init__(
-        self,
-        *,
-        ch,
-        out_ch,
-        ch_mult=(1, 2, 4, 8),
-        num_res_blocks,
-        attn_resolutions,
-        dropout=0.0,
-        resamp_with_conv=True,
-        in_channels,
-        resolution,
-        z_channels,
-        double_z=True,
-        use_linear_attn=False,
-        attn_type="vanilla",
-        downsample_time_stride4_levels=[],
-        **ignore_kwargs,
-    ):
-        super().__init__()
-        if use_linear_attn:
-            attn_type = "linear"
-        self.ch = ch
-        self.temb_ch = 0
-        self.num_resolutions = len(ch_mult)
-        self.num_res_blocks = num_res_blocks
-        self.resolution = resolution
-        self.in_channels = in_channels
-        self.downsample_time_stride4_levels = downsample_time_stride4_levels
-
-        if len(self.downsample_time_stride4_levels) > 0:
-            assert max(self.downsample_time_stride4_levels) < self.num_resolutions, (
-                "The level to perform downsample 4 operation need to be smaller than the total resolution number %s"
-                % str(self.num_resolutions)
-            )
-
-        # downsampling
-        self.conv_in = torch.nn.Conv2d(
-            in_channels, self.ch, kernel_size=3, stride=1, padding=1
-        )
-
-        curr_res = resolution
-        in_ch_mult = (1,) + tuple(ch_mult)
-        self.in_ch_mult = in_ch_mult
-        self.down = nn.ModuleList()
-        for i_level in range(self.num_resolutions):
-            block = nn.ModuleList()
-            attn = nn.ModuleList()
-            block_in = ch * in_ch_mult[i_level]
-            block_out = ch * ch_mult[i_level]
-            for i_block in range(self.num_res_blocks):
-                block.append(
-                    ResnetBlock(
-                        in_channels=block_in,
-                        out_channels=block_out,
-                        temb_channels=self.temb_ch,
-                        dropout=dropout,
-                    )
-                )
-                block_in = block_out
-                if curr_res in attn_resolutions:
-                    attn.append(make_attn(block_in, attn_type=attn_type))
-            down = nn.Module()
-            down.block = block
-            down.attn = attn
-            if i_level != self.num_resolutions - 1:
-                if i_level in self.downsample_time_stride4_levels:
-                    down.downsample = DownsampleTimeStride4(block_in, resamp_with_conv)
-                else:
-                    down.downsample = Downsample(block_in, resamp_with_conv)
-                curr_res = curr_res // 2
-            self.down.append(down)
-
-        # middle
-        self.mid = nn.Module()
-        self.mid.block_1 = ResnetBlock(
-            in_channels=block_in,
-            out_channels=block_in,
-            temb_channels=self.temb_ch,
-            dropout=dropout,
-        )
-        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
-        self.mid.block_2 = ResnetBlock(
-            in_channels=block_in,
-            out_channels=block_in,
-            temb_channels=self.temb_ch,
-            dropout=dropout,
-        )
-
-        # end
-        self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(
-            block_in,
-            2 * z_channels if double_z else z_channels,
-            kernel_size=3,
-            stride=1,
-            padding=1,
-        )
-
-    def forward(self, x):
-        # timestep embedding
-        temb = None
-        # downsampling
-        hs = [self.conv_in(x)]
-        for i_level in range(self.num_resolutions):
-            for i_block in range(self.num_res_blocks):
-                h = self.down[i_level].block[i_block](hs[-1], temb)
-                if len(self.down[i_level].attn) > 0:
-                    h = self.down[i_level].attn[i_block](h)
-                hs.append(h)
-            if i_level != self.num_resolutions - 1:
-                hs.append(self.down[i_level].downsample(hs[-1]))
-
-        # middle
-        h = hs[-1]
-        h = self.mid.block_1(h, temb)
-        h = self.mid.attn_1(h)
-        h = self.mid.block_2(h, temb)
-
-        # end
-        h = self.norm_out(h)
-        h = nonlinearity(h)
-        h = self.conv_out(h)
-        return h
-
-
-class Decoder(nn.Module):
-    def __init__(
-        self,
-        *,
-        ch,
-        out_ch,
-        ch_mult=(1, 2, 4, 8),
-        num_res_blocks,
-        attn_resolutions,
-        dropout=0.0,
-        resamp_with_conv=True,
-        in_channels,
-        resolution,
-        z_channels,
-        give_pre_end=False,
-        tanh_out=False,
-        use_linear_attn=False,
-        downsample_time_stride4_levels=[],
-        attn_type="vanilla",
-        **ignorekwargs,
-    ):
-        super().__init__()
-        if use_linear_attn:
-            attn_type = "linear"
-        self.ch = ch
-        self.temb_ch = 0
-        self.num_resolutions = len(ch_mult)
-        self.num_res_blocks = num_res_blocks
-        self.resolution = resolution
-        self.in_channels = in_channels
-        self.give_pre_end = give_pre_end
-        self.tanh_out = tanh_out
-        self.downsample_time_stride4_levels = downsample_time_stride4_levels
-
-        if len(self.downsample_time_stride4_levels) > 0:
-            assert max(self.downsample_time_stride4_levels) < self.num_resolutions, (
-                "The level to perform downsample 4 operation need to be smaller than the total resolution number %s"
-                % str(self.num_resolutions)
-            )
-
-        # compute in_ch_mult, block_in and curr_res at lowest res
-        in_ch_mult = (1,) + tuple(ch_mult)
-        block_in = ch * ch_mult[self.num_resolutions - 1]
-        curr_res = resolution // 2 ** (self.num_resolutions - 1)
-        self.z_shape = (1, z_channels, curr_res, curr_res)
-        # print("Working with z of shape {} = {} dimensions.".format(
-        #     self.z_shape, np.prod(self.z_shape)))
-
-        # z to block_in
-        self.conv_in = torch.nn.Conv2d(
-            z_channels, block_in, kernel_size=3, stride=1, padding=1
-        )
-
-        # middle
-        self.mid = nn.Module()
-        self.mid.block_1 = ResnetBlock(
-            in_channels=block_in,
-            out_channels=block_in,
-            temb_channels=self.temb_ch,
-            dropout=dropout,
-        )
-        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
-        self.mid.block_2 = ResnetBlock(
-            in_channels=block_in,
-            out_channels=block_in,
-            temb_channels=self.temb_ch,
-            dropout=dropout,
-        )
-
-        # upsampling
-        self.up = nn.ModuleList()
-        for i_level in reversed(range(self.num_resolutions)):
-            block = nn.ModuleList()
-            attn = nn.ModuleList()
-            block_out = ch * ch_mult[i_level]
-            for i_block in range(self.num_res_blocks + 1):
-                block.append(
-                    ResnetBlock(
-                        in_channels=block_in,
-                        out_channels=block_out,
-                        temb_channels=self.temb_ch,
-                        dropout=dropout,
-                    )
-                )
-                block_in = block_out
-                if curr_res in attn_resolutions:
-                    attn.append(make_attn(block_in, attn_type=attn_type))
-            up = nn.Module()
-            up.block = block
-            up.attn = attn
-            if i_level != 0:
-                if i_level - 1 in self.downsample_time_stride4_levels:
-                    up.upsample = UpsampleTimeStride4(block_in, resamp_with_conv)
-                else:
-                    up.upsample = Upsample(block_in, resamp_with_conv)
-                curr_res = curr_res * 2
-            self.up.insert(0, up)  # prepend to get consistent order
-
-        # end
-        self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(
-            block_in, out_ch, kernel_size=3, stride=1, padding=1
-        )
-
-    def forward(self, z):
-        # assert z.shape[1:] == self.z_shape[1:]
-        self.last_z_shape = z.shape
-
-        # timestep embedding
-        temb = None
-
-        # z to block_in
-        h = self.conv_in(z)
-
-        # middle
-        h = self.mid.block_1(h, temb)
-        h = self.mid.attn_1(h)
-        h = self.mid.block_2(h, temb)
-
-        # upsampling
-        for i_level in reversed(range(self.num_resolutions)):
-            for i_block in range(self.num_res_blocks + 1):
-                h = self.up[i_level].block[i_block](h, temb)
-                if len(self.up[i_level].attn) > 0:
-                    h = self.up[i_level].attn[i_block](h)
-            if i_level != 0:
-                h = self.up[i_level].upsample(h)
-
-        # end
-        if self.give_pre_end:
-            return h
-
-        h = self.norm_out(h)
-        h = nonlinearity(h)
-        h = self.conv_out(h)
-        if self.tanh_out:
-            h = torch.tanh(h)
-        return h
-
-
-class SimpleDecoder(nn.Module):
-    def __init__(self, in_channels, out_channels, *args, **kwargs):
-        super().__init__()
-        self.model = nn.ModuleList(
-            [
-                nn.Conv2d(in_channels, in_channels, 1),
-                ResnetBlock(
-                    in_channels=in_channels,
-                    out_channels=2 * in_channels,
-                    temb_channels=0,
-                    dropout=0.0,
-                ),
-                ResnetBlock(
-                    in_channels=2 * in_channels,
-                    out_channels=4 * in_channels,
-                    temb_channels=0,
-                    dropout=0.0,
-                ),
-                ResnetBlock(
-                    in_channels=4 * in_channels,
-                    out_channels=2 * in_channels,
-                    temb_channels=0,
-                    dropout=0.0,
-                ),
-                nn.Conv2d(2 * in_channels, in_channels, 1),
-                Upsample(in_channels, with_conv=True),
-            ]
-        )
-        # end
-        self.norm_out = Normalize(in_channels)
-        self.conv_out = torch.nn.Conv2d(
-            in_channels, out_channels, kernel_size=3, stride=1, padding=1
-        )
-
-    def forward(self, x):
-        for i, layer in enumerate(self.model):
-            if i in [1, 2, 3]:
-                x = layer(x, None)
-            else:
-                x = layer(x)
-
-        h = self.norm_out(x)
-        h = nonlinearity(h)
-        x = self.conv_out(h)
-        return x
-
-
-class UpsampleDecoder(nn.Module):
-    def __init__(
-        self,
-        in_channels,
-        out_channels,
-        ch,
-        num_res_blocks,
-        resolution,
-        ch_mult=(2, 2),
-        dropout=0.0,
-    ):
-        super().__init__()
-        # upsampling
-        self.temb_ch = 0
-        self.num_resolutions = len(ch_mult)
-        self.num_res_blocks = num_res_blocks
-        block_in = in_channels
-        curr_res = resolution // 2 ** (self.num_resolutions - 1)
-        self.res_blocks = nn.ModuleList()
-        self.upsample_blocks = nn.ModuleList()
-        for i_level in range(self.num_resolutions):
-            res_block = []
-            block_out = ch * ch_mult[i_level]
-            for i_block in range(self.num_res_blocks + 1):
-                res_block.append(
-                    ResnetBlock(
-                        in_channels=block_in,
-                        out_channels=block_out,
-                        temb_channels=self.temb_ch,
-                        dropout=dropout,
-                    )
-                )
-                block_in = block_out
-            self.res_blocks.append(nn.ModuleList(res_block))
-            if i_level != self.num_resolutions - 1:
-                self.upsample_blocks.append(Upsample(block_in, True))
-                curr_res = curr_res * 2
-
-        # end
-        self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(
-            block_in, out_channels, kernel_size=3, stride=1, padding=1
-        )
-
-    def forward(self, x):
-        # upsampling
-        h = x
-        for k, i_level in enumerate(range(self.num_resolutions)):
-            for i_block in range(self.num_res_blocks + 1):
-                h = self.res_blocks[i_level][i_block](h, None)
-            if i_level != self.num_resolutions - 1:
-                h = self.upsample_blocks[k](h)
-        h = self.norm_out(h)
-        h = nonlinearity(h)
-        h = self.conv_out(h)
-        return h
-
-
-class LatentRescaler(nn.Module):
-    def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
-        super().__init__()
-        # residual block, interpolate, residual block
-        self.factor = factor
-        self.conv_in = nn.Conv2d(
-            in_channels, mid_channels, kernel_size=3, stride=1, padding=1
-        )
-        self.res_block1 = nn.ModuleList(
-            [
-                ResnetBlock(
-                    in_channels=mid_channels,
-                    out_channels=mid_channels,
-                    temb_channels=0,
-                    dropout=0.0,
-                )
-                for _ in range(depth)
-            ]
-        )
-        self.attn = AttnBlock(mid_channels)
-        self.res_block2 = nn.ModuleList(
-            [
-                ResnetBlock(
-                    in_channels=mid_channels,
-                    out_channels=mid_channels,
-                    temb_channels=0,
-                    dropout=0.0,
-                )
-                for _ in range(depth)
-            ]
-        )
-
-        self.conv_out = nn.Conv2d(
-            mid_channels,
-            out_channels,
-            kernel_size=1,
-        )
-
-    def forward(self, x):
-        x = self.conv_in(x)
-        for block in self.res_block1:
-            x = block(x, None)
-        x = torch.nn.functional.interpolate(
-            x,
-            size=(
-                int(round(x.shape[2] * self.factor)),
-                int(round(x.shape[3] * self.factor)),
-            ),
-        )
-        x = self.attn(x).contiguous()
-        for block in self.res_block2:
-            x = block(x, None)
-        x = self.conv_out(x)
-        return x
-
-
-class MergedRescaleEncoder(nn.Module):
-    def __init__(
-        self,
-        in_channels,
-        ch,
-        resolution,
-        out_ch,
-        num_res_blocks,
-        attn_resolutions,
-        dropout=0.0,
-        resamp_with_conv=True,
-        ch_mult=(1, 2, 4, 8),
-        rescale_factor=1.0,
-        rescale_module_depth=1,
-    ):
-        super().__init__()
-        intermediate_chn = ch * ch_mult[-1]
-        self.encoder = Encoder(
-            in_channels=in_channels,
-            num_res_blocks=num_res_blocks,
-            ch=ch,
-            ch_mult=ch_mult,
-            z_channels=intermediate_chn,
-            double_z=False,
-            resolution=resolution,
-            attn_resolutions=attn_resolutions,
-            dropout=dropout,
-            resamp_with_conv=resamp_with_conv,
-            out_ch=None,
-        )
-        self.rescaler = LatentRescaler(
-            factor=rescale_factor,
-            in_channels=intermediate_chn,
-            mid_channels=intermediate_chn,
-            out_channels=out_ch,
-            depth=rescale_module_depth,
-        )
-
-    def forward(self, x):
-        x = self.encoder(x)
-        x = self.rescaler(x)
-        return x
-
-
-class MergedRescaleDecoder(nn.Module):
-    def __init__(
-        self,
-        z_channels,
-        out_ch,
-        resolution,
-        num_res_blocks,
-        attn_resolutions,
-        ch,
-        ch_mult=(1, 2, 4, 8),
-        dropout=0.0,
-        resamp_with_conv=True,
-        rescale_factor=1.0,
-        rescale_module_depth=1,
-    ):
-        super().__init__()
-        tmp_chn = z_channels * ch_mult[-1]
-        self.decoder = Decoder(
-            out_ch=out_ch,
-            z_channels=tmp_chn,
-            attn_resolutions=attn_resolutions,
-            dropout=dropout,
-            resamp_with_conv=resamp_with_conv,
-            in_channels=None,
-            num_res_blocks=num_res_blocks,
-            ch_mult=ch_mult,
-            resolution=resolution,
-            ch=ch,
-        )
-        self.rescaler = LatentRescaler(
-            factor=rescale_factor,
-            in_channels=z_channels,
-            mid_channels=tmp_chn,
-            out_channels=tmp_chn,
-            depth=rescale_module_depth,
-        )
-
-    def forward(self, x):
-        x = self.rescaler(x)
-        x = self.decoder(x)
-        return x
-
-
-class Upsampler(nn.Module):
-    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
-        super().__init__()
-        assert out_size >= in_size
-        num_blocks = int(np.log2(out_size // in_size)) + 1
-        factor_up = 1.0 + (out_size % in_size)
-        print(
-            f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}"
-        )
-        self.rescaler = LatentRescaler(
-            factor=factor_up,
-            in_channels=in_channels,
-            mid_channels=2 * in_channels,
-            out_channels=in_channels,
-        )
-        self.decoder = Decoder(
-            out_ch=out_channels,
-            resolution=out_size,
-            z_channels=in_channels,
-            num_res_blocks=2,
-            attn_resolutions=[],
-            in_channels=None,
-            ch=in_channels,
-            ch_mult=[ch_mult for _ in range(num_blocks)],
-        )
-
-    def forward(self, x):
-        x = self.rescaler(x)
-        x = self.decoder(x)
-        return x
-
-
-class Resize(nn.Module):
-    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
-        super().__init__()
-        self.with_conv = learned
-        self.mode = mode
-        if self.with_conv:
-            print(
-                f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode"
-            )
-            raise NotImplementedError()
-            assert in_channels is not None
-            # no asymmetric padding in torch conv, must do it ourselves
-            self.conv = torch.nn.Conv2d(
-                in_channels, in_channels, kernel_size=4, stride=2, padding=1
-            )
-
-    def forward(self, x, scale_factor=1.0):
-        if scale_factor == 1.0:
-            return x
-        else:
-            x = torch.nn.functional.interpolate(
-                x, mode=self.mode, align_corners=False, scale_factor=scale_factor
-            )
-        return x
-
-
-class FirstStagePostProcessor(nn.Module):
-    def __init__(
-        self,
-        ch_mult: list,
-        in_channels,
-        pretrained_model: nn.Module = None,
-        reshape=False,
-        n_channels=None,
-        dropout=0.0,
-        pretrained_config=None,
-    ):
-        super().__init__()
-        if pretrained_config is None:
-            assert (
-                pretrained_model is not None
-            ), 'Either "pretrained_model" or "pretrained_config" must not be None'
-            self.pretrained_model = pretrained_model
-        else:
-            assert (
-                pretrained_config is not None
-            ), 'Either "pretrained_model" or "pretrained_config" must not be None'
-            self.instantiate_pretrained(pretrained_config)
-
-        self.do_reshape = reshape
-
-        if n_channels is None:
-            n_channels = self.pretrained_model.encoder.ch
-
-        self.proj_norm = Normalize(in_channels, num_groups=in_channels // 2)
-        self.proj = nn.Conv2d(
-            in_channels, n_channels, kernel_size=3, stride=1, padding=1
-        )
-
-        blocks = []
-        downs = []
-        ch_in = n_channels
-        for m in ch_mult:
-            blocks.append(
-                ResnetBlock(
-                    in_channels=ch_in, out_channels=m * n_channels, dropout=dropout
-                )
-            )
-            ch_in = m * n_channels
-            downs.append(Downsample(ch_in, with_conv=False))
-
-        self.model = nn.ModuleList(blocks)
-        self.downsampler = nn.ModuleList(downs)
-
-    def instantiate_pretrained(self, config):
-        model = instantiate_from_config(config)
-        self.pretrained_model = model.eval()
-        # self.pretrained_model.train = False
-        for param in self.pretrained_model.parameters():
-            param.requires_grad = False
-
-    @torch.no_grad()
-    def encode_with_pretrained(self, x):
-        c = self.pretrained_model.encode(x)
-        if isinstance(c, DiagonalGaussianDistribution):  # note: DiagonalGaussianDistribution is used here but never imported in this file
-            c = c.mode()
-        return c
-
-    def forward(self, x):
-        z_fs = self.encode_with_pretrained(x)
-        z = self.proj_norm(z_fs)
-        z = self.proj(z)
-        z = nonlinearity(z)
-
-        for submodel, downmodel in zip(self.model, self.downsampler):
-            z = submodel(z, temb=None)
-            z = downmodel(z)
-
-        if self.do_reshape:
-            z = rearrange(z, "b c h w -> b (h w) c")
-        return z
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/contperceptual.py
DELETED
@@ -1,123 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import sys
-
-sys.path.insert(0, '.')  # nopep8
-from ldm.modules.losses_audio.vqperceptual import *
-
-
-class LPAPSWithDiscriminator(nn.Module):
-    def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
-                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
-                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
-                 disc_loss="hinge"):
-
-        super().__init__()
-        assert disc_loss in ["hinge", "vanilla"]
-        self.kl_weight = kl_weight
-        self.pixel_weight = pixelloss_weight
-        self.perceptual_loss = LPAPS().eval()  # LPIPS is for natural images, while LPAPS is for mel-spectrograms
-        self.perceptual_weight = perceptual_weight
-        # output log variance
-        self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
-
-        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
-                                                 n_layers=disc_num_layers,
-                                                 use_actnorm=use_actnorm,
-                                                 ).apply(weights_init)
-        self.discriminator_iter_start = disc_start
-        if disc_loss == "hinge":
-            self.disc_loss = hinge_d_loss
-        elif disc_loss == "vanilla":
-            self.disc_loss = vanilla_d_loss
-        else:
-            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
-        print(f"LPAPSWithDiscriminator running with {disc_loss} loss.")
-        self.disc_factor = disc_factor
-        self.discriminator_weight = disc_weight
-        self.disc_conditional = disc_conditional
-
-
-    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
-        if last_layer is not None:
-            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
-        else:
-            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
-
-        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
-        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
-        d_weight = d_weight * self.discriminator_weight
-        return d_weight
-
-    def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
-                global_step, last_layer=None, cond=None, split="train", weights=None):
-        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
-        if self.perceptual_weight > 0:
-            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
-            # print(f"p_loss {p_loss}")
-            rec_loss = rec_loss + self.perceptual_weight * p_loss
-        else:
-            p_loss = torch.tensor([0.0])
-
-        nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
-        weighted_nll_loss = nll_loss
-        if weights is not None:
-            weighted_nll_loss = weights * nll_loss
-        weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
-        nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
-        kl_loss = posteriors.kl()
-        kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
-
-        # now the GAN part
-        if optimizer_idx == 0:
-            # generator update
-            if cond is None:
-                assert not self.disc_conditional
-                logits_fake = self.discriminator(reconstructions.contiguous())
-            else:
-                assert self.disc_conditional
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
-            g_loss = -torch.mean(logits_fake)
-
-            try:
-                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
-            except RuntimeError:
-                assert not self.training
-                d_weight = torch.tensor(0.0)
-
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
-            loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss
-
-            log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
-                   "{}/logvar".format(split): self.logvar.detach(),
-                   "{}/kl_loss".format(split): kl_loss.detach().mean(),
-                   "{}/nll_loss".format(split): nll_loss.detach().mean(),
-                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
-                   "{}/d_weight".format(split): d_weight.detach(),
-                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
-                   "{}/g_loss".format(split): g_loss.detach().mean(),
-                   }
-            return loss, log
-
-        if optimizer_idx == 1:
-            # second pass for discriminator update
-            if cond is None:
-                logits_real = self.discriminator(inputs.contiguous().detach())
-                logits_fake = self.discriminator(reconstructions.contiguous().detach())
-            else:
-                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
-
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
-            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
-
-            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
-                   "{}/logits_real".format(split): logits_real.detach().mean(),
-                   "{}/logits_fake".format(split): logits_fake.detach().mean()
-                   }
-            return d_loss, log
-
-
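`calculate_adaptive_weight` above sets the discriminator weight from the ratio of gradient norms at the generator's last layer, so the adversarial term cannot drown out the reconstruction term. A stand-alone sketch of that rule on a toy parameter (the tiny losses here are placeholders, not the real NLL/GAN losses):

    import torch

    last_layer = torch.nn.Parameter(torch.randn(4, 4))
    x = torch.randn(4)
    nll_loss = (last_layer @ x).pow(2).sum()  # placeholder reconstruction loss
    g_loss = (last_layer @ x).sum()           # placeholder generator loss

    nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
    g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]

    # same balancing rule as the loss module: norm ratio, clamped, detached
    d_weight = torch.clamp(torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4), 0.0, 1e4).detach()
    print(float(d_weight))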
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/text/text_encoder.py
DELETED
@@ -1,272 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
import re
|
3 |
-
import six
|
4 |
-
from six.moves import range # pylint: disable=redefined-builtin
|
5 |
-
|
6 |
-
PAD = "<pad>"
|
7 |
-
EOS = "<EOS>"
|
8 |
-
UNK = "<UNK>"
|
9 |
-
SEG = "|"
|
10 |
-
PUNCS = '!,.?;:'
|
11 |
-
RESERVED_TOKENS = [PAD, EOS, UNK]
|
12 |
-
NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
|
13 |
-
PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0
|
14 |
-
EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1
|
15 |
-
UNK_ID = RESERVED_TOKENS.index(UNK) # Normally 2
|
16 |
-
|
17 |
-
if six.PY2:
|
18 |
-
RESERVED_TOKENS_BYTES = RESERVED_TOKENS
|
19 |
-
else:
|
20 |
-
RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
|
21 |
-
|
22 |
-
# Regular expression for unescaping token strings.
|
23 |
-
# '\u' is converted to '_'
|
24 |
-
# '\\' is converted to '\'
|
25 |
-
# '\213;' is converted to unichr(213)
|
26 |
-
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
|
27 |
-
_ESCAPE_CHARS = set(u"\\_u;0123456789")
|
28 |
-
|
29 |
-
|
30 |
-
def strip_ids(ids, ids_to_strip):
|
31 |
-
"""Strip ids_to_strip from the end ids."""
|
32 |
-
ids = list(ids)
|
33 |
-
while ids and ids[-1] in ids_to_strip:
|
34 |
-
ids.pop()
|
35 |
-
return ids
|
36 |
-
|
37 |
-
|
38 |
-
class TextEncoder(object):
|
39 |
-
"""Base class for converting from ints to/from human readable strings."""
|
40 |
-
|
41 |
-
def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS):
|
42 |
-
self._num_reserved_ids = num_reserved_ids
|
43 |
-
|
44 |
-
@property
|
45 |
-
def num_reserved_ids(self):
|
46 |
-
return self._num_reserved_ids
|
47 |
-
|
48 |
-
def encode(self, s):
|
49 |
-
"""Transform a human-readable string into a sequence of int ids.
|
50 |
-
|
51 |
-
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
|
52 |
-
num_reserved_ids) are reserved.
|
53 |
-
|
54 |
-
EOS is not appended.
|
55 |
-
|
56 |
-
Args:
|
57 |
-
s: human-readable string to be converted.
|
58 |
-
|
59 |
-
Returns:
|
60 |
-
ids: list of integers
|
61 |
-
"""
|
62 |
-
return [int(w) + self._num_reserved_ids for w in s.split()]
|
63 |
-
|
64 |
-
def decode(self, ids, strip_extraneous=False):
|
65 |
-
"""Transform a sequence of int ids into a human-readable string.
|
66 |
-
|
67 |
-
EOS is not expected in ids.
|
68 |
-
|
69 |
-
Args:
|
70 |
-
ids: list of integers to be converted.
|
71 |
-
strip_extraneous: bool, whether to strip off extraneous tokens
|
72 |
-
(EOS and PAD).
|
73 |
-
|
74 |
-
Returns:
|
75 |
-
s: human-readable string.
|
76 |
-
"""
|
77 |
-
if strip_extraneous:
|
78 |
-
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
|
79 |
-
return " ".join(self.decode_list(ids))
|
80 |
-
|
81 |
-
def decode_list(self, ids):
|
82 |
-
"""Transform a sequence of int ids into a their string versions.
|
83 |
-
|
84 |
-
This method supports transforming individual input/output ids to their
|
85 |
-
string versions so that sequence to/from text conversions can be visualized
|
86 |
-
in a human readable format.
|
87 |
-
|
88 |
-
Args:
|
89 |
-
ids: list of integers to be converted.
|
90 |
-
|
91 |
-
Returns:
|
92 |
-
strs: list of human-readable string.
|
93 |
-
"""
|
94 |
-
decoded_ids = []
|
95 |
-
for id_ in ids:
|
96 |
-
if 0 <= id_ < self._num_reserved_ids:
|
97 |
-
decoded_ids.append(RESERVED_TOKENS[int(id_)])
|
98 |
-
else:
|
99 |
-
decoded_ids.append(id_ - self._num_reserved_ids)
|
100 |
-
return [str(d) for d in decoded_ids]
|
101 |
-
|
102 |
-
@property
|
103 |
-
def vocab_size(self):
|
104 |
-
raise NotImplementedError()
|
105 |
-
|
106 |
-
|
107 |
-
class TokenTextEncoder(TextEncoder):
|
108 |
-
"""Encoder based on a user-supplied vocabulary (file or list)."""
|
109 |
-
|
110 |
-
def __init__(self,
|
111 |
-
vocab_filename,
|
112 |
-
reverse=False,
|
113 |
-
vocab_list=None,
|
114 |
-
replace_oov=None,
|
115 |
-
num_reserved_ids=NUM_RESERVED_TOKENS):
|
116 |
-
"""Initialize from a file or list, one token per line.
|
117 |
-
|
118 |
-
Handling of reserved tokens works as follows:
|
119 |
-
- When initializing from a list, we add reserved tokens to the vocab.
|
120 |
-
- When initializing from a file, we do not add reserved tokens to the vocab.
|
121 |
-        - When saving vocab files, we save reserved tokens to the file.
-
-        Args:
-          vocab_filename: If not None, the full filename to read vocab from. If this
-            is not None, then vocab_list should be None.
-          reverse: Boolean indicating if tokens should be reversed during encoding
-            and decoding.
-          vocab_list: If not None, a list of elements of the vocabulary. If this is
-            not None, then vocab_filename should be None.
-          replace_oov: If not None, every out-of-vocabulary token seen when
-            encoding will be replaced by this string (which must be in vocab).
-          num_reserved_ids: Number of IDs to save for reserved tokens like <EOS>.
-        """
-        super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)
-        self._reverse = reverse
-        self._replace_oov = replace_oov
-        if vocab_filename:
-            self._init_vocab_from_file(vocab_filename)
-        else:
-            assert vocab_list is not None
-            self._init_vocab_from_list(vocab_list)
-        self.pad_index = self.token_to_id[PAD]
-        self.eos_index = self.token_to_id[EOS]
-        self.unk_index = self.token_to_id[UNK]
-        self.seg_index = self.token_to_id[SEG] if SEG in self.token_to_id else self.eos_index
-
-    def encode(self, s):
-        """Converts a space-separated string of tokens to a list of ids."""
-        sentence = s
-        tokens = sentence.strip().split()
-        if self._replace_oov is not None:
-            tokens = [t if t in self.token_to_id else self._replace_oov
-                      for t in tokens]
-        ret = [self.token_to_id[tok] for tok in tokens]
-        return ret[::-1] if self._reverse else ret
-
-    def decode(self, ids, strip_eos=False, strip_padding=False):
-        if strip_padding and self.pad() in list(ids):
-            pad_pos = list(ids).index(self.pad())
-            ids = ids[:pad_pos]
-        if strip_eos and self.eos() in list(ids):
-            eos_pos = list(ids).index(self.eos())
-            ids = ids[:eos_pos]
-        return " ".join(self.decode_list(ids))
-
-    def decode_list(self, ids):
-        seq = reversed(ids) if self._reverse else ids
-        return [self._safe_id_to_token(i) for i in seq]
-
-    @property
-    def vocab_size(self):
-        return len(self.id_to_token)
-
-    def __len__(self):
-        return self.vocab_size
-
-    def _safe_id_to_token(self, idx):
-        return self.id_to_token.get(idx, "ID_%d" % idx)
-
-    def _init_vocab_from_file(self, filename):
-        """Load vocab from a file.
-
-        Args:
-          filename: The file to load vocabulary from.
-        """
-        with open(filename) as f:
-            tokens = [token.strip() for token in f.readlines()]
-
-        def token_gen():
-            for token in tokens:
-                yield token
-
-        self._init_vocab(token_gen(), add_reserved_tokens=False)
-
-    def _init_vocab_from_list(self, vocab_list):
-        """Initialize tokens from a list of tokens.
-
-        It is ok if reserved tokens appear in the vocab list. They will be
-        removed. The set of tokens in vocab_list should be unique.
-
-        Args:
-          vocab_list: A list of tokens.
-        """
-
-        def token_gen():
-            for token in vocab_list:
-                if token not in RESERVED_TOKENS:
-                    yield token
-
-        self._init_vocab(token_gen())
-
-    def _init_vocab(self, token_generator, add_reserved_tokens=True):
-        """Initialize vocabulary with tokens from token_generator."""
-
-        self.id_to_token = {}
-        non_reserved_start_index = 0
-
-        if add_reserved_tokens:
-            self.id_to_token.update(enumerate(RESERVED_TOKENS))
-            non_reserved_start_index = len(RESERVED_TOKENS)
-
-        self.id_to_token.update(
-            enumerate(token_generator, start=non_reserved_start_index))
-
-        # _token_to_id is the reverse of _id_to_token
-        self.token_to_id = dict((v, k) for k, v in six.iteritems(self.id_to_token))
-
-    def pad(self):
-        return self.pad_index
-
-    def eos(self):
-        return self.eos_index
-
-    def unk(self):
-        return self.unk_index
-
-    def seg(self):
-        return self.seg_index
-
-    def store_to_file(self, filename):
-        """Write vocab file to disk.
-
-        Vocab files have one token per line. The file ends in a newline. Reserved
-        tokens are written to the vocab file as well.
-
-        Args:
-          filename: Full path of the file to store the vocab to.
-        """
-        with open(filename, "w") as f:
-            for i in range(len(self.id_to_token)):
-                f.write(self.id_to_token[i] + "\n")
-
-    def sil_phonemes(self):
-        return [p for p in self.id_to_token.values() if is_sil_phoneme(p)]
-
-    # add by zhenhiye
-    def add_new_token(self, new_token):
-        assert new_token not in list(self.id_to_token.values())
-        num_existing_tokens = len(self.id_to_token)
-        self.id_to_token[num_existing_tokens] = new_token
-        self.token_to_id = dict((v, k) for k, v in six.iteritems(self.id_to_token))
-        print(f"Added {new_token} into the token dict!")
-
-def build_token_encoder(token_list_file):
-    token_list = json.load(open(token_list_file))
-    return TokenTextEncoder(None, vocab_list=token_list, replace_oov='<UNK>')
-
-
-def is_sil_phoneme(p):
-    return p == '' or not p[0].isalpha()
-    # for aishell_notone_sing
-    # return p == '' or not p[0].isalpha() or p == 'breathe'
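
The `TokenTextEncoder` being removed here is, at its core, a plain token-to-id lookup table with reserved ids at the front and OOV replacement before lookup. A self-contained sketch of that pattern (the reserved-token list and tiny vocab below are hypothetical stand-ins for the module's `RESERVED_TOKENS`):

```python
# Minimal sketch of the token<->id mapping pattern used by the deleted
# TokenTextEncoder: reserved tokens occupy the lowest ids, unknown tokens
# are replaced before lookup. Vocab contents here are made up.
RESERVED_TOKENS = ["<pad>", "<EOS>", "<UNK>"]

vocab = ["hello", "world"]
id_to_token = dict(enumerate(RESERVED_TOKENS + vocab))
token_to_id = {v: k for k, v in id_to_token.items()}

def encode(s, replace_oov="<UNK>"):
    tokens = s.strip().split()
    tokens = [t if t in token_to_id else replace_oov for t in tokens]
    return [token_to_id[t] for t in tokens]

def decode(ids):
    # fall back to a synthetic name for ids outside the table
    return " ".join(id_to_token.get(i, "ID_%d" % i) for i in ids)

print(encode("hello there world"))        # [3, 2, 4] -- "there" maps to <UNK>
print(decode(encode("hello world")))      # "hello world"
```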
spaces/AIGText/GlyphControl/ldm/models/diffusion/dpm_solver/__init__.py
DELETED
@@ -1 +0,0 @@
-from .sampler import DPMSolverSampler
spaces/ALSv/FSW/roop/capturer.py
DELETED
@@ -1,22 +0,0 @@
-from typing import Optional
-import cv2
-
-from roop.typing import Frame
-
-
-def get_video_frame(video_path: str, frame_number: int = 0) -> Optional[Frame]:
-    capture = cv2.VideoCapture(video_path)
-    frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
-    capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
-    has_frame, frame = capture.read()
-    capture.release()
-    if has_frame:
-        return frame
-    return None
-
-
-def get_video_frame_total(video_path: str) -> int:
-    capture = cv2.VideoCapture(video_path)
-    video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
-    capture.release()
-    return video_frame_total
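
The deleted helpers wrap a common OpenCV seek-and-read pattern. A short standalone sketch of the same idea (`'input.mp4'` is a placeholder path; `roop.typing.Frame` is just an alias for a numpy image array):

```python
# Sketch of the seek-and-read pattern used by the deleted capturer module.
import cv2

capture = cv2.VideoCapture("input.mp4")          # placeholder path
total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
capture.set(cv2.CAP_PROP_POS_FRAMES, min(total, 42))  # seek to frame 42
has_frame, frame = capture.read()
capture.release()
if has_frame:
    print(frame.shape)   # (height, width, 3), BGR image
```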
spaces/AchyuthGamer/Free-Accounts-Generator/minecraft/css/style.css
DELETED
@@ -1,80 +0,0 @@
-body {
-    font-family: Verdana, Geneva, sans-serif;
-    font-size: 1.2em;
-    margin: 2%;
-    max-width: 100%;
-    padding: 80px 30px;
-    line-height: 1.65em;
-    background-image: url('https://huggingface.co/spaces/AchyuthGamer/Free-Accounts-Generator/resolve/main/img/minecraft.jpg');
-    color: #fff;
-    font-weight: 300;
-
-}
-
-h1 {
-    text-align: center;
-    margin: 19% 0 5% 0;
-    font-size: 60px;
-    text-shadow: 0 0 28px #FF0000, 0 0 28px #008000;
-}
-
-h4 {
-    text-align: center;
-    margin: 50% 0 5% 0;
-}
-
-#wordbox {
-    /*opacity: 0;*/
-    margin: 30px auto 0;
-    display: block;
-    width: 80%;
-    height: 50px;
-    font-size: 25px;
-    text-align: center;
-    background: #fff;
-    border-radius: 6px;
-    color: #black;
-    transition: 1s linear;
-}
-
-#button {
-    -webkit-box-sizing: border-box;
-    -moz-box-sizing: border-box;
-    box-sizing: border-box;
-    background: #0b7fba;
-    border: 0;
-    color: #fff;
-    font-size: 20px;
-    padding: 1em 2em;
-    cursor: pointer;
-    margin: 0 auto 80px;
-    display: block;
-    text-align: center;
-    border-radius: 6px;
-    font-weight: bold;
-    transition: all 0.3s ease;
-    background-image: linear-gradient(to right, #25aae1, #4481eb, #04befe, #3f86ed);
-    box-shadow: 0 4px 15px 0 rgba(65, 132, 234, 0.75);
-}
-
-#button:hover {
-    background-position: 100% 0;
-    -moz-transition: all 0.4s ease-in-out;
-    -o-transition: all 0.4s ease-in-out;
-    -webkit-transition: all 0.4s ease-in-out;
-    transition: all 0.4s ease-in-out;
-    transform: scale(1.2);
-    cursor: pointer;
-}
-
-#button:focus {
-    outline: none;
-}
-
-
-
-span {
-    position: bottom;
-    top: 0;
-    left: 0;
-    margin: 40px;
-}
spaces/Adapting/TrendFlow/mypages/__init__.py
DELETED
@@ -1,2 +0,0 @@
-from .welcome import welcome
-from .home import home
spaces/AgentVerse/agentVerse/agentverse/memory_manipulator/reflection.py
DELETED
@@ -1,330 +0,0 @@
-from __future__ import annotations
-from typing import List, Union, Optional, Any, TYPE_CHECKING
-from collections import defaultdict
-
-from pydantic import Field
-import numpy as np
-from datetime import datetime as dt
-
-import re
-
-from agentverse.llms.openai import get_embedding
-from sklearn.metrics.pairwise import cosine_similarity
-
-from agentverse.message import Message
-from agentverse.memory import BaseMemory
-
-from agentverse.logging import logger
-
-from . import memory_manipulator_registry
-from .base import BaseMemoryManipulator
-
-if TYPE_CHECKING:
-    from agentverse.memory import VectorStoreMemory
-    from agentverse.agents.base import BaseAgent
-
-
-IMPORTANCE_PROMPT = """On the scale of 1 to 10, where 1 is purely mundane \
-(e.g., brushing teeth, making bed) and 10 is \
-extremely poignant (e.g., a break up, college \
-acceptance), rate the likely poignancy of the \
-following piece of memory. \
-If you think it's too hard to rate it, you can give an inaccurate assessment. \
-The content or people mentioned is not real. You can hypothesis any reasonable context. \
-Please strictly only output one number. \
-Memory: {} \
-Rating: """
-IMMEDIACY_PROMPT = """On the scale of 1 to 10, where 1 is requiring no short time attention\
-(e.g., a bed is in the room) and 10 is \
-needing quick attention or immediate response(e.g., being required a reply by others), rate the likely immediacy of the \
-following statement. \
-If you think it's too hard to rate it, you can give an inaccurate assessment. \
-The content or people mentioned is not real. You can hypothesis any reasonable context. \
-Please strictly only output one number. \
-Memory: {} \
-Rating: """
-QUESTION_PROMPT = """Given only the information above, what are 3 most salient \
-high-level questions we can answer about the subjects in the statements?"""
-
-INSIGHT_PROMPT = """What at most 5 high-level insights can you infer from \
-the above statements? Only output insights with high confidence.
-example format: insight (because of 1, 5, 3)"""
-
-
-@memory_manipulator_registry.register("reflection")
-class Reflection(BaseMemoryManipulator):
-    memory: VectorStoreMemory = None
-    agent: BaseAgent = None
-
-    reflection: str = ""
-
-    importance_threshold: int = 10
-    accumulated_importance: int = 0
-
-    memory2importance: dict = {}
-    memory2immediacy: dict = {}
-    memory2time: defaultdict = Field(default=defaultdict(dict))
-
-    # TODO newly added func from generative agents
-
-    def manipulate_memory(self) -> None:
-        # reflect here
-        if self.should_reflect():
-            logger.debug(
-                f"Agent {self.agent.name} is now doing reflection since accumulated_importance={self.accumulated_importance} < reflection_threshold={self.importance_threshold}"
-            )
-            self.reflection = self.reflect()
-            return self.reflection
-        else:
-            logger.debug(
-                f"Agent {self.agent.name} doesn't reflect since accumulated_importance={self.accumulated_importance} < reflection_threshold={self.importance_threshold}"
-            )
-
-            return ""
-
-    def get_accumulated_importance(self):
-        accumulated_importance = 0
-
-        for memory in self.memory.messages:
-            if (
-                memory.content not in self.memory2importance
-                or memory.content not in self.memory2immediacy
-            ):
-                self.memory2importance[memory.content] = self.get_importance(
-                    memory.content
-                )
-                self.memory2immediacy[memory.content] = self.get_immediacy(
-                    memory.content
-                )
-
-        for score in self.memory2importance.values():
-            accumulated_importance += score
-
-        self.accumulated_importance = accumulated_importance
-
-        return accumulated_importance
-
-    def should_reflect(self):
-        if self.get_accumulated_importance() >= self.importance_threshold:
-            # double the importance_threshold
-            self.importance_threshold *= 2
-            return True
-        else:
-            return False
-
-    def get_questions(self, texts):
-        prompt = "\n".join(texts) + "\n" + QUESTION_PROMPT
-        result = self.agent.llm.generate_response(prompt)
-        result = result.content
-        questions = [q for q in result.split("\n") if len(q.strip()) > 0]
-        questions = questions[:3]
-        return questions
-
-    def get_insights(self, statements):
-        prompt = ""
-        for i, st in enumerate(statements):
-            prompt += str(i + 1) + ". " + st + "\n"
-        prompt += INSIGHT_PROMPT
-        result = self.agent.llm.generate_response(prompt)
-        result = result.content
-        insights = [isg for isg in result.split("\n") if len(isg.strip()) > 0][:5]
-        insights = [".".join(i.split(".")[1:]) for i in insights]
-        # remove insight pointers for now
-        insights = [i.split("(")[0].strip() for i in insights]
-        return insights
-
-    def get_importance(self, content: str):
-        """
-        Exploit GPT to evaluate the importance of this memory
-        """
-        prompt = IMPORTANCE_PROMPT.format(content)
-        result = self.memory.llm.generate_response(prompt)
-
-        try:
-            score = int(re.findall(r"\s*(\d+)\s*", result.content)[0])
-        except Exception as e:
-            logger.warn(
-                f"Found error {e} Abnormal result of importance rating '{result}'. Setting default value"
-            )
-            score = 0
-        return score
-
-    def get_immediacy(self, content: str):
-        """
-        Exploit GPT to evaluate the immediacy of this memory
-        """
-        prompt = IMMEDIACY_PROMPT.format(content)
-        result = self.memory.llm.generate_response(prompt)
-        try:
-            score = int(re.findall(r"\s*(\d+)\s*", result.content)[0])
-        except Exception as e:
-            logger.warn(
-                f"Found error {e} Abnormal result of importance rating '{result}'. Setting default value"
-            )
-            score = 0
-        return score
-
-    def query_similarity(
-        self,
-        text: Union[str, List[str]],
-        k: int,
-        memory_bank: List,
-        current_time=dt.now(),
-        nms_threshold=0.99,
-    ) -> List[str]:
-        """
-        get top-k entry based on recency, relevance, importance, immediacy
-        The query result can be Short-term or Long-term queried result.
-        formula is
-        `score= sim(q,v) *max(LTM_score, STM_score)`
-        `STM_score=time_score(createTime)*immediacy`
-        `LTM_score=time_score(accessTime)*importance`
-        time score is exponential decay weight. stm decays faster.
-
-        The query supports querying based on multiple texts and only gives non-overlapping results
-        If nms_threshold is not 1, nms mechanism is activated. By default,
-        use soft nms with modified iou base (score starts to decay iff cos sim is higher than this value,
-        and decay weight at this value is 0. rather than 1-threshold).
-
-        Args:
-            text: str
-            k: int
-            memory_bank: List
-            current_time: dt.now
-            nms_threshold: float = 0.99
-
-        Returns: List[str]
-        """
-        assert len(text) > 0
-        texts = [text] if isinstance(text, str) else text
-        maximum_score = None
-        for text in texts:
-            embedding = get_embedding(text)
-            score = []
-            for memory in memory_bank:
-                if memory.content not in self.memory2time:
-                    self.memory2time[memory.content]["last_access_time"] = dt.now()
-                    self.memory2time[memory.content]["create_time"] = dt.now()
-
-                last_access_time_diff = (
-                    current_time - self.memory2time[memory.content]["last_access_time"]
-                ).total_seconds() // 3600
-                recency = np.power(
-                    0.99, last_access_time_diff
-                )  # TODO: review the metaparameter 0.99
-
-                create_time_diff = (
-                    current_time - self.memory2time[memory.content]["create_time"]
-                ).total_seconds() // 60
-                instancy = np.power(
-                    0.90, create_time_diff
-                )  # TODO: review the metaparameter 0.90
-
-                relevance = cosine_similarity(
-                    np.array(embedding).reshape(1, -1),
-                    np.array(self.memory.memory2embedding[memory.content]).reshape(
-                        1, -1
-                    ),
-                )[0][0]
-
-                if (
-                    memory.content not in self.memory2importance
-                    or memory.content not in self.memory2immediacy
-                ):
-                    self.memory2importance[memory.content] = self.get_importance(
-                        memory.content
-                    )
-                    self.memory2immediacy[memory.content] = self.get_immediacy(
-                        memory.content
-                    )
-
-                importance = self.memory2importance[memory.content] / 10
-                immediacy = self.memory2immediacy[memory.content] / 10
-
-                ltm_w = recency * importance
-                stm_w = instancy * immediacy
-
-                score.append(relevance * np.maximum(ltm_w, stm_w))
-
-            score = np.array(score)
-
-            if maximum_score is not None:
-                maximum_score = np.maximum(score, maximum_score)
-            else:
-                maximum_score = score
-
-        if nms_threshold == 1.0:
-            # no nms is triggered
-            top_k_indices = np.argsort(maximum_score)[-k:][::-1]
-        else:
-            # TODO: soft-nms
-            assert 0 <= nms_threshold < 1
-            top_k_indices = []
-            while len(top_k_indices) < min(k, len(memory_bank)):
-                top_index = np.argmax(maximum_score)
-                top_k_indices.append(top_index)
-                maximum_score[top_index] = -1  # anything to prevent being chosen again
-                top_embedding = self.memory.memory2embedding[
-                    memory_bank[top_index].content
-                ]
-                cos_sim = cosine_similarity(
-                    np.array(top_embedding).reshape(1, -1),
-                    np.array(
-                        [
-                            self.memory.memory2embedding[memory.content]
-                            for memory in memory_bank
-                        ]
-                    ),
-                )[0]
-                score_weight = np.ones_like(maximum_score)
-                score_weight[cos_sim >= nms_threshold] -= (
-                    cos_sim[cos_sim >= nms_threshold] - nms_threshold
-                ) / (1 - nms_threshold)
-                maximum_score = maximum_score * score_weight
-
-        # access them and refresh the access time
-        for i in top_k_indices:
-            self.memory2time[memory_bank[i].content]["last_access_time"] = current_time
-        # sort them in time periods. if the data tag is 'observation', add time info output.
-        top_k_indices = sorted(
-            top_k_indices,
-            key=lambda x: self.memory2time[memory_bank[x].content]["create_time"],
-        )
-        query_results = []
-        for i in top_k_indices:
-            query_result = memory_bank[i].content
-            query_results.append(query_result)
-
-        return query_results
-
-    def get_memories_of_interest_oneself(self):
-        memories_of_interest = []
-        for memory in self.memory.messages[-100:]:
-            if memory.sender == self.agent.name:
-                memories_of_interest.append(memory)
-        return memories_of_interest
-
-    def reflect(self):
-        """
-        initiate a reflection that inserts high level knowledge to memory
-        """
-        memories_of_interest = self.get_memories_of_interest_oneself()
-        questions = self.get_questions([m.content for m in memories_of_interest])
-        statements = self.query_similarity(
-            questions, len(questions) * 10, memories_of_interest
-        )
-        insights = self.get_insights(statements)
-        logger.info(self.agent.name + f" Insights: {insights}")
-        for insight in insights:
-            # convert insight to messages
-            # TODO currently only oneself can see its own reflection
-            insight_message = Message(
-                content=insight, sender=self.agent.name, receiver={self.agent.name}
-            )
-            self.memory.add_message([insight_message])
-        reflection = "\n".join(insights)
-        return reflection
-
-    def reset(self) -> None:
-        self.reflection = ""
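
The `query_similarity` docstring above defines the retrieval score as `sim(q, v) * max(LTM_score, STM_score)`, where recency decays per hour (base 0.99) and instancy decays per minute (base 0.90). A toy numeric sketch of that formula, with made-up ratings:

```python
# Toy illustration of the deleted scoring rule:
#   score = relevance * max(recency * importance, instancy * immediacy)
import numpy as np

relevance = 0.8            # cosine similarity of query and memory embeddings
hours_since_access = 5.0
minutes_since_create = 30.0
importance = 7 / 10        # GPT-rated importance, normalized to [0, 1]
immediacy = 3 / 10         # GPT-rated immediacy, normalized to [0, 1]

recency = np.power(0.99, hours_since_access)     # long-term decay, per hour
instancy = np.power(0.90, minutes_since_create)  # short-term decay, per minute

ltm_w = recency * importance
stm_w = instancy * immediacy
score = relevance * np.maximum(ltm_w, stm_w)
print(round(float(score), 4))  # the long-term weight dominates here
```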
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/csvscenario-plugin.js
DELETED
@@ -1,18 +0,0 @@
-import CSVScenario from './csvscenario.js';
-
-class CSVScenarioPlugin extends Phaser.Plugins.BasePlugin {
-    constructor(pluginManager) {
-        super(pluginManager);
-    }
-
-    start() {
-        var eventEmitter = this.game.events;
-        eventEmitter.on('destroy', this.destroy, this);
-    }
-
-    add(scene, config) {
-        return new CSVScenario(scene, config);
-    }
-}
-
-export default CSVScenarioPlugin;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/dialog-quest/DataMethods.js
DELETED
@@ -1,25 +0,0 @@
-export default {
-    getData(key, defaultValue) {
-        return this.questionManager.getData(key, defaultValue);
-    },
-
-    setData(key, value) {
-        this.questionManager.setData(key, value);
-        return this;
-    },
-
-    incData(key, inc, defaultValue) {
-        this.questionManager.incData(key, inc, defaultValue);
-        return this;
-    },
-
-    mulData(key, mul, defaultValue) {
-        this.questionManager.mulData(key, mul, defaultValue);
-        return this;
-    },
-
-    clearData() {
-        this.questionManager.clearData();
-        return this;
-    },
-};
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/checkbox/Factory.js
DELETED
@@ -1,13 +0,0 @@
-import Checkbox from './Checkbox.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('checkbox', function (x, y, width, height, color, config) {
-    var gameObject = new Checkbox(this.scene, x, y, width, height, color, config);
-    this.scene.add.existing(gameObject);
-    return gameObject;
-});
-
-SetValue(window, 'RexPlugins.UI.Checkbox', Checkbox);
-
-export default Checkbox;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/OverCell.js
DELETED
@@ -1,30 +0,0 @@
-import EmitCellEvent from './EmitCellEvent.js';
-
-var OverCell = function (table, tableConfig) {
-    table
-        .on('pointermove', OnMove, this)
-        .on('pointerover', OnMove, this)
-        .on('pointerout', OnOut, this)  // pointer-up is included too
-}
-
-var OnMove = function (pointer, localX, localY, event) {
-    var table = this.childrenMap.child;
-    var cellIndex = table.pointToCellIndex(pointer.worldX, pointer.worldY);
-    if (cellIndex === table.input.lastOverCellIndex) {
-        return;
-    }
-
-    var preCellIndex = table.input.lastOverCellIndex;
-    table.input.lastOverCellIndex = cellIndex;
-    EmitCellEvent(this.eventEmitter, 'cell.out', table, preCellIndex, undefined, pointer, event);
-    EmitCellEvent(this.eventEmitter, 'cell.over', table, cellIndex, undefined, pointer, event);
-}
-
-var OnOut = function (pointer, event) {
-    var table = this.childrenMap.child;
-    var cellIndex = table.input.lastOverCellIndex;
-    table.input.lastOverCellIndex = undefined;
-    EmitCellEvent(this.eventEmitter, 'cell.out', table, cellIndex, undefined, pointer, event);
-}
-
-export default OverCell;
spaces/AlexZou/Deploy_Restoration/net/IntmdSequential.py
DELETED
@@ -1,19 +0,0 @@
-import torch.nn as nn
-
-
-class IntermediateSequential(nn.Sequential):
-    def __init__(self, *args, return_intermediate=False):
-        super().__init__(*args)
-        self.return_intermediate = return_intermediate
-
-    def forward(self, input):
-        if not self.return_intermediate:
-            return super().forward(input)
-
-        intermediate_outputs = {}
-        output = input
-        for name, module in self.named_children():
-            output = intermediate_outputs[name] = module(output)
-
-        return output, intermediate_outputs
-
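
The deleted class is an `nn.Sequential` that can also hand back every per-layer activation. A self-contained usage sketch of the pattern, restated inline so it runs on its own:

```python
# Self-contained sketch: an nn.Sequential that also returns a dict of
# per-child outputs, mirroring the deleted IntermediateSequential.
import torch
import torch.nn as nn

class IntermediateSequential(nn.Sequential):
    def __init__(self, *args, return_intermediate=False):
        super().__init__(*args)
        self.return_intermediate = return_intermediate

    def forward(self, x):
        if not self.return_intermediate:
            return super().forward(x)
        intermediate_outputs = {}
        for name, module in self.named_children():
            x = intermediate_outputs[name] = module(x)
        return x, intermediate_outputs

net = IntermediateSequential(nn.Linear(4, 8), nn.ReLU(), return_intermediate=True)
out, intermediates = net(torch.randn(2, 4))
print(out.shape, list(intermediates.keys()))  # torch.Size([2, 8]) ['0', '1']
```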
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/modules.py
DELETED
@@ -1,387 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
-    def __init__(self, channels, eps=1e-5):
-        super().__init__()
-        self.channels = channels
-        self.eps = eps
-
-        self.gamma = nn.Parameter(torch.ones(channels))
-        self.beta = nn.Parameter(torch.zeros(channels))
-
-    def forward(self, x):
-        x = x.transpose(1, -1)
-        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-        return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
-    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-        super().__init__()
-        self.in_channels = in_channels
-        self.hidden_channels = hidden_channels
-        self.out_channels = out_channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.p_dropout = p_dropout
-        assert n_layers > 1, "Number of layers should be larger than 0."
-
-        self.conv_layers = nn.ModuleList()
-        self.norm_layers = nn.ModuleList()
-        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-        self.norm_layers.append(LayerNorm(hidden_channels))
-        self.relu_drop = nn.Sequential(
-            nn.ReLU(),
-            nn.Dropout(p_dropout))
-        for _ in range(n_layers-1):
-            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-            self.norm_layers.append(LayerNorm(hidden_channels))
-        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-        self.proj.weight.data.zero_()
-        self.proj.bias.data.zero_()
-
-    def forward(self, x, x_mask):
-        x_org = x
-        for i in range(self.n_layers):
-            x = self.conv_layers[i](x * x_mask)
-            x = self.norm_layers[i](x)
-            x = self.relu_drop(x)
-        x = x_org + self.proj(x)
-        return x * x_mask
-
-
-class DDSConv(nn.Module):
-    """
-    Dilated and Depth-Separable Convolution
-    """
-    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-        super().__init__()
-        self.channels = channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.p_dropout = p_dropout
-
-        self.drop = nn.Dropout(p_dropout)
-        self.convs_sep = nn.ModuleList()
-        self.convs_1x1 = nn.ModuleList()
-        self.norms_1 = nn.ModuleList()
-        self.norms_2 = nn.ModuleList()
-        for i in range(n_layers):
-            dilation = kernel_size ** i
-            padding = (kernel_size * dilation - dilation) // 2
-            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
-                                            groups=channels, dilation=dilation, padding=padding))
-            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-            self.norms_1.append(LayerNorm(channels))
-            self.norms_2.append(LayerNorm(channels))
-
-    def forward(self, x, x_mask, g=None):
-        if g is not None:
-            x = x + g
-        for i in range(self.n_layers):
-            y = self.convs_sep[i](x * x_mask)
-            y = self.norms_1[i](y)
-            y = F.gelu(y)
-            y = self.convs_1x1[i](y)
-            y = self.norms_2[i](y)
-            y = F.gelu(y)
-            y = self.drop(y)
-            x = x + y
-        return x * x_mask
-
-
-class WN(torch.nn.Module):
-    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
-        super(WN, self).__init__()
-        assert(kernel_size % 2 == 1)
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size,
-        self.dilation_rate = dilation_rate
-        self.n_layers = n_layers
-        self.gin_channels = gin_channels
-        self.p_dropout = p_dropout
-
-        self.in_layers = torch.nn.ModuleList()
-        self.res_skip_layers = torch.nn.ModuleList()
-        self.drop = nn.Dropout(p_dropout)
-
-        if gin_channels != 0:
-            cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
-            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
-        for i in range(n_layers):
-            dilation = dilation_rate ** i
-            padding = int((kernel_size * dilation - dilation) / 2)
-            in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
-                                       dilation=dilation, padding=padding)
-            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
-            self.in_layers.append(in_layer)
-
-            # last one is not necessary
-            if i < n_layers - 1:
-                res_skip_channels = 2 * hidden_channels
-            else:
-                res_skip_channels = hidden_channels
-
-            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
-            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
-            self.res_skip_layers.append(res_skip_layer)
-
-    def forward(self, x, x_mask, g=None, **kwargs):
-        output = torch.zeros_like(x)
-        n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
-        if g is not None:
-            g = self.cond_layer(g)
-
-        for i in range(self.n_layers):
-            x_in = self.in_layers[i](x)
-            if g is not None:
-                cond_offset = i * 2 * self.hidden_channels
-                g_l = g[:, cond_offset:cond_offset+2*self.hidden_channels, :]
-            else:
-                g_l = torch.zeros_like(x_in)
-
-            acts = commons.fused_add_tanh_sigmoid_multiply(
-                x_in,
-                g_l,
-                n_channels_tensor)
-            acts = self.drop(acts)
-
-            res_skip_acts = self.res_skip_layers[i](acts)
-            if i < self.n_layers - 1:
-                res_acts = res_skip_acts[:, :self.hidden_channels, :]
-                x = (x + res_acts) * x_mask
-                output = output + res_skip_acts[:, self.hidden_channels:, :]
-            else:
-                output = output + res_skip_acts
-        return output * x_mask
-
-    def remove_weight_norm(self):
-        if self.gin_channels != 0:
-            torch.nn.utils.remove_weight_norm(self.cond_layer)
-        for l in self.in_layers:
-            torch.nn.utils.remove_weight_norm(l)
-        for l in self.res_skip_layers:
-            torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
-    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-        super(ResBlock1, self).__init__()
-        self.convs1 = nn.ModuleList([
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                               padding=get_padding(kernel_size, dilation[0]))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                               padding=get_padding(kernel_size, dilation[1]))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
-                               padding=get_padding(kernel_size, dilation[2])))
-        ])
-        self.convs1.apply(init_weights)
-
-        self.convs2 = nn.ModuleList([
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1)))
-        ])
-        self.convs2.apply(init_weights)
-
-    def forward(self, x, x_mask=None):
-        for c1, c2 in zip(self.convs1, self.convs2):
-            xt = F.leaky_relu(x, LRELU_SLOPE)
-            if x_mask is not None:
-                xt = xt * x_mask
-            xt = c1(xt)
-            xt = F.leaky_relu(xt, LRELU_SLOPE)
-            if x_mask is not None:
-                xt = xt * x_mask
-            xt = c2(xt)
-            x = xt + x
-        if x_mask is not None:
-            x = x * x_mask
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.convs1:
-            remove_weight_norm(l)
-        for l in self.convs2:
-            remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
-    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-        super(ResBlock2, self).__init__()
-        self.convs = nn.ModuleList([
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                               padding=get_padding(kernel_size, dilation[0]))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                               padding=get_padding(kernel_size, dilation[1])))
-        ])
-        self.convs.apply(init_weights)
-
-    def forward(self, x, x_mask=None):
-        for c in self.convs:
-            xt = F.leaky_relu(x, LRELU_SLOPE)
-            if x_mask is not None:
-                xt = xt * x_mask
-            xt = c(xt)
-            x = xt + x
-        if x_mask is not None:
-            x = x * x_mask
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.convs:
-            remove_weight_norm(l)
-
-
-class Log(nn.Module):
-    def forward(self, x, x_mask, reverse=False, **kwargs):
-        if not reverse:
-            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
-            logdet = torch.sum(-y, [1, 2])
-            return y, logdet
-        else:
-            x = torch.exp(x) * x_mask
-            return x
-
-
-class Flip(nn.Module):
-    def forward(self, x, *args, reverse=False, **kwargs):
-        x = torch.flip(x, [1])
-        if not reverse:
-            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
-            return x, logdet
-        else:
-            return x
-
-
-class ElementwiseAffine(nn.Module):
-    def __init__(self, channels):
-        super().__init__()
-        self.channels = channels
-        self.m = nn.Parameter(torch.zeros(channels, 1))
-        self.logs = nn.Parameter(torch.zeros(channels, 1))
-
-    def forward(self, x, x_mask, reverse=False, **kwargs):
-        if not reverse:
-            y = self.m + torch.exp(self.logs) * x
-            y = y * x_mask
-            logdet = torch.sum(self.logs * x_mask, [1, 2])
-            return y, logdet
-        else:
-            x = (x - self.m) * torch.exp(-self.logs) * x_mask
-            return x
-
-
-class ResidualCouplingLayer(nn.Module):
-    def __init__(self,
-                 channels,
-                 hidden_channels,
-                 kernel_size,
-                 dilation_rate,
-                 n_layers,
-                 p_dropout=0,
-                 gin_channels=0,
-                 mean_only=False):
-        assert channels % 2 == 0, "channels should be divisible by 2"
-        super().__init__()
-        self.channels = channels
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
-        self.dilation_rate = dilation_rate
-        self.n_layers = n_layers
-        self.half_channels = channels // 2
-        self.mean_only = mean_only
-
-        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
-        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-        self.post.weight.data.zero_()
-        self.post.bias.data.zero_()
-
-    def forward(self, x, x_mask, g=None, reverse=False):
-        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-        h = self.pre(x0) * x_mask
-        h = self.enc(h, x_mask, g=g)
-        stats = self.post(h) * x_mask
-        if not self.mean_only:
-            m, logs = torch.split(stats, [self.half_channels]*2, 1)
-        else:
-            m = stats
-            logs = torch.zeros_like(m)
-
-        if not reverse:
-            x1 = m + x1 * torch.exp(logs) * x_mask
-            x = torch.cat([x0, x1], 1)
-            logdet = torch.sum(logs, [1, 2])
-            return x, logdet
-        else:
-            x1 = (x1 - m) * torch.exp(-logs) * x_mask
-            x = torch.cat([x0, x1], 1)
-            return x
-
-
-class ConvFlow(nn.Module):
-    def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
-        super().__init__()
-        self.in_channels = in_channels
-        self.filter_channels = filter_channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.num_bins = num_bins
-        self.tail_bound = tail_bound
-        self.half_channels = in_channels // 2
-
-        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
-        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
-        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
-        self.proj.weight.data.zero_()
-        self.proj.bias.data.zero_()
-
-    def forward(self, x, x_mask, g=None, reverse=False):
-        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-        h = self.pre(x0)
-        h = self.convs(h, x_mask, g=g)
-        h = self.proj(h) * x_mask
-
-        b, c, t = x0.shape
-        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, cx?, t] -> [b, c, t, ?]
-
-        unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
-        unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
-        unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
-        x1, logabsdet = piecewise_rational_quadratic_transform(x1,
-                                                               unnormalized_widths,
-                                                               unnormalized_heights,
-                                                               unnormalized_derivatives,
-                                                               inverse=reverse,
-                                                               tails='linear',
-                                                               tail_bound=self.tail_bound)
-
-        x = torch.cat([x0, x1], 1) * x_mask
-        logdet = torch.sum(logabsdet * x_mask, [1, 2])
-        if not reverse:
-            return x, logdet
-        else:
-            return x
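
The `ResidualCouplingLayer` above is a standard affine coupling flow: one half of the channels passes through unchanged and parameterizes a shift and log-scale for the other half, so the inverse is closed-form. A numpy sketch of the forward/inverse pair, where the toy `m()` and `logs()` functions stand in for the WaveNet-style encoder output:

```python
# Affine coupling on a pair of channel halves:
#   forward: y1 = m(x0) + x1 * exp(logs(x0)),  log|det J| = sum(logs(x0))
#   inverse: x1 = (y1 - m(x0)) * exp(-logs(x0))
# m() and logs() below are arbitrary stand-ins for the WN encoder.
import numpy as np

def m(x0):
    return 0.5 * x0

def logs(x0):
    return 0.1 * x0

x0, x1 = np.array([1.0, 2.0]), np.array([3.0, 4.0])

# forward pass
y1 = m(x0) + x1 * np.exp(logs(x0))
logdet = np.sum(logs(x0))

# inverse recovers x1 exactly
x1_rec = (y1 - m(x0)) * np.exp(-logs(x0))
print(np.allclose(x1, x1_rec), logdet)  # True 0.30000000000000004
```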
spaces/Alpaca233/SadTalker/src/audio2exp_models/audio2exp.py
DELETED
@@ -1,41 +0,0 @@
-from tqdm import tqdm
-import torch
-from torch import nn
-
-
-class Audio2Exp(nn.Module):
-    def __init__(self, netG, cfg, device, prepare_training_loss=False):
-        super(Audio2Exp, self).__init__()
-        self.cfg = cfg
-        self.device = device
-        self.netG = netG.to(device)
-
-    def test(self, batch):
-
-        mel_input = batch['indiv_mels']  # bs T 1 80 16
-        bs = mel_input.shape[0]
-        T = mel_input.shape[1]
-
-        exp_coeff_pred = []
-
-        for i in tqdm(range(0, T, 10), 'audio2exp:'):  # every 10 frames
-
-            current_mel_input = mel_input[:, i:i+10]
-
-            # ref = batch['ref'][:, :, :64].repeat((1,current_mel_input.shape[1],1)) # bs T 64
-            ref = batch['ref'][:, :, :64][:, i:i+10]
-            ratio = batch['ratio_gt'][:, i:i+10]  # bs T
-
-            audiox = current_mel_input.view(-1, 1, 80, 16)  # bs*T 1 80 16
-
-            curr_exp_coeff_pred = self.netG(audiox, ref, ratio)  # bs T 64
-
-            exp_coeff_pred += [curr_exp_coeff_pred]
-
-        # BS x T x 64
-        results_dict = {
-            'exp_coeff_pred': torch.cat(exp_coeff_pred, axis=1)
-        }
-        return results_dict
-
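
The `test` loop above runs the generator on 10-frame windows and concatenates predictions along time. A minimal sketch of that chunking pattern, with a dummy mean-pool standing in for `netG` so it runs standalone:

```python
# Sketch of 10-frame windowed inference followed by time-axis concat;
# the "network" is a dummy reduction so the example is self-contained.
import torch

T, window = 47, 10
mel = torch.randn(1, T, 1, 80, 16)                # bs x T x 1 x 80 x 16

preds = []
for i in range(0, T, window):
    chunk = mel[:, i:i + window]                  # bs x t x 1 x 80 x 16
    audiox = chunk.reshape(-1, 1, 80, 16)         # (bs*t) x 1 x 80 x 16
    out = audiox.mean(dim=(1, 2, 3))              # dummy per-frame scalar
    preds.append(out.view(1, -1, 1).expand(1, chunk.shape[1], 64))
result = torch.cat(preds, dim=1)                  # bs x T x 64
print(result.shape)  # torch.Size([1, 47, 64])
```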
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/docs/eval.md
DELETED
@@ -1,31 +0,0 @@
-## Eval on ICCV2021-MFR
-
-coming soon.
-
-
-## Eval IJBC
-You can eval ijbc with pytorch or onnx.
-
-
-1. Eval IJBC With Onnx
-```shell
-CUDA_VISIBLE_DEVICES=0 python onnx_ijbc.py --model-root ms1mv3_arcface_r50 --image-path IJB_release/IJBC --result-dir ms1mv3_arcface_r50
-```
-
-2. Eval IJBC With Pytorch
-```shell
-CUDA_VISIBLE_DEVICES=0,1 python eval_ijbc.py \
-    --model-prefix ms1mv3_arcface_r50/backbone.pth \
-    --image-path IJB_release/IJBC \
-    --result-dir ms1mv3_arcface_r50 \
-    --batch-size 128 \
-    --job ms1mv3_arcface_r50 \
-    --target IJBC \
-    --network iresnet50
-```
-
-## Inference
-
-```shell
-python inference.py --weight ms1mv3_arcface_r50/backbone.pth --network r50
-```
spaces/Amjadd/BookGPT/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: BookGPT
-emoji: 😻
-colorFrom: indigo
-colorTo: purple
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-duplicated_from: pritish/BookGPT
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/unet3d-cond.md
DELETED
@@ -1,13 +0,0 @@
-# UNet3DConditionModel
-
-The [UNet](https://huggingface.co/papers/1505.04597) model was originally introduced by Ronneberger et al. for biomedical image segmentation, but it is also commonly used in 🤗 Diffusers because it outputs images that are the same size as the input. It is one of the most important components of a diffusion system because it facilitates the actual diffusion process. There are several variants of the UNet model in 🤗 Diffusers, depending on its number of dimensions and whether it is a conditional model or not. This is a 3D UNet conditional model.
-
-The abstract from the paper is:
-
-*There is large consent that successful training of deep networks requires many thousand annotated training samples. In this paper, we present a network and training strategy that relies on the strong use of data augmentation to use the available annotated samples more efficiently. The architecture consists of a contracting path to capture context and a symmetric expanding path that enables precise localization. We show that such a network can be trained end-to-end from very few images and outperforms the prior best method (a sliding-window convolutional network) on the ISBI challenge for segmentation of neuronal structures in electron microscopic stacks. Using the same network trained on transmitted light microscopy images (phase contrast and DIC) we won the ISBI cell tracking challenge 2015 in these categories by a large margin. Moreover, the network is fast. Segmentation of a 512x512 image takes less than a second on a recent GPU. The full implementation (based on Caffe) and the trained networks are available at http://lmb.informatik.uni-freiburg.de/people/ronneber/u-net.*
-
-## UNet3DConditionModel
-[[autodoc]] UNet3DConditionModel
-
-## UNet3DConditionOutput
-[[autodoc]] models.unet_3d_condition.UNet3DConditionOutput
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/one_step_unet.py
DELETED
@@ -1,24 +0,0 @@
-#!/usr/bin/env python3
-import torch
-
-from diffusers import DiffusionPipeline
-
-
-class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
-    def __init__(self, unet, scheduler):
-        super().__init__()
-
-        self.register_modules(unet=unet, scheduler=scheduler)
-
-    def __call__(self):
-        image = torch.randn(
-            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
-        )
-        timestep = 1
-
-        model_output = self.unet(image, timestep).sample
-        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
-
-        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
-
-        return result
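
Community pipelines like this one are loaded by name through `DiffusionPipeline.from_pretrained`. A hedged usage sketch, following the pattern documented for diffusers community pipelines (checkpoint id taken from those docs; network access assumed):

```python
# Sketch: loading the community pipeline above by its registered name.
# Requires the `diffusers` package and a network download of the checkpoint.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32", custom_pipeline="one_step_unet"
)
output = pipe()      # a tensor of ones with the shape of one denoised sample
print(output.shape)
```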
spaces/Andy1621/uniformer_image_detection/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py
DELETED
@@ -1,50 +0,0 @@
-_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
-model = dict(
-    neck=dict(
-        type='FPN_CARAFE',
-        in_channels=[256, 512, 1024, 2048],
-        out_channels=256,
-        num_outs=5,
-        start_level=0,
-        end_level=-1,
-        norm_cfg=None,
-        act_cfg=None,
-        order=('conv', 'norm', 'act'),
-        upsample_cfg=dict(
-            type='carafe',
-            up_kernel=5,
-            up_group=1,
-            encoder_kernel=3,
-            encoder_dilation=1,
-            compressed_channels=64)))
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='LoadAnnotations', with_bbox=True),
-    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
-    dict(type='RandomFlip', flip_ratio=0.5),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='Pad', size_divisor=64),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiScaleFlipAug',
-        img_scale=(1333, 800),
-        flip=False,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='Pad', size_divisor=64),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ])
-]
-data = dict(
-    train=dict(pipeline=train_pipeline),
-    val=dict(pipeline=test_pipeline),
-    test=dict(pipeline=test_pipeline))
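
MMDetection configs like this one inherit from their `_base_` file and merge the overrides on top. A quick sketch of inspecting the merged result with `mmcv.Config` (the relative path assumes an mmdetection checkout):

```python
# Sketch: resolving the _base_ inheritance of an mmdet config file.
from mmcv import Config

cfg = Config.fromfile('configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py')
print(cfg.model.neck.type)                    # 'FPN_CARAFE' (overridden here)
print(cfg.model.neck.upsample_cfg.up_kernel)  # 5
```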
spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_ssd300_coco.py
DELETED
@@ -1,8 +0,0 @@
-_base_ = '../ssd/ssd300_coco.py'
-
-model = dict(
-    bbox_head=dict(type='PISASSDHead'),
-    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
-
-optimizer_config = dict(
-    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/reppoints_detector.py
DELETED
@@ -1,22 +0,0 @@
-from ..builder import DETECTORS
-from .single_stage import SingleStageDetector
-
-
-@DETECTORS.register_module()
-class RepPointsDetector(SingleStageDetector):
-    """RepPoints: Point Set Representation for Object Detection.
-
-    This detector is the implementation of:
-    - RepPoints detector (https://arxiv.org/pdf/1904.11490)
-    """
-
-    def __init__(self,
-                 backbone,
-                 neck,
-                 bbox_head,
-                 train_cfg=None,
-                 test_cfg=None,
-                 pretrained=None):
-        super(RepPointsDetector,
-              self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
-                             pretrained)
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/ui.py
DELETED
@@ -1,251 +0,0 @@
|
|
1 |
-
import copy
|
2 |
-
from pathlib import Path
|
3 |
-
|
4 |
-
import gradio as gr
|
5 |
-
import torch
|
6 |
-
import yaml
|
7 |
-
|
8 |
-
from modules import shared
|
9 |
-
|
10 |
-
|
11 |
-
with open(Path(__file__).resolve().parent / '../css/NotoSans/stylesheet.css', 'r') as f:
|
12 |
-
css = f.read()
|
13 |
-
with open(Path(__file__).resolve().parent / '../css/main.css', 'r') as f:
|
14 |
-
css += f.read()
|
15 |
-
with open(Path(__file__).resolve().parent / '../js/main.js', 'r') as f:
|
16 |
-
js = f.read()
|
17 |
-
with open(Path(__file__).resolve().parent / '../js/save_files.js', 'r') as f:
|
18 |
-
save_files_js = f.read()
|
19 |
-
with open(Path(__file__).resolve().parent / '../js/switch_tabs.js', 'r') as f:
    switch_tabs_js = f.read()

with open(Path(__file__).resolve().parent / '../js/show_controls.js', 'r') as f:
    show_controls_js = f.read()

refresh_symbol = '🔄'
delete_symbol = '🗑️'
save_symbol = '💾'

theme = gr.themes.Default(
    font=['Noto Sans', 'Helvetica', 'ui-sans-serif', 'system-ui', 'sans-serif'],
    font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
).set(
    border_color_primary='#c5c5d2',
    button_large_padding='6px 12px',
    body_text_color_subdued='#484848',
    background_fill_secondary='#eaeaea'
)

if Path("notification.mp3").exists():
    audio_notification_js = "document.querySelector('#audio_notification audio')?.play();"
else:
    audio_notification_js = ""


def list_model_elements():
    elements = [
        'loader',
        'filter_by_loader',
        'cpu_memory',
        'auto_devices',
        'disk',
        'cpu',
        'bf16',
        'load_in_8bit',
        'trust_remote_code',
        'use_fast',
        'load_in_4bit',
        'compute_dtype',
        'quant_type',
        'use_double_quant',
        'wbits',
        'groupsize',
        'model_type',
        'pre_layer',
        'triton',
        'desc_act',
        'no_inject_fused_attention',
        'no_inject_fused_mlp',
        'no_use_cuda_fp16',
        'disable_exllama',
        'cfg_cache',
        'threads',
        'threads_batch',
        'n_batch',
        'no_mmap',
        'mlock',
        'mul_mat_q',
        'n_gpu_layers',
        'tensor_split',
        'n_ctx',
        'llama_cpp_seed',
        'gpu_split',
        'max_seq_len',
        'compress_pos_emb',
        'alpha_value',
        'rope_freq_base',
        'numa',
    ]

    for i in range(torch.cuda.device_count()):
        elements.append(f'gpu_memory_{i}')

    return elements


def list_interface_input_elements():
    elements = [
        'max_new_tokens',
        'auto_max_new_tokens',
        'max_tokens_second',
        'seed',
        'temperature',
        'top_p',
        'top_k',
        'typical_p',
        'epsilon_cutoff',
        'eta_cutoff',
        'repetition_penalty',
        'repetition_penalty_range',
        'encoder_repetition_penalty',
        'no_repeat_ngram_size',
        'min_length',
        'do_sample',
        'penalty_alpha',
        'num_beams',
        'length_penalty',
        'early_stopping',
        'mirostat_mode',
        'mirostat_tau',
        'mirostat_eta',
        'grammar_string',
        'negative_prompt',
        'guidance_scale',
        'add_bos_token',
        'ban_eos_token',
        'custom_token_bans',
        'truncation_length',
        'custom_stopping_strings',
        'skip_special_tokens',
        'stream',
        'tfs',
        'top_a',
    ]

    # Chat elements
    elements += [
        'textbox',
        'start_with',
        'character_menu',
        'history',
        'name1',
        'name2',
        'greeting',
        'context',
        'mode',
        'instruction_template',
        'name1_instruct',
        'name2_instruct',
        'context_instruct',
        'turn_template',
        'chat_style',
        'chat-instruct_command',
    ]

    # Notebook/default elements
    elements += [
        'textbox-notebook',
        'textbox-default',
        'output_textbox',
        'prompt_menu-default',
        'prompt_menu-notebook',
    ]

    # Model elements
    elements += list_model_elements()

    return elements


def gather_interface_values(*args):
    output = {}
    for i, element in enumerate(list_interface_input_elements()):
        output[element] = args[i]

    if not shared.args.multi_user:
        shared.persistent_interface_state = output

    return output


def apply_interface_values(state, use_persistent=False):
    if use_persistent:
        state = shared.persistent_interface_state

    elements = list_interface_input_elements()
    if len(state) == 0:
        return [gr.update() for k in elements]  # Dummy, do nothing
    else:
        return [state[k] if k in state else gr.update() for k in elements]


def save_settings(state, preset, instruction_template, extensions, show_controls):
    output = copy.deepcopy(shared.settings)
    exclude = ['name2', 'greeting', 'context', 'turn_template']
    for k in state:
        if k in shared.settings and k not in exclude:
            output[k] = state[k]

    output['preset'] = preset
    output['prompt-default'] = state['prompt_menu-default']
    output['prompt-notebook'] = state['prompt_menu-notebook']
    output['character'] = state['character_menu']
    output['instruction_template'] = instruction_template
    output['default_extensions'] = extensions
    output['seed'] = int(output['seed'])
    output['show_controls'] = show_controls

    return yaml.dump(output, sort_keys=False, width=float("inf"))


class ToolButton(gr.Button, gr.components.IOComponent):
    """
    Small button with single emoji as text, fits inside gradio forms
    Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_block_name(self):
        return "button"


def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class, interactive=True):
    """
    Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui
    """
    def refresh():
        refresh_method()
        args = refreshed_args() if callable(refreshed_args) else refreshed_args

        for k, v in args.items():
            setattr(refresh_component, k, v)

        return gr.update(**(args or {}))

    refresh_button = ToolButton(value=refresh_symbol, elem_classes=elem_class, interactive=interactive)
    refresh_button.click(
        fn=refresh,
        inputs=[],
        outputs=[refresh_component]
    )

    return refresh_button


def create_delete_button(**kwargs):
    return ToolButton(value=delete_symbol, **kwargs)


def create_save_button(**kwargs):
    return ToolButton(value=save_symbol, **kwargs)
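
For context, a minimal usage sketch of how create_refresh_button above is typically wired, assuming the Gradio 3 API this module targets. The dropdown, the list_models stand-in, and the elem_class string are hypothetical illustrations, not part of the deleted file.

    # Hypothetical usage sketch (not from the deleted file): wiring a refresh
    # button next to a dropdown, matching create_refresh_button's signature
    # (refresh_component, refresh_method, refreshed_args, elem_class).
    import gradio as gr

    def list_models():
        return ['model-a', 'model-b']  # stand-in for a real model scan

    with gr.Blocks() as demo:
        model_menu = gr.Dropdown(choices=list_models(), label='Model')
        create_refresh_button(
            model_menu,
            lambda: None,                        # side effect: re-scan models (no-op here)
            lambda: {'choices': list_models()},  # kwargs applied to the dropdown on click
            'refresh-button',
        )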
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test.sh
DELETED
@@ -1,10 +0,0 @@
#!/usr/bin/env bash

work_path=$(dirname $0)
PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node=8 \
    tools/test.py ${work_path}/test_config_h32.py \
    ${work_path}/ckpt/latest.pth \
    --launcher pytorch \
    --eval mIoU \
    2>&1 | tee -a ${work_path}/log.txt
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/utils/zoom_in_utils.py
DELETED
@@ -1,75 +0,0 @@
import os

import cv2
import numpy as np
from PIL import Image

os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def write_video(file_path, frames, fps, reversed=True):
    """
    Writes frames to an mp4 video file
    :param file_path: Path to output video, must end with .mp4
    :param frames: List of PIL.Image objects
    :param fps: Desired frame rate
    :param reversed: if order of images to be reversed (default = True)
    """
    if reversed == True:
        frames.reverse()

    w, h = frames[0].size
    fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v")
    # fourcc = cv2.VideoWriter_fourcc(*'avc1')
    writer = cv2.VideoWriter(file_path, fourcc, fps, (w, h))

    for frame in frames:
        np_frame = np.array(frame.convert("RGB"))
        cv_frame = cv2.cvtColor(np_frame, cv2.COLOR_RGB2BGR)
        writer.write(cv_frame)

    writer.release()


def image_grid(imgs, rows, cols):
    assert len(imgs) == rows * cols

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def shrink_and_paste_on_blank(current_image, mask_width):
    """
    Decreases size of current_image by mask_width pixels from each side,
    then adds a mask_width width transparent frame,
    so that the image the function returns is the same size as the input.
    :param current_image: input image to transform
    :param mask_width: width in pixels to shrink from each side
    """

    height = current_image.height
    width = current_image.width

    # shrink down by mask_width
    prev_image = current_image.resize((height - 2 * mask_width, width - 2 * mask_width))
    prev_image = prev_image.convert("RGBA")
    prev_image = np.array(prev_image)

    # create blank non-transparent image
    blank_image = np.array(current_image.convert("RGBA")) * 0
    blank_image[:, :, 3] = 1

    # paste shrinked onto blank
    blank_image[mask_width : height - mask_width, mask_width : width - mask_width, :] = prev_image
    prev_image = Image.fromarray(blank_image)

    return prev_image


def dummy(images, **kwargs):
    return images, False
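
A minimal usage sketch of the two main helpers above, assuming Pillow and OpenCV are installed; the frame size, mask width, and output file name are arbitrary choices. Note that shrink_and_paste_on_blank passes (height - 2m, width - 2m) to PIL's resize(), which expects (width, height), so the helper effectively assumes square frames.

    # Hypothetical usage sketch (not from the deleted file): build a short
    # outward-zoom clip from a single square frame.
    from PIL import Image

    frame = Image.new("RGB", (512, 512), "gray")  # stand-in for a generated frame
    frames = [frame]
    for _ in range(10):
        # each step shrinks the last frame by 32 px per side and recentres it
        frames.append(shrink_and_paste_on_blank(frames[-1], 32))

    # reversed=True plays the sequence as a zoom-out ending on the first frame
    write_video("zoom.mp4", frames, fps=5, reversed=True)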
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/compatibility_tags.py
DELETED
@@ -1,165 +0,0 @@
"""Generate and work with PEP 425 Compatibility Tags.
"""

import re
from typing import List, Optional, Tuple

from pip._vendor.packaging.tags import (
    PythonVersion,
    Tag,
    compatible_tags,
    cpython_tags,
    generic_tags,
    interpreter_name,
    interpreter_version,
    mac_platforms,
)

_osx_arch_pat = re.compile(r"(.+)_(\d+)_(\d+)_(.+)")


def version_info_to_nodot(version_info: Tuple[int, ...]) -> str:
    # Only use up to the first two numbers.
    return "".join(map(str, version_info[:2]))


def _mac_platforms(arch: str) -> List[str]:
    match = _osx_arch_pat.match(arch)
    if match:
        name, major, minor, actual_arch = match.groups()
        mac_version = (int(major), int(minor))
        arches = [
            # Since we have always only checked that the platform starts
            # with "macosx", for backwards-compatibility we extract the
            # actual prefix provided by the user in case they provided
            # something like "macosxcustom_". It may be good to remove
            # this as undocumented or deprecate it in the future.
            "{}_{}".format(name, arch[len("macosx_") :])
            for arch in mac_platforms(mac_version, actual_arch)
        ]
    else:
        # arch pattern didn't match (?!)
        arches = [arch]
    return arches


def _custom_manylinux_platforms(arch: str) -> List[str]:
    arches = [arch]
    arch_prefix, arch_sep, arch_suffix = arch.partition("_")
    if arch_prefix == "manylinux2014":
        # manylinux1/manylinux2010 wheels run on most manylinux2014 systems
        # with the exception of wheels depending on ncurses. PEP 599 states
        # manylinux1/manylinux2010 wheels should be considered
        # manylinux2014 wheels:
        # https://www.python.org/dev/peps/pep-0599/#backwards-compatibility-with-manylinux2010-wheels
        if arch_suffix in {"i686", "x86_64"}:
            arches.append("manylinux2010" + arch_sep + arch_suffix)
            arches.append("manylinux1" + arch_sep + arch_suffix)
    elif arch_prefix == "manylinux2010":
        # manylinux1 wheels run on most manylinux2010 systems with the
        # exception of wheels depending on ncurses. PEP 571 states
        # manylinux1 wheels should be considered manylinux2010 wheels:
        # https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels
        arches.append("manylinux1" + arch_sep + arch_suffix)
    return arches


def _get_custom_platforms(arch: str) -> List[str]:
    arch_prefix, arch_sep, arch_suffix = arch.partition("_")
    if arch.startswith("macosx"):
        arches = _mac_platforms(arch)
    elif arch_prefix in ["manylinux2014", "manylinux2010"]:
        arches = _custom_manylinux_platforms(arch)
    else:
        arches = [arch]
    return arches


def _expand_allowed_platforms(platforms: Optional[List[str]]) -> Optional[List[str]]:
    if not platforms:
        return None

    seen = set()
    result = []

    for p in platforms:
        if p in seen:
            continue
        additions = [c for c in _get_custom_platforms(p) if c not in seen]
        seen.update(additions)
        result.extend(additions)

    return result


def _get_python_version(version: str) -> PythonVersion:
    if len(version) > 1:
        return int(version[0]), int(version[1:])
    else:
        return (int(version[0]),)


def _get_custom_interpreter(
    implementation: Optional[str] = None, version: Optional[str] = None
) -> str:
    if implementation is None:
        implementation = interpreter_name()
    if version is None:
        version = interpreter_version()
    return f"{implementation}{version}"


def get_supported(
    version: Optional[str] = None,
    platforms: Optional[List[str]] = None,
    impl: Optional[str] = None,
    abis: Optional[List[str]] = None,
) -> List[Tag]:
    """Return a list of supported tags for each version specified in
    `versions`.

    :param version: a string version, of the form "33" or "32",
        or None. The version will be assumed to support our ABI.
    :param platform: specify a list of platforms you want valid
        tags for, or None. If None, use the local system platform.
    :param impl: specify the exact implementation you want valid
        tags for, or None. If None, use the local interpreter impl.
    :param abis: specify a list of abis you want valid
        tags for, or None. If None, use the local interpreter abi.
    """
    supported: List[Tag] = []

    python_version: Optional[PythonVersion] = None
    if version is not None:
        python_version = _get_python_version(version)

    interpreter = _get_custom_interpreter(impl, version)

    platforms = _expand_allowed_platforms(platforms)

    is_cpython = (impl or interpreter_name()) == "cp"
    if is_cpython:
        supported.extend(
            cpython_tags(
                python_version=python_version,
                abis=abis,
                platforms=platforms,
            )
        )
    else:
        supported.extend(
            generic_tags(
                interpreter=interpreter,
                abis=abis,
                platforms=platforms,
            )
        )
    supported.extend(
        compatible_tags(
            python_version=python_version,
            interpreter=interpreter,
            platforms=platforms,
        )
    )

    return supported
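
A minimal usage sketch of get_supported(), the module's public entry point, assuming a standard pip installation (this is the path pip itself imports the module from). The printed values depend on the local interpreter and platform.

    # Hypothetical usage sketch (not from the deleted file): which wheel tags
    # does the running interpreter accept, most specific first?
    from pip._internal.utils.compatibility_tags import get_supported

    tags = get_supported()
    print(tags[0])    # platform-specific, e.g. a cp311 manylinux tag on CPython 3.11 / Linux
    print(tags[-1])   # pure-Python fallbacks (pyXY-none-any) come last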
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__init__.py
DELETED
@@ -1,4 +0,0 @@
from .more import *  # noqa
from .recipes import *  # noqa

__version__ = '8.8.0'
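
This __init__ only re-exports the package's two submodules; a minimal sketch of what that exposes, assuming a more_itertools distribution with the same layout is importable.

    # Hypothetical usage sketch (not from the deleted file): the star imports
    # above are what make names like chunked() available at package level.
    import more_itertools

    print(more_itertools.__version__)                  # '8.8.0' for this vendored copy
    print(list(more_itertools.chunked(range(7), 3)))   # [[0, 1, 2], [3, 4, 5], [6]]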
spaces/Awesimo/jojogan/e4e/models/encoders/__init__.py
DELETED
File without changes
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/CONTRIBUTING.md
DELETED
@@ -1,68 +0,0 @@
# Contributing to detectron2

## Issues
We use GitHub issues to track public bugs and questions.
Please make sure to follow one of the
[issue templates](https://github.com/facebookresearch/detectron2/issues/new/choose)
when reporting any issues.

Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
disclosure of security bugs. In those cases, please go through the process
outlined on that page and do not file a public issue.

## Pull Requests
We actively welcome pull requests.

However, if you're adding any significant features (e.g. > 50 lines), please
make sure to discuss with maintainers about your motivation and proposals in an issue
before sending a PR. This is to save your time so you don't spend time on a PR that we'll not accept.

We do not always accept new features, and we take the following
factors into consideration:

1. Whether the same feature can be achieved without modifying detectron2.
   Detectron2 is designed so that you can implement many extensions from the outside, e.g.
   those in [projects](https://github.com/facebookresearch/detectron2/tree/master/projects).
   * If some part of detectron2 is not extensible enough, you can also bring up a more general issue to
     improve it. Such feature request may be useful to more users.
2. Whether the feature is potentially useful to a large audience (e.g. an impactful detection paper, a popular dataset,
   a significant speedup, a widely useful utility),
   or only to a small portion of users (e.g., a less-known paper, an improvement not in the object
   detection field, a trick that's not very popular in the community, code to handle a non-standard type of data)
   * Adoption of additional models, datasets, new task are by default not added to detectron2 before they
     receive significant popularity in the community.
     We sometimes accept such features in `projects/`, or as a link in `projects/README.md`.
3. Whether the proposed solution has a good design / interface. This can be discussed in the issue prior to PRs, or
   in the form of a draft PR.
4. Whether the proposed solution adds extra mental/practical overhead to users who don't
   need such feature.
5. Whether the proposed solution breaks existing APIs.

To add a feature to an existing function/class `Func`, there are always two approaches:
(1) add new arguments to `Func`; (2) write a new `Func_with_new_feature`.
To meet the above criteria, we often prefer approach (2), because:

1. It does not involve modifying or potentially breaking existing code.
2. It does not add overhead to users who do not need the new feature.
3. Adding new arguments to a function/class is not scalable w.r.t. all the possible new research ideas in the future.

When sending a PR, please do:

1. If a PR contains multiple orthogonal changes, split it to several PRs.
2. If you've added code that should be tested, add tests.
3. For PRs that need experiments (e.g. adding a new model or new methods),
   you don't need to update model zoo, but do provide experiment results in the description of the PR.
4. If APIs are changed, update the documentation.
5. We use the [Google style docstrings](https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html) in python.
6. Make sure your code lints with `./dev/linter.sh`.


## Contributor License Agreement ("CLA")
In order to accept your pull request, we need you to submit a CLA. You only need
to do this once to work on any of Facebook's open source projects.

Complete your CLA here: <https://code.facebook.com/cla>

## License
By contributing to detectron2, you agree that your contributions will be licensed
under the LICENSE file in the root directory of this source tree.
spaces/Benson/text-generation/Examples/Car Simulator 9.md
DELETED
@@ -1,113 +0,0 @@
<h1>Car Simulator 9: A review of the latest car simulation game</h1>
<p>If you are a fan of car simulation games, you may have heard of Car Simulator 9, the newest installment in the popular series. Car Simulator 9 is a realistic, immersive game that lets you drive a variety of cars across different environments and scenarios. You can customize your car, upgrade your garage, complete challenges, and more. In this article, we review Car Simulator 9 and share some tips and tricks on how to play it.</p>
<h2>What is Car Simulator 9?</h2>
<p>Car Simulator 9 is a car simulation game developed by Red Dot Games and published by PlayWay S.A. It was released on August 11, 2021, for Windows, Mac, Linux, PlayStation 4, Xbox One, and Nintendo Switch. It is the ninth game in the Car Simulator series, which began in 2014 with Car Mechanic Simulator.</p>
<h2>car simulator 9</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://bltlly.com/2v6Mjj">https://bltlly.com/2v6Mjj</a></b></p><br /><br />
<h3>Features and gameplay</h3>
<p>Car Simulator 9 features more than 72 cars from various makes and models, each with its own specifications and performance. You can choose from different car categories, such as sports cars, muscle cars, classic cars, electric cars, and more. You can also modify your car with over 4000 unique parts, such as engines, tires, brakes, suspension, body kits, paint jobs, and more.</p>
<p>The game offers a realistic driving experience with accurate physics and a damage system. You can drive your car in different road conditions, such as asphalt, dirt, snow, rain, etc. You can also explore different locations, such as cities, highways, countryside, mountains, deserts, etc. The game has dynamic weather and a day/night cycle that affect your car's visibility and handling.</p>

<h3>Graphics and sound</h3>
<p>Car Simulator 9 has stunning graphics that create a realistic, immersive environment. The game has high-quality textures, lighting effects, shadows, reflections, etc. It also has detailed models of cars and parts that you can inspect up close. The game has realistic sound effects matching engine sounds, tire sounds, horn sounds, etc., and a soundtrack featuring various genres of music that suit the mood of the game.</p>
<h3>Pros and cons</h3>
<p>Car Simulator 9 is a fun and addictive game that will appeal to car enthusiasts and casual players alike. The game has many pros, such as:</p>
<ul>
<li>It has a wide variety of cars and parts that you can customize.</li>
<li>It has a realistic driving experience with accurate physics and a damage system.</li>
<li>It has different locations and scenarios that you can explore.</li>
<li>It has various game modes and objectives that you can play.</li>
<li>It has stunning graphics and sound effects that create a realistic environment.</li>
</ul>
<p>However, the game also has some cons, such as:</p>
<ul>
<li>It can be repetitive and tedious at times.</li>
<li>It can be buggy and glitchy at times.</li>
<li>It can be expensive to buy all the DLCs.</li>
<li>It can be hard to control the car with some devices or settings.</li>
<li>It can be too easy or too hard for some players.</li>
</ul>
<h2>How to play Car Simulator 9?</h2>
<p>If you are interested in playing Car Simulator 9, you will need to meet some requirements and follow some steps. Here is how to play Car Simulator 9:</p>
<h3>System requirements</h3>
<p>Before buying or downloading the game, make sure your device can run it smoothly. Here are the minimum and recommended system requirements for Car Simulator 9:</p>
<p></p>
<table>
<tr>
<th>Minimum</th>
<th>Recommended</th>
</tr>
<tr>
<td>OS: Windows 7/8/10 (64-bit)</td>
</tr>
<tr>
<td>Processor: Intel Core i3 3.0 GHz or AMD equivalent</td>
<td>Processor: Intel Core i5 3.4 GHz or AMD equivalent</td>
</tr>
<tr>
<td>Memory: 4 GB RAM</td>
<td>Memory: 8 GB RAM</td>
</tr>
<tr>
<td>Graphics: Nvidia GeForce GTX 660 or AMD Radeon R9 270x</td>
<td>Graphics: Nvidia GeForce GTX 970 or AMD Radeon RX 580</td>
</tr>
<tr>
<td>Storage: 20 GB available space</td>
<td>Storage: 20 GB available space</td>
</tr>
<tr>
<td>Sound card: DirectX compatible</td>
<td>Sound card: DirectX compatible</td>
</tr>
</table>
<h3>Download and installation</h3>
<p>Once you have checked the system requirements, you can buy or download the game from various platforms. The game is available on Steam, GOG, Epic Games Store, PlayStation Store, Microsoft Store, and Nintendo eShop. The price varies by platform and region, but it is usually around $19.99 USD. You can also buy the DLCs separately or as a bundle for additional content and features.</p>
<p>To install the game, follow the instructions for the platform you are using. For example, if you are using Steam, you need to create an account, sign in, add the game to your library, and click Install. Installation may take some time depending on your Internet speed and device performance.</p>
<h3>Controls and settings</h3>
<p>After installing the game, you can launch it and adjust the controls and settings to your preference. You can choose between different input devices, such as keyboard, mouse, gamepad, steering wheel, etc. You can also customize key bindings, sensitivity, vibration, etc., and change the graphics, sound, and gameplay settings, choosing between different resolutions, quality levels, volume levels, difficulty levels, etc.</p>
<h2>Tips and tricks for Car Simulator 9</h2>
<p>To enjoy the game more and improve your skills, here are some tips and tricks for Car Simulator 9:</p>
<h3>How to customize your car</h3>

<p>To buy new parts or accessories for your car, go to the shop by pressing S on your keyboard or clicking the shopping-cart icon on the screen. You can then browse the items available for purchase and filter them by price, make, model, category, etc. To buy an item, you need enough money in your account; you can earn money by completing tasks or selling cars.</p>
<h3>How to earn money and upgrade your garage</h3>
<p>To buy new cars or parts for your car, you need enough money in your account. There are several ways to earn money in Car Simulator 9:</p>
<ul>
<li>You can work as a car mechanic and repair cars for customers in your garage. You will receive orders from customers who have problems with their cars. You will need to diagnose the problem using tools such as a scanner or a test drive, then replace or fix the broken parts using tools such as a wrench or a welder. You will receive money and reputation points for each completed order.</li>
<li>You can buy and sell cars at the auction house or the junkyard. You can find cheap cars that are damaged or old and buy them at a low price, then repair and restore them in your garage and sell them at a higher price. You can also find rare or unique cars that are worth more money.</li>
<li>You can test your driving skills in various challenges and achievements that reward you with money and reputation points. You can access the challenges and achievements by pressing C on your keyboard or clicking the trophy icon on the screen, then choose between different types of challenges, such as speed, drift, jump, race, etc. You will need to complete the objectives within the time limit or score limit.</li>

</ul>
<p>To upgrade your garage, you need enough reputation points in your account. Reputation points are earned by completing tasks or winning races. You can access the garage upgrade menu by pressing G on your keyboard or clicking the hammer icon on the screen, then choose between different upgrades, such as tools, equipment, space, decoration, etc. Upgrading your garage will let you work faster, easier, and better.</p>
<h3>How to complete challenges and achievements</h3>
<p>One of the fun aspects of Car Simulator 9 is completing various challenges and achievements that test your driving skills and knowledge. To access them, press C on your keyboard or click the trophy icon on the screen, then choose between different types of challenges, such as speed, drift, jump, race, etc. You will need to complete the objectives within the time limit or score limit.</p>
<p>Some of the challenges and achievements are easy and straightforward, while others are hard and tricky. Here are some tips and tricks for completing them:</p>
<ul>
<li>For speed challenges, drive as fast as possible without crashing or leaving the road. You can use nitro boosters or turbochargers to increase your speed, and slipstreaming or drafting to reduce air resistance and gain speed. You should also avoid traffic and obstacles that can slow you down.</li>
<li>For drift challenges, slide your car sideways without losing control or spinning out. You can use the handbrake or the clutch to initiate a drift, and the throttle or steering to maintain or adjust it. You should also use counter-steering or opposite lock to balance your car and avoid oversteer or understeer.</li>

<li>For race challenges, finish first among the other competitors without crashing or being disqualified. You can use shortcuts or alternative routes to get an edge over other racers, and nitro boosters or turbochargers to overtake them. You should also use braking and cornering techniques to navigate curves and turns without losing speed.</li>
</ul>
<h2>Conclusion</h2>
<h3>Review summary</h3>
<p>In conclusion, Car Simulator 9 is a realistic, immersive car simulation game that lets you drive various cars in different environments and scenarios. You can customize your car, upgrade your garage, complete challenges, and more. The game has many pros, such as a wide variety of cars and parts, a realistic driving experience, different locations and scenarios, various game modes and objectives, and stunning graphics and sound effects. It also has some cons, such as being repetitive and tedious at times, buggy and glitchy at times, expensive if you buy all the DLCs, hard to control with some devices or settings, and too easy or too hard for some players.</p>
<h3>Rating and recommendation</h3>
<p>We give Car Simulator 9 a rating of 4 out of 5 stars based on our review. We recommend Car Simulator 9 to anyone who loves car simulation games or cars in general. The game is suitable for players of all ages and skill levels, and it is fun and addictive enough to keep you entertained for hours.</p>
<h2>Frequently asked questions</h2>
<p>Here are some frequently asked questions about Car Simulator 9:</p>
<ul>
<li><b>Q: How much does Car Simulator 9 cost?</b></li>
<li>A: Car Simulator 9 costs around $19.99 USD on various platforms, such as Steam, GOG, Epic Games Store, PlayStation Store, Microsoft Store, and Nintendo eShop. The price may vary depending on the platform and region. You can also buy the DLCs separately or as a bundle for additional content and features.</li>

<li>A: Car Simulator 9 does not have a fixed length or a linear story. The game is open-ended and sandbox-style, which means you can play as long as you want and however you want. You can create your own goals and objectives or follow those provided by the game. The game has a lot of content and replay value, so you will never run out of things to do.</li>
<li><b>Q: Is Car Simulator 9 multiplayer?</b></li>
<li>A: Yes, Car Simulator 9 has a multiplayer mode that lets you play with other players online or offline. You can join or create a lobby and invite other players, and choose the location, the car, and the rules of the race. You can compete with other players in races or drag races, or cooperate with them in repairing cars or completing challenges.</li>
<li><b>Q: Is Car Simulator 9 realistic?</b></li>
<li>A: Yes, Car Simulator 9 is realistic in terms of graphics, sound, physics, and its damage system. The game has high-quality textures, lighting effects, shadows, reflections, etc., detailed car and part models that you can inspect up close, and realistic sound effects matching engine, tire, and horn sounds. Its accurate physics and damage system affect your car's visibility and handling.</li>
<li><b>Q: Is Car Simulator 9 fun?</b></li>
<li>A: Yes, Car Simulator 9 is fun and addictive for anyone who loves car simulation games or cars in general. The game has many features and gameplay options that will keep you entertained for hours. You can customize your car, upgrade your garage, complete challenges, and more. The game also has stunning graphics and sound effects that create a realistic environment.</li>
</ul></p> 64aa2da5cf<br />
<br />
<br />
spaces/Benson/text-generation/Examples/Descargar Carx Street 0.9.1.md
DELETED
@@ -1,77 +0,0 @@
<br />
<h1>How to download CarX Street 0.9.1 and become a street racing legend</h1>
<p>If you are a fan of car racing games, you may have heard of CarX Street, the latest game from the creators of CarX Drift Racing 2. CarX Street is a realistic, dynamic open-world racing game that lets you embrace the freedom of being a street racer in Sunset City. You can choose from a variety of cars, customize them, and race on highways and city streets, as well as drift on high-speed tracks.</p>
<p>CarX Street is currently in open beta testing, which means you can download it for free and enjoy its features before it is officially released. The latest version of the game is 0.9.1, which was updated on June 7, 2023, and brings some improvements and bug fixes to the game.</p>
<h2>download carx street 0.9.1</h2><br /><p><b><b>Download Zip</b> ⚡ <a href="https://bltlly.com/2v6M1K">https://bltlly.com/2v6M1K</a></b></p><br /><br />
<p>In this article, we will tell you what CarX Street 0.9.1 is, why you should play it, and how to download it on your Android or iOS device. We will also give you some tips and tricks to help you become a street racing legend in CarX Street 0.9.1.</p>
<h2>What is CarX Street 0.9.1?</h2>
<p>CarX Street 0.9.1 is the latest open-beta version of CarX Street, a realistic and dynamic open-world racing game developed by CarX Technologies, LLC. The game is available for Android and iOS devices and requires an Internet connection to play.</p>
<p>CarX Street 0.9.1 offers a variety of features that make it one of the best car racing games on mobile devices. Here are some of them:</p>
<h3>Features of CarX Street 0.9.1</h3>
<ul>
<li>A large, detailed open-world map covering highways, city streets, industrial zones, suburbs, and more.</li>
<li>A diverse car roster including sports cars, muscle cars, supercars, classic cars, and more.</li>
<li>A detailed car tuning system that lets you swap parts and upgrade your engine, transmission, body, suspension, tires, and more.</li>

<li>A realistic physics engine that simulates car behavior based on CarX technology.</li>
<li>A dynamic day/night cycle that changes the game's lighting and atmosphere.</li>
<li>A career mode that challenges you to join clubs, defeat bosses, and become the legend of Sunset City.</li>
<li>A variety of race modes including speed races, drift races, drag races, time trials, and more.</li>
<li>An online multiplayer mode that lets you compete with other players from around the world.</li>
</ul>
<h3>How to download CarX Street 0.9.1 on Android and iOS devices</h3>
<p>Downloading CarX Street 0.9.1 is easy and free. All you need to do is follow these steps:</p>
<ol>
<li>Go to the Google Play Store or the App Store on your device.</li>
<li>Search for CarX Street or use this link: [CarX Street - Apps on Google Play]( 1 ).</li>
<li>Tap the Install button and wait for the download to finish.</li>
<li>Launch the game and enjoy!</li>
</ol> <h2>Why you should play CarX Street 0.9.1</h2>
<p>CarX Street 0.9.1 is not just another car racing game. It is a game that gives you the chance to experience the thrill and excitement of being a street racer in a realistic, dynamic open world. Here are some reasons why you should play CarX Street 0.9.1:</p>
<h3>Realistic and dynamic open-world racing</h3>
<p>One of the main attractions of CarX Street 0.9.1 is its large, detailed open-world map covering highways, city streets, industrial zones, suburbs, and more. You can explore the map freely and find different places, landmarks, and secrets. You can also find different events, challenges, and races that will test your skills and reward you with money and reputation.</p>
<p></p>

<h3>Customizable and tunable cars</h3>
<p>Another reason to play CarX Street 0.9.1 is its diverse car roster, which includes sports cars, muscle cars, supercars, classic cars, and more. You can choose from over 50 cars with different characteristics, such as speed, acceleration, handling, drift, and more. You can also customize your cars with different parts, colors, stickers, and decals.</p>
<p>But customization is not enough. You also need to tune your cars so they perform better on the road. CarX Street 0.9.1 offers a detailed car tuning system that lets you swap parts and upgrade your engine, transmission, body, suspension, tires, and more. You can also adjust your car's settings, such as camber angle, toe angle, tire pressure, gear ratio, and more. Tuning your car will make it faster, more responsive, and more stable on the road.</p>
<h3>Challenging and rewarding career mode</h3>
<p>The last reason to play CarX Street 0.9.1 is its challenging and rewarding career mode, which dares you to join clubs, defeat bosses, and become the legend of Sunset City. The career mode consists of more than 100 missions that take you to different locations and race modes in the game. You will also meet different characters who will help or hinder you on your journey.</p>
<p>The career mode will also introduce you to the club system in the game. Clubs are groups of racers that have their own territories, rules, and reputations in Sunset City. You can join one of the clubs or create your own club and recruit other players. You can also challenge other clubs for their territories and resources. The club system adds a social and competitive element to the game that makes it more fun and engaging.</p>
<h2>Tips and tricks for playing CarX Street 0.9.1</h2>

<h3>Choose the right car for each race mode</h3>
<p>CarX Street 0.9.1 offers a variety of race modes, including speed races, drift races, drag races, time trials, and more. Each race mode requires a different type of car suited to its conditions and objectives. For example, speed races require fast, agile cars that can accelerate quickly and maneuver easily on the road. Drift races require powerful, stable cars that can slide smoothly through corners and maintain high speed.</p>
<p>Therefore, you need to choose the right car for each race mode based on its characteristics and performance. You can check each car's stats in the garage menu before selecting it for a race, and compare different cars by tapping on them in the garage menu. Choosing the right car for each race mode will give you an edge over your opponents and increase your chances of winning.</p>
<h3>Upgrade your car's parts and engine</h3>
<p>As you progress through the game, you will earn money and reputation that you can use to buy new cars or upgrade existing ones. Upgrading your car's parts and engine will improve its performance on the road by increasing its speed, acceleration, handling, drift, and more. You can upgrade your car's parts and engine in the garage menu by tapping the upgrade button next to each part or engine, and see the effect of each upgrade on your car's stats by looking at the bars and numbers on the screen.</p>
<p>Upgrading your car's parts and engine will also unlock new tuning options that let you adjust your car's settings, such as camber angle, toe angle, tire pressure, gear ratio, and more. Tuning your car will make it better suited to different race modes and conditions. You can tune your car in the garage menu by tapping the tune button next to each part or engine, and see the effect of each tuning option on your car's stats by looking at the bars and numbers on the screen.</p>

<p>CarX Street 0.9.1 is a game that requires you to master two essential skills: drifting and speeding. Drifting is the art of sliding your car sideways through corners while maintaining high speed. Speeding is the ability to accelerate quickly and reach high speed on straight roads. Both skills are important for different race modes and situations in the game.</p>
<p>To master drifting, practice using the brake, throttle, and steering controls to initiate, maintain, and exit a drift. You also need to learn how to control your car's angle, speed, and direction while drifting. You can practice drifting in drift mode or on any track with curves and turns, and watch tutorials and tips on drifting in the game menu or online.</p>
<p>To master speeding, practice using the nitro, launch control, and shifting controls to increase your acceleration and speed. You also need to learn how to avoid obstacles, traffic, and collisions while speeding. You can practice speeding in speed mode or on any track with straight roads and highways, and watch tutorials and tips on speeding in the game menu or online.</p>
<h3>Join clubs and compete with other players</h3>
<p>CarX Street 0.9.1 is not just a single-player game. It is also a multiplayer game that lets you join clubs and compete with other players from around the world. Clubs are groups of racers that have their own territories, rules, and reputations in Sunset City. You can join one of the clubs or create your own club and recruit other players.</p>
<p>Joining a club gives you access to exclusive events, challenges, rewards, and chat rooms. You can also cooperate with your club members to challenge other clubs for their territories and resources. Competing with other clubs will increase your club's rank and reputation in Sunset City.</p>

<h2>Conclusion</h2>
<p>CarX Street 0.9.1 is a realistic, dynamic open-world racing game that lets you embrace the freedom of being a street racer in Sunset City. You can choose from a variety of cars, customize them, tune them, and race on highways and city streets, as well as drift on high-speed tracks.</p>
<p>CarX Street 0.9.1 is currently in open beta testing, which means you can download it for free and enjoy its features before it is officially released. The latest version of the game is 0.9.1, which was updated on June 7, 2023, and brings some improvements and bug fixes to the game.</p>
<p>In this article, we told you what CarX Street 0.9.1 is, why you should play it, how to download it on your Android or iOS device, and some tips and tricks to help you become a street racing legend in CarX Street 0.9.1.</p>
<p>We hope you found this article helpful and informative. If you have any questions or comments about CarX Street 0.9.1 or this article, feel free to leave a comment below or contact us through our website or social media channels.</p>
<p>Thank you for reading this article, and happy racing!</p>
<h2>Frequently asked questions</h2>
<ul>
<li><b>Q: How much space does CarX Street 0.9.1 require on my device?</b></li>
<li>A: CarX Street 0.9.1 requires about 2 GB of free space on your device.</li>
<li><b>Q: How can I get more money and reputation in CarX Street 0.9.1?</b></li>
<li>A: You can get more money and reputation by completing missions, events, challenges, and races in the game. You can also get more money and reputation by joining clubs and competing with other players.</li>
<li><b>Q: How can I change the camera view in CarX Street 0.9.1?</b></li>
<li>A: You can change the camera view in CarX Street 0.9.1 by tapping the camera icon in the upper right corner of the screen. You can choose between different camera views, such as first person, third person, hood, bumper, and more.</li>

<li>A: You can share your screenshots and videos of CarX Street 0.9.1 by tapping the share icon in the upper left corner of the screen. You can choose between different options, such as saving to your device, uploading to YouTube, or sharing on social media platforms.</li>
<li><b>Q: How can I contact the developers of CarX Street 0.9.1?</b></li>
<li>A: You can contact the developers of CarX Street 0.9.1 by visiting their website or following their social media channels. You can also email them at [email protected] or leave a comment on the Google Play Store or the App Store.</li>
</ul></p> 64aa2da5cf<br />
<br />
<br />
spaces/Benson/text-generation/Examples/Descargar Gratis De Taxi Driver Juego.md
DELETED
@@ -1,60 +0,0 @@
<br />
<h1>How to download The Final Gambit for free</h1>
<p>If you are a fan of mystery, suspense, and romance, you might be interested in reading The Final Gambit by Jennifer Lynn Barnes. This is the third and final book in the bestselling series The Inheritance Games, which follows Avery Grambs, a teenager who inherits billions from a mysterious benefactor, and the Hawthorne brothers, who are determined to win her heart and uncover the secrets behind their grandfather's will. But how can you download The Final Gambit for free without breaking the law or putting your device at risk? In this article, we will show you some of the best ways to find and enjoy this thrilling novel for free.</p>
<h2>What is The Final Gambit?</h2>
<h3>A brief summary of the book</h3>
<p>The Final Gambit is the third and final installment in The Inheritance Games trilogy by Jennifer Lynn Barnes. It was published on August 30, 2022, by Little, Brown Books for Young Readers. The book picks up where the second book, The Hawthorne Legacy, left off, with Avery Grambs and the Hawthorne brothers facing a new challenge: a mysterious visitor who claims to be related to their late grandfather and who could change everything. As the clock ticks down to the moment Avery becomes the richest teenager on the planet, she and the Hawthornes must solve one last puzzle and play a dangerous game against an unknown and powerful enemy. Secrets upon secrets. Riddles upon riddles. In this game, hearts and lives are at stake, and there is nothing more Hawthorne than winning.</p>
<h2>free download of taxi driver game</h2><br /><p><b><b>Download Zip</b> 🆗 <a href="https://bltlly.com/2v6LrN">https://bltlly.com/2v6LrN</a></b></p><br /><br />
<h3>Why you should read it</h3>

<h2>Where to find The Final Gambit for free</h2>
<h3>Amazon Kindle</h3>
<p>One of the easiest ways to download The Final Gambit for free is to use Amazon Kindle. If you have a Kindle device or app, you can access thousands of books for free with Kindle Unlimited, a subscription service that costs $9.99 per month. You can also get a free 30-day trial if you are a new user. With Kindle Unlimited, you can read The Final Gambit and other books by Jennifer Lynn Barnes without paying anything. You can also borrow books from your local library using the OverDrive or Libby apps, which are compatible with Kindle devices and apps. To learn more about Amazon Kindle, visit .</p>
<h3>Google Books</h3>
<p>Another option for downloading The Final Gambit for free is to use Google Books. Google Books is a service that lets you search, preview, and read millions of books online. Some books are available in full text for free, while others are only available in snippets or previews. You can also buy or rent books from Google Play Books, which is integrated with Google Books. To read The Final Gambit for free on Google Books, you can search for it using keywords or browse by category. You can also use filters to narrow your results by language, date, format, etc. To access Google Books, visit .</p>
<h3>Scribd</h3>
<p>Scribd is another service that lets you download The Final Gambit for free. Scribd is a digital library that offers unlimited access to books, audiobooks, magazines, podcasts, and more for a monthly fee of $9.99. You can also get a free 30-day trial if you are a new user. With Scribd, you can read The Final Gambit and other books by Jennifer Lynn Barnes on any device, online or offline. You can also share your thoughts and opinions with other readers and discover new books based on your preferences. To join Scribd, go to .</p>
<h3>Yumpu</h3>

<h3>AudioBB</h3>
<p>If you prefer to listen to The Final Gambit instead of reading it, you can download it for free from AudioBB. AudioBB is a website that offers free audiobooks in various genres and languages. You can find The Final Gambit and other books by Jennifer Lynn Barnes on AudioBB by searching for them or browsing by category. You can also request audiobooks that are not available on the site. To download The Final Gambit from AudioBB, go to .</p>
<h2>How to avoid scams and viruses when downloading The Final Gambit for free</h2>
<h3>Check the source and reviews</h3>
<p>Before downloading The Final Gambit from any website, make sure the source is reliable and trustworthy. You can do this by checking the website's domain name, design, content, and reviews. Avoid websites with suspicious or misspelled domain names, poor or outdated design, irrelevant or low-quality content, and negative or fake reviews. You can also use tools like Whois or Scamadvisor to check the website's reputation and legitimacy.</p>
<h3>Use a VPN and antivirus software</h3>
<p>Another way to protect yourself from scams and viruses when downloading The Final Gambit is to use a VPN and antivirus software. A VPN (virtual private network) is a service that encrypts your Internet traffic and hides your IP address, keeping you anonymous and secure online. A VPN can help you bypass geo-restrictions, access blocked websites, and avoid malware and phishing attacks. Antivirus software is a program that detects and removes viruses, worms, trojans, spyware, adware, ransomware, and other malicious software from your device. Antivirus software can help you scan and clean your device, prevent unauthorized access, and block harmful downloads. You can find many free and paid VPNs and antivirus programs online.</p>
<h3>Beware of fake links and pop-ups</h3>

<h2>Conclusion</h2>
<p>The Final Gambit is an amazing book that you should not miss if you love mystery, suspense, and romance. It is the third and final book in The Inheritance Games series by Jennifer Lynn Barnes, which follows Avery Grambs and the Hawthorne brothers as they unravel the secrets behind their grandfather's will and face a new threat. You can download The Final Gambit for free from several online sources, such as Amazon Kindle, Google Books, Scribd, Yumpu, and AudioBB. However, you should also watch out for scams and viruses when downloading the book, and take precautions such as checking the source and reviews, using a VPN and antivirus software, and being wary of fake links and pop-ups. We hope this article helped you find and download The Final Gambit for free and enjoy this amazing book. Happy reading!</p>
<h2>Frequently asked questions</h2>
<p>Here are some of the most frequently asked questions about downloading The Final Gambit for free:</p>
<p></p>
<table>
<tr>
<th>Question</th>
<th>Answer</th>
</tr>
<tr>
<td>Is The Final Gambit the last book in the series?</td>
<td>Yes, The Final Gambit is the third and final book in The Inheritance Games trilogy by Jennifer Lynn Barnes. It concludes the story of Avery Grambs and the Hawthorne brothers.</td>
</tr>
<tr>
<td>Can I download The Final Gambit for free legally?</td>
<td>Yes, you can download The Final Gambit for free legally from several online sources, such as Amazon Kindle, Google Books, Scribd, Yumpu, and AudioBB. However, you should also watch out for scams and viruses when downloading the book, and take precautions such as checking the source and reviews, using a VPN and antivirus software, and being wary of fake links and pop-ups.</td>
</tr>
<tr>
<td>Can I read The Final Gambit without reading the previous books?</td>

</tr>
<tr>
<td>What are some other books similar to The Final Gambit?</td>
<td>If you like The Final Gambit, you may also like other books by Jennifer Lynn Barnes, such as The Naturals, The Fixer, and Deadly Little Scandals. You may also like other books in the mystery and romance genre, such as One of Us Is Lying by Karen M. McManus, Truly Devious by Maureen Johnson, and A Good Girl's Guide to Murder by Holly Jackson.</td>
</tr>
<tr>
<td>How can I contact Jennifer Lynn Barnes?</td>
<td>You can contact Jennifer Lynn Barnes through her website , her Twitter , or her Instagram . You can also send fan mail to her agent at: Jennifer Lynn Barnes c/o Elizabeth Harding Curtis Brown Ltd. 10 Astor Place New York NY 10003 USA</td>
</tr>
</table></p> 64aa2da5cf<br />
<br />
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/_legacy.py
DELETED
@@ -1,121 +0,0 @@
import functools
import os
import pathlib
import types
import warnings

from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any

from . import _common

Package = Union[types.ModuleType, str]
Resource = str


def deprecated(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        warnings.warn(
            f"{func.__name__} is deprecated. Use files() instead. "
            "Refer to https://importlib-resources.readthedocs.io"
            "/en/latest/using.html#migrating-from-legacy for migration advice.",
            DeprecationWarning,
            stacklevel=2,
        )
        return func(*args, **kwargs)

    return wrapper


def normalize_path(path):
    # type: (Any) -> str
    """Normalize a path by ensuring it is a string.

    If the resulting string contains path separators, an exception is raised.
    """
    str_path = str(path)
    parent, file_name = os.path.split(str_path)
    if parent:
        raise ValueError(f'{path!r} must be only a file name')
    return file_name


@deprecated
def open_binary(package: Package, resource: Resource) -> BinaryIO:
    """Return a file-like object opened for binary reading of the resource."""
    return (_common.files(package) / normalize_path(resource)).open('rb')


@deprecated
def read_binary(package: Package, resource: Resource) -> bytes:
    """Return the binary contents of the resource."""
    return (_common.files(package) / normalize_path(resource)).read_bytes()


@deprecated
def open_text(
    package: Package,
    resource: Resource,
    encoding: str = 'utf-8',
    errors: str = 'strict',
) -> TextIO:
    """Return a file-like object opened for text reading of the resource."""
    return (_common.files(package) / normalize_path(resource)).open(
        'r', encoding=encoding, errors=errors
    )


@deprecated
def read_text(
    package: Package,
    resource: Resource,
    encoding: str = 'utf-8',
    errors: str = 'strict',
) -> str:
    """Return the decoded string of the resource.

    The decoding-related arguments have the same semantics as those of
    bytes.decode().
    """
    with open_text(package, resource, encoding, errors) as fp:
        return fp.read()


@deprecated
def contents(package: Package) -> Iterable[str]:
    """Return an iterable of entries in `package`.

    Note that not all entries are resources. Specifically, directories are
    not considered resources. Use `is_resource()` on each entry returned here
    to check if it is a resource or not.
    """
    return [path.name for path in _common.files(package).iterdir()]


@deprecated
def is_resource(package: Package, name: str) -> bool:
    """True if `name` is a resource inside `package`.

    Directories are *not* resources.
    """
    resource = normalize_path(name)
    return any(
        traversable.name == resource and traversable.is_file()
        for traversable in _common.files(package).iterdir()
    )


@deprecated
def path(
    package: Package,
    resource: Resource,
) -> ContextManager[pathlib.Path]:
    """A context manager providing a file path object to the resource.

    If the resource does not already exist on its own on the file system,
    a temporary file will be created. If the file was created, the file
    will be deleted upon exiting the context manager (no exception is
    raised if the file was deleted prior to the context manager
    exiting).
    """
    return _common.as_file(_common.files(package) / normalize_path(resource))
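Each deprecated helper above is a thin wrapper over the `files()` API it tells callers to migrate to. A minimal sketch of the equivalent modern calls (the package and resource names are hypothetical; the stdlib `importlib.resources` spelling is used, and the vendored `importlib_resources` backport behaves the same):

```python
from importlib.resources import files

# read_text("mypkg.data", "config.json") becomes:
text = (files("mypkg.data") / "config.json").read_text(encoding="utf-8")

# read_binary("mypkg.data", "logo.png") becomes:
blob = (files("mypkg.data") / "logo.png").read_bytes()

# contents("mypkg.data") becomes:
entries = [entry.name for entry in files("mypkg.data").iterdir()]
```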
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/version.py
DELETED
@@ -1,504 +0,0 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

import collections
import itertools
import re
import warnings
from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union

from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType

__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]

InfiniteTypes = Union[InfinityType, NegativeInfinityType]
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
SubLocalType = Union[InfiniteTypes, int, str]
LocalType = Union[
    NegativeInfinityType,
    Tuple[
        Union[
            SubLocalType,
            Tuple[SubLocalType, str],
            Tuple[NegativeInfinityType, SubLocalType],
        ],
        ...,
    ],
]
CmpKey = Tuple[
    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
]
LegacyCmpKey = Tuple[int, Tuple[str, ...]]
VersionComparisonMethod = Callable[
    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
]

_Version = collections.namedtuple(
    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)


def parse(version: str) -> Union["LegacyVersion", "Version"]:
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    """
    try:
        return Version(version)
    except InvalidVersion:
        return LegacyVersion(version)


class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.
    """


class _BaseVersion:
    _key: Union[CmpKey, LegacyCmpKey]

    def __hash__(self) -> int:
        return hash(self._key)

    # Please keep the duplicated `isinstance` check
    # in the six comparisons hereunder
    # unless you find a way to avoid adding overhead function calls.
    def __lt__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key < other._key

    def __le__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key <= other._key

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key == other._key

    def __ge__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key >= other._key

    def __gt__(self, other: "_BaseVersion") -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key > other._key

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key != other._key


class LegacyVersion(_BaseVersion):
    def __init__(self, version: str) -> None:
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)

        warnings.warn(
            "Creating a LegacyVersion has been deprecated and will be "
            "removed in the next major release",
            DeprecationWarning,
        )

    def __str__(self) -> str:
        return self._version

    def __repr__(self) -> str:
        return f"<LegacyVersion('{self}')>"

    @property
    def public(self) -> str:
        return self._version

    @property
    def base_version(self) -> str:
        return self._version

    @property
    def epoch(self) -> int:
        return -1

    @property
    def release(self) -> None:
        return None

    @property
    def pre(self) -> None:
        return None

    @property
    def post(self) -> None:
        return None

    @property
    def dev(self) -> None:
        return None

    @property
    def local(self) -> None:
        return None

    @property
    def is_prerelease(self) -> bool:
        return False

    @property
    def is_postrelease(self) -> bool:
        return False

    @property
    def is_devrelease(self) -> bool:
        return False


_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)

_legacy_version_replacement_map = {
    "pre": "c",
    "preview": "c",
    "-": "final-",
    "rc": "c",
    "dev": "@",
}


def _parse_version_parts(s: str) -> Iterator[str]:
    for part in _legacy_version_component_re.split(s):
        part = _legacy_version_replacement_map.get(part, part)

        if not part or part == ".":
            continue

        if part[:1] in "0123456789":
            # pad for numeric comparison
            yield part.zfill(8)
        else:
            yield "*" + part

    # ensure that alpha/beta/candidate are before final
    yield "*final"


def _legacy_cmpkey(version: str) -> LegacyCmpKey:

    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
    # greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the defacto standard originally implemented by setuptools,
    # as before all PEP 440 versions.
    epoch = -1

    # This scheme is taken from pkg_resources.parse_version setuptools prior to
    # it's adoption of the packaging library.
    parts: List[str] = []
    for part in _parse_version_parts(version.lower()):
        if part.startswith("*"):
            # remove "-" before a prerelease tag
            if part < "*final":
                while parts and parts[-1] == "*final-":
                    parts.pop()

            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1] == "00000000":
                parts.pop()

        parts.append(part)

    return epoch, tuple(parts)


# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""


class Version(_BaseVersion):

    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)

    def __init__(self, version: str) -> None:

        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self) -> str:
        return f"<Version('{self}')>"

    def __str__(self) -> str:
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        # Pre-release
        if self.pre is not None:
            parts.append("".join(str(x) for x in self.pre))

        # Post-release
        if self.post is not None:
            parts.append(f".post{self.post}")

        # Development release
        if self.dev is not None:
            parts.append(f".dev{self.dev}")

        # Local version segment
        if self.local is not None:
            parts.append(f"+{self.local}")

        return "".join(parts)

    @property
    def epoch(self) -> int:
        _epoch: int = self._version.epoch
        return _epoch

    @property
    def release(self) -> Tuple[int, ...]:
        _release: Tuple[int, ...] = self._version.release
        return _release

    @property
    def pre(self) -> Optional[Tuple[str, int]]:
        _pre: Optional[Tuple[str, int]] = self._version.pre
        return _pre

    @property
    def post(self) -> Optional[int]:
        return self._version.post[1] if self._version.post else None

    @property
    def dev(self) -> Optional[int]:
        return self._version.dev[1] if self._version.dev else None

    @property
    def local(self) -> Optional[str]:
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
            return None

    @property
    def public(self) -> str:
        return str(self).split("+", 1)[0]

    @property
    def base_version(self) -> str:
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        return "".join(parts)

    @property
    def is_prerelease(self) -> bool:
        return self.dev is not None or self.pre is not None

    @property
    def is_postrelease(self) -> bool:
        return self.post is not None

    @property
    def is_devrelease(self) -> bool:
        return self.dev is not None

    @property
    def major(self) -> int:
        return self.release[0] if len(self.release) >= 1 else 0

    @property
    def minor(self) -> int:
        return self.release[1] if len(self.release) >= 2 else 0

    @property
    def micro(self) -> int:
        return self.release[2] if len(self.release) >= 3 else 0


def _parse_letter_version(
    letter: str, number: Union[str, bytes, SupportsInt]
) -> Optional[Tuple[str, int]]:

    if letter:
        # We consider there to be an implicit 0 in a pre-release if there is
        # not a numeral associated with it.
        if number is None:
            number = 0

        # We normalize any letters to their lower case form
        letter = letter.lower()

        # We consider some words to be alternate spellings of other words and
        # in those cases we want to normalize the spellings to our preferred
        # spelling.
        if letter == "alpha":
            letter = "a"
        elif letter == "beta":
            letter = "b"
        elif letter in ["c", "pre", "preview"]:
            letter = "rc"
        elif letter in ["rev", "r"]:
            letter = "post"

        return letter, int(number)
    if not letter and number:
        # We assume if we are given a number, but we are not given a letter
        # then this is using the implicit post release syntax (e.g. 1.0-1)
        letter = "post"

        return letter, int(number)

    return None


_local_version_separators = re.compile(r"[\._-]")


def _parse_local_version(local: str) -> Optional[LocalType]:
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_separators.split(local)
        )
    return None


def _cmpkey(
    epoch: int,
    release: Tuple[int, ...],
    pre: Optional[Tuple[str, int]],
    post: Optional[Tuple[str, int]],
    dev: Optional[Tuple[str, int]],
    local: Optional[Tuple[SubLocalType]],
) -> CmpKey:

    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non zero, then take the rest
    # re-reverse it back into the correct order and make it a tuple and use
    # that for our sorting key.
    _release = tuple(
        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        _pre: PrePostDevType = NegativeInfinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        _pre = Infinity
    else:
        _pre = pre

    # Versions without a post segment should sort before those with one.
    if post is None:
        _post: PrePostDevType = NegativeInfinity

    else:
        _post = post

    # Versions without a development segment should sort after those with one.
    if dev is None:
        _dev: PrePostDevType = Infinity

    else:
        _dev = dev

    if local is None:
        # Versions without a local segment should sort before those with one.
        _local: LocalType = NegativeInfinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        _local = tuple(
            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
        )

    return epoch, _release, _pre, _post, _dev, _local
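For context, a short sketch of how the module above behaves at its seams: `parse()` falls back to `LegacyVersion` (with a `DeprecationWarning`) when the PEP 440 regex rejects the string, and `_cmpkey` is what makes dev/pre/post releases sort the way PEP 440 requires. (This imports the library under its usual top-level `packaging` name; the vendored copy shown here behaves identically.)

```python
from packaging.version import Version, parse

# A PEP 440 string yields a structured Version.
v = parse("1.4.0.post2+ubuntu1")
assert isinstance(v, Version)
assert v.release == (1, 4, 0) and v.post == 2 and v.local == "ubuntu1"

# A non-PEP-440 string falls back to LegacyVersion (and warns).
legacy = parse("not.a.version")
assert not isinstance(legacy, Version)

# _cmpkey ordering: dev < pre < final < post.
ordered = sorted(Version(s) for s in ["1.0.post1", "1.0", "1.0a1", "1.0.dev0"])
assert [str(x) for x in ordered] == ["1.0.dev0", "1.0a1", "1.0", "1.0.post1"]
```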
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/archive_util.py
DELETED
@@ -1,213 +0,0 @@
"""Utilities for extracting common archive formats"""

import zipfile
import tarfile
import os
import shutil
import posixpath
import contextlib
from distutils.errors import DistutilsError

from ._path import ensure_directory

__all__ = [
    "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
    "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
]


class UnrecognizedFormat(DistutilsError):
    """Couldn't recognize the archive type"""


def default_filter(src, dst):
    """The default progress/filter callback; returns True for all files"""
    return dst


def unpack_archive(
        filename, extract_dir, progress_filter=default_filter,
        drivers=None):
    """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``

    `progress_filter` is a function taking two arguments: a source path
    internal to the archive ('/'-separated), and a filesystem path where it
    will be extracted. The callback must return the desired extract path
    (which may be the same as the one passed in), or else ``None`` to skip
    that file or directory. The callback can thus be used to report on the
    progress of the extraction, as well as to filter the items extracted or
    alter their extraction paths.

    `drivers`, if supplied, must be a non-empty sequence of functions with the
    same signature as this function (minus the `drivers` argument), that raise
    ``UnrecognizedFormat`` if they do not support extracting the designated
    archive type. The `drivers` are tried in sequence until one is found that
    does not raise an error, or until all are exhausted (in which case
    ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
    drivers, the module's ``extraction_drivers`` constant will be used, which
    means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
    order.
    """
    for driver in drivers or extraction_drivers:
        try:
            driver(filename, extract_dir, progress_filter)
        except UnrecognizedFormat:
            continue
        else:
            return
    else:
        raise UnrecognizedFormat(
            "Not a recognized archive type: %s" % filename
        )


def unpack_directory(filename, extract_dir, progress_filter=default_filter):
    """"Unpack" a directory, using the same interface as for archives

    Raises ``UnrecognizedFormat`` if `filename` is not a directory
    """
    if not os.path.isdir(filename):
        raise UnrecognizedFormat("%s is not a directory" % filename)

    paths = {
        filename: ('', extract_dir),
    }
    for base, dirs, files in os.walk(filename):
        src, dst = paths[base]
        for d in dirs:
            paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
        for f in files:
            target = os.path.join(dst, f)
            target = progress_filter(src + f, target)
            if not target:
                # skip non-files
                continue
            ensure_directory(target)
            f = os.path.join(base, f)
            shutil.copyfile(f, target)
            shutil.copystat(f, target)


def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack zip `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
    by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.
    """

    if not zipfile.is_zipfile(filename):
        raise UnrecognizedFormat("%s is not a zip file" % (filename,))

    with zipfile.ZipFile(filename) as z:
        _unpack_zipfile_obj(z, extract_dir, progress_filter)


def _unpack_zipfile_obj(zipfile_obj, extract_dir, progress_filter=default_filter):
    """Internal/private API used by other parts of setuptools.
    Similar to ``unpack_zipfile``, but receives an already opened :obj:`zipfile.ZipFile`
    object instead of a filename.
    """
    for info in zipfile_obj.infolist():
        name = info.filename

        # don't extract absolute paths or ones with .. in them
        if name.startswith('/') or '..' in name.split('/'):
            continue

        target = os.path.join(extract_dir, *name.split('/'))
        target = progress_filter(name, target)
        if not target:
            continue
        if name.endswith('/'):
            # directory
            ensure_directory(target)
        else:
            # file
            ensure_directory(target)
            data = zipfile_obj.read(info.filename)
            with open(target, 'wb') as f:
                f.write(data)
        unix_attributes = info.external_attr >> 16
        if unix_attributes:
            os.chmod(target, unix_attributes)


def _resolve_tar_file_or_dir(tar_obj, tar_member_obj):
    """Resolve any links and extract link targets as normal files."""
    while tar_member_obj is not None and (
            tar_member_obj.islnk() or tar_member_obj.issym()):
        linkpath = tar_member_obj.linkname
        if tar_member_obj.issym():
            base = posixpath.dirname(tar_member_obj.name)
            linkpath = posixpath.join(base, linkpath)
            linkpath = posixpath.normpath(linkpath)
        tar_member_obj = tar_obj._getmember(linkpath)

    is_file_or_dir = (
        tar_member_obj is not None and
        (tar_member_obj.isfile() or tar_member_obj.isdir())
    )
    if is_file_or_dir:
        return tar_member_obj

    raise LookupError('Got unknown file type')


def _iter_open_tar(tar_obj, extract_dir, progress_filter):
    """Emit member-destination pairs from a tar archive."""
    # don't do any chowning!
    tar_obj.chown = lambda *args: None

    with contextlib.closing(tar_obj):
        for member in tar_obj:
            name = member.name
            # don't extract absolute paths or ones with .. in them
            if name.startswith('/') or '..' in name.split('/'):
                continue

            prelim_dst = os.path.join(extract_dir, *name.split('/'))

            try:
                member = _resolve_tar_file_or_dir(tar_obj, member)
            except LookupError:
                continue

            final_dst = progress_filter(name, prelim_dst)
            if not final_dst:
                continue

            if final_dst.endswith(os.sep):
                final_dst = final_dst[:-1]

            yield member, final_dst


def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
    by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.
    """
    try:
        tarobj = tarfile.open(filename)
    except tarfile.TarError as e:
        raise UnrecognizedFormat(
            "%s is not a compressed or uncompressed tar file" % (filename,)
        ) from e

    for member, final_dst in _iter_open_tar(
            tarobj, extract_dir, progress_filter,
    ):
        try:
            # XXX Ugh
            tarobj._extract_member(member, final_dst)
        except tarfile.ExtractError:
            # chown/chmod/mkfifo/mknode/makedev failed
            pass

    return True


extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
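A small sketch of the `progress_filter` contract described in the `unpack_archive` docstring above: return a destination path to extract the entry, or `None` to skip it. The archive and output names here are hypothetical:

```python
import os
from setuptools.archive_util import unpack_archive

def log_and_skip_hidden(src, dst):
    # src is the '/'-separated path inside the archive; dst is the proposed target.
    if os.path.basename(src).startswith("."):
        return None  # returning None skips this entry
    print(f"extracting {src} -> {dst}")
    return dst  # returning a path extracts to that path

unpack_archive("dist/example.tar.gz", "build/out", progress_filter=log_and_skip_hidden)
```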
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/data_loading.md
DELETED
@@ -1,77 +0,0 @@
# Use Custom Dataloaders

## How the Existing Dataloader Works

Detectron2 contains a builtin data loading pipeline.
It's good to understand how it works, in case you need to write a custom one.

Detectron2 provides two functions
[build_detection_{train,test}_loader](../modules/data.html#detectron2.data.build_detection_train_loader)
that create a default data loader from a given config.
Here is how `build_detection_{train,test}_loader` works:

1. It takes the name of a registered dataset (e.g., "coco_2017_train") and loads a `list[dict]` representing the dataset items
   in a lightweight, canonical format. These dataset items are not yet ready to be used by the model (e.g., images are
   not loaded into memory, random augmentations have not been applied, etc.).
   Details about the dataset format and dataset registration can be found in
   [datasets](datasets.html).
2. Each dict in this list is mapped by a function ("mapper"):
   * Users can customize this mapping function by specifying the "mapper" argument in
     `build_detection_{train,test}_loader`. The default mapper is [DatasetMapper](../modules/data.html#detectron2.data.DatasetMapper).
   * The output format of such a function can be arbitrary, as long as it is accepted by the consumer of this data loader (usually the model).
     The outputs of the default mapper, after batching, follow the default model input format documented in
     [Use Models](https://detectron2.readthedocs.io/tutorials/models.html#model-input-format).
   * The role of the mapper is to transform the lightweight, canonical representation of a dataset item into a format
     that is ready for the model to consume (including, e.g., reading images, performing random data augmentation and converting to torch Tensors).
     If you would like to perform custom transformations to data, you often want a custom mapper.
3. The outputs of the mapper are batched (simply into a list).
4. This batched data is the output of the data loader. Typically, it's also the input of
   `model.forward()`.


## Write a Custom Dataloader

Using a different "mapper" with `build_detection_{train,test}_loader(mapper=)` works for most use cases
of custom data loading.
For example, if you want to resize all images to a fixed size for Mask R-CNN training, write this:

```python
import copy
import torch

from detectron2.data import build_detection_train_loader
from detectron2.data import transforms as T
from detectron2.data import detection_utils as utils

def mapper(dataset_dict):
    # Implement a mapper, similar to the default DatasetMapper, but with your own customizations
    dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
    image = utils.read_image(dataset_dict["file_name"], format="BGR")
    image, transforms = T.apply_transform_gens([T.Resize((800, 800))], image)
    dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2])
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict

data_loader = build_detection_train_loader(cfg, mapper=mapper)
# use this dataloader instead of the default
```

Refer to [API documentation of detectron2.data](../modules/data.html) for details.

If you want to change not only the mapper (e.g., to write different sampling or batching logic),
you can write your own data loader. The data loader is simply a
python iterator that produces [the format](models.html) your model accepts.
You can implement it using any tools you like.

## Use a Custom Dataloader

If you use [DefaultTrainer](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer),
you can overwrite its `build_{train,test}_loader` method to use your own dataloader.
See the [densepose dataloader](../../projects/DensePose/train_net.py)
for an example.

If you write your own training loop, you can plug in your data loader easily.
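As a concrete sketch of the `DefaultTrainer` override described in the tutorial above (reusing the `mapper` from the earlier snippet; this mirrors the densepose approach rather than copying it):

```python
from detectron2.data import build_detection_train_loader
from detectron2.engine import DefaultTrainer

class TrainerWithCustomLoader(DefaultTrainer):
    @classmethod
    def build_train_loader(cls, cfg):
        # Plug the custom mapper into the default sampling/batching pipeline.
        return build_detection_train_loader(cfg, mapper=mapper)
```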
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/malloc_and_free.h
DELETED
@@ -1,104 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/system/cuda/detail/guarded_cuda_runtime_api.h>

#include <thrust/detail/config.h>
#include <thrust/detail/raw_pointer_cast.h>
#include <thrust/detail/raw_reference_cast.h>
#include <thrust/detail/seq.h>
#include <thrust/system/cuda/config.h>
#ifdef THRUST_CACHING_DEVICE_MALLOC
#include <cub/util_allocator.cuh>
#endif
#include <thrust/system/cuda/detail/util.h>
#include <thrust/system/detail/bad_alloc.h>
#include <thrust/detail/malloc_and_free.h>

namespace thrust
{
namespace cuda_cub {

#ifdef THRUST_CACHING_DEVICE_MALLOC
#define __CUB_CACHING_MALLOC
#ifndef __CUDA_ARCH__
inline cub::CachingDeviceAllocator &get_allocator()
{
  static cub::CachingDeviceAllocator g_allocator(true);
  return g_allocator;
}
#endif
#endif


// note that malloc returns a raw pointer to avoid
// depending on the heavyweight thrust/system/cuda/memory.h header
template<typename DerivedPolicy>
__host__ __device__
void *malloc(execution_policy<DerivedPolicy> &, std::size_t n)
{
  void *result = 0;

  if (THRUST_IS_HOST_CODE) {
    #if THRUST_INCLUDE_HOST_CODE
      #ifdef __CUB_CACHING_MALLOC
        cub::CachingDeviceAllocator &alloc = get_allocator();
        cudaError_t status = alloc.DeviceAllocate(&result, n);
      #else
        cudaError_t status = cudaMalloc(&result, n);
      #endif

      if(status != cudaSuccess)
      {
        cudaGetLastError(); // Clear global CUDA error state.
        throw thrust::system::detail::bad_alloc(thrust::cuda_category().message(status).c_str());
      }
    #endif
  } else {
    #if THRUST_INCLUDE_DEVICE_CODE
      result = thrust::raw_pointer_cast(thrust::malloc(thrust::seq, n));
    #endif
  }

  return result;
} // end malloc()


template<typename DerivedPolicy, typename Pointer>
__host__ __device__
void free(execution_policy<DerivedPolicy> &, Pointer ptr)
{
  if (THRUST_IS_HOST_CODE) {
    #if THRUST_INCLUDE_HOST_CODE
      #ifdef __CUB_CACHING_MALLOC
        cub::CachingDeviceAllocator &alloc = get_allocator();
        cudaError_t status = alloc.DeviceFree(thrust::raw_pointer_cast(ptr));
      #else
        cudaError_t status = cudaFree(thrust::raw_pointer_cast(ptr));
      #endif
      cuda_cub::throw_on_error(status, "device free failed");
    #endif
  } else {
    #if THRUST_INCLUDE_DEVICE_CODE
      thrust::free(thrust::seq, ptr);
    #endif
  }
} // end free()

} // namespace cuda_cub
} // end namespace thrust
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/logical.h
DELETED
@@ -1,44 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// the purpose of this header is to #include the logical.h header
// of the sequential, host, and device systems. It should be #included in any
// code which uses adl to dispatch logical

#include <thrust/system/detail/sequential/logical.h>

// SCons can't see through the #defines below to figure out what this header
// includes, so we fake it out by specifying all possible files we might end up
// including inside an #if 0.
#if 0
#include <thrust/system/cpp/detail/logical.h>
#include <thrust/system/cuda/detail/logical.h>
#include <thrust/system/omp/detail/logical.h>
#include <thrust/system/tbb/detail/logical.h>
#endif

#define __THRUST_HOST_SYSTEM_LOGICAL_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/logical.h>
#include __THRUST_HOST_SYSTEM_LOGICAL_HEADER
#undef __THRUST_HOST_SYSTEM_LOGICAL_HEADER

#define __THRUST_DEVICE_SYSTEM_LOGICAL_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/logical.h>
#include __THRUST_DEVICE_SYSTEM_LOGICAL_HEADER
#undef __THRUST_DEVICE_SYSTEM_LOGICAL_HEADER
spaces/CVPR/regionclip-demo/detectron2/layers/csrc/deformable/deform_conv.h
DELETED
@@ -1,377 +0,0 @@
|
|
1 |
-
// Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
#pragma once
|
3 |
-
#include <torch/types.h>
|
4 |
-
|
5 |
-
namespace detectron2 {
|
6 |
-
|
7 |
-
#if defined(WITH_CUDA) || defined(WITH_HIP)
|
8 |
-
int deform_conv_forward_cuda(
|
9 |
-
at::Tensor input,
|
10 |
-
at::Tensor weight,
|
11 |
-
at::Tensor offset,
|
12 |
-
at::Tensor output,
|
13 |
-
at::Tensor columns,
|
14 |
-
at::Tensor ones,
|
15 |
-
int kW,
|
16 |
-
int kH,
|
17 |
-
int dW,
|
18 |
-
int dH,
|
19 |
-
int padW,
|
20 |
-
int padH,
|
21 |
-
int dilationW,
|
22 |
-
int dilationH,
|
23 |
-
int group,
|
24 |
-
int deformable_group,
|
25 |
-
int im2col_step);
|
26 |
-
|
27 |
-
int deform_conv_backward_input_cuda(
|
28 |
-
at::Tensor input,
|
29 |
-
at::Tensor offset,
|
30 |
-
at::Tensor gradOutput,
|
31 |
-
at::Tensor gradInput,
|
32 |
-
at::Tensor gradOffset,
|
33 |
-
at::Tensor weight,
|
34 |
-
at::Tensor columns,
|
35 |
-
int kW,
|
36 |
-
int kH,
|
37 |
-
int dW,
|
38 |
-
int dH,
|
39 |
-
int padW,
|
40 |
-
int padH,
|
41 |
-
int dilationW,
|
42 |
-
int dilationH,
|
43 |
-
int group,
|
44 |
-
int deformable_group,
|
45 |
-
int im2col_step);
|
46 |
-
|
47 |
-
int deform_conv_backward_parameters_cuda(
|
48 |
-
at::Tensor input,
|
49 |
-
at::Tensor offset,
|
50 |
-
at::Tensor gradOutput,
|
51 |
-
at::Tensor gradWeight, // at::Tensor gradBias,
|
52 |
-
at::Tensor columns,
|
53 |
-
at::Tensor ones,
|
54 |
-
int kW,
|
55 |
-
int kH,
|
56 |
-
int dW,
|
57 |
-
int dH,
|
58 |
-
int padW,
|
59 |
-
int padH,
|
60 |
-
int dilationW,
|
61 |
-
int dilationH,
|
62 |
-
int group,
|
63 |
-
int deformable_group,
|
64 |
-
float scale,
|
65 |
-
int im2col_step);
|
66 |
-
|
67 |
-
void modulated_deform_conv_cuda_forward(
|
68 |
-
at::Tensor input,
|
69 |
-
at::Tensor weight,
|
70 |
-
at::Tensor bias,
|
71 |
-
at::Tensor ones,
|
72 |
-
at::Tensor offset,
|
73 |
-
at::Tensor mask,
|
74 |
-
at::Tensor output,
|
75 |
-
at::Tensor columns,
|
76 |
-
int kernel_h,
|
77 |
-
int kernel_w,
|
78 |
-
const int stride_h,
|
79 |
-
const int stride_w,
|
80 |
-
const int pad_h,
|
81 |
-
const int pad_w,
|
82 |
-
const int dilation_h,
|
83 |
-
const int dilation_w,
|
84 |
-
const int group,
|
85 |
-
const int deformable_group,
|
86 |
-
const bool with_bias);
|
87 |
-
|
88 |
-
void modulated_deform_conv_cuda_backward(
|
89 |
-
at::Tensor input,
|
90 |
-
at::Tensor weight,
|
91 |
-
at::Tensor bias,
|
92 |
-
at::Tensor ones,
|
93 |
-
at::Tensor offset,
|
94 |
-
at::Tensor mask,
|
95 |
-
at::Tensor columns,
|
96 |
-
at::Tensor grad_input,
|
97 |
-
at::Tensor grad_weight,
|
98 |
-
at::Tensor grad_bias,
|
99 |
-
at::Tensor grad_offset,
|
100 |
-
at::Tensor grad_mask,
|
101 |
-
at::Tensor grad_output,
|
102 |
-
int kernel_h,
|
103 |
-
int kernel_w,
|
104 |
-
int stride_h,
|
105 |
-
int stride_w,
|
106 |
-
int pad_h,
|
107 |
-
int pad_w,
|
108 |
-
int dilation_h,
|
109 |
-
int dilation_w,
|
110 |
-
int group,
|
111 |
-
int deformable_group,
|
112 |
-
const bool with_bias);
|
113 |
-
|
114 |
-
#endif
|
115 |
-
|
116 |
-
inline int deform_conv_forward(
|
117 |
-
at::Tensor input,
|
118 |
-
at::Tensor weight,
|
119 |
-
at::Tensor offset,
|
120 |
-
at::Tensor output,
|
121 |
-
at::Tensor columns,
|
122 |
-
at::Tensor ones,
|
123 |
-
int kW,
|
124 |
-
int kH,
|
125 |
-
int dW,
|
126 |
-
int dH,
|
127 |
-
int padW,
|
128 |
-
int padH,
|
129 |
-
int dilationW,
|
130 |
-
int dilationH,
|
131 |
-
int group,
|
132 |
-
int deformable_group,
|
133 |
-
int im2col_step) {
|
134 |
-
if (input.is_cuda()) {
|
135 |
-
#if defined(WITH_CUDA) || defined(WITH_HIP)
|
136 |
-
TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!");
|
137 |
-
TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!");
|
138 |
-
return deform_conv_forward_cuda(
|
139 |
-
input,
|
140 |
-
weight,
|
141 |
-
offset,
|
142 |
-
output,
|
143 |
-
columns,
|
144 |
-
ones,
|
145 |
-
kW,
|
146 |
-
kH,
|
147 |
-
dW,
|
148 |
-
dH,
|
149 |
-
padW,
|
150 |
-
padH,
|
151 |
-
dilationW,
|
152 |
-
dilationH,
|
153 |
-
group,
|
154 |
-
deformable_group,
|
155 |
-
im2col_step);
|
156 |
-
#else
|
157 |
-
AT_ERROR("Not compiled with GPU support");
|
158 |
-
#endif
|
159 |
-
}
|
160 |
-
AT_ERROR("Not implemented on the CPU");
|
161 |
-
}
|
162 |
-
|
163 |
-
inline int deform_conv_backward_input(
|
164 |
-
at::Tensor input,
|
165 |
-
at::Tensor offset,
|
166 |
-
at::Tensor gradOutput,
|
167 |
-
at::Tensor gradInput,
|
168 |
-
at::Tensor gradOffset,
|
169 |
-
at::Tensor weight,
|
170 |
-
at::Tensor columns,
|
171 |
-
int kW,
|
172 |
-
int kH,
|
173 |
-
int dW,
|
174 |
-
int dH,
|
175 |
-
int padW,
|
176 |
-
int padH,
|
177 |
-
int dilationW,
|
178 |
-
int dilationH,
|
179 |
-
int group,
|
180 |
-
int deformable_group,
|
181 |
-
int im2col_step) {
|
182 |
-
if (gradOutput.is_cuda()) {
|
183 |
-
#if defined(WITH_CUDA) || defined(WITH_HIP)
|
184 |
-
TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!");
|
185 |
-
TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!");
|
186 |
-
TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!");
|
187 |
-
return deform_conv_backward_input_cuda(
|
188 |
-
input,
|
189 |
-
offset,
|
190 |
-
gradOutput,
|
191 |
-
gradInput,
|
192 |
-
gradOffset,
|
193 |
-
weight,
|
194 |
-
columns,
|
195 |
-
kW,
|
196 |
-
kH,
|
197 |
-
dW,
|
198 |
-
dH,
|
199 |
-
padW,
|
200 |
-
padH,
|
201 |
-
dilationW,
|
202 |
-
dilationH,
|
203 |
-
group,
|
204 |
-
deformable_group,
|
205 |
-
im2col_step);
|
206 |
-
#else
|
207 |
-
AT_ERROR("Not compiled with GPU support");
|
208 |
-
#endif
|
209 |
-
}
|
210 |
-
AT_ERROR("Not implemented on the CPU");
|
211 |
-
}
|
212 |
-
|
213 |
-
inline int deform_conv_backward_filter(
|
214 |
-
at::Tensor input,
|
215 |
-
at::Tensor offset,
|
216 |
-
at::Tensor gradOutput,
|
217 |
-
at::Tensor gradWeight, // at::Tensor gradBias,
|
218 |
-
at::Tensor columns,
|
219 |
-
at::Tensor ones,
|
220 |
-
int kW,
|
221 |
-
int kH,
|
222 |
-
int dW,
|
223 |
-
int dH,
|
224 |
-
int padW,
|
225 |
-
int padH,
|
226 |
-
int dilationW,
|
227 |
-
int dilationH,
|
228 |
-
int group,
|
229 |
-
int deformable_group,
|
230 |
-
float scale,
|
231 |
-
int im2col_step) {
|
232 |
-
if (gradOutput.is_cuda()) {
|
233 |
-
#if defined(WITH_CUDA) || defined(WITH_HIP)
|
234 |
-
TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!");
|
235 |
-
TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!");
|
236 |
-
    return deform_conv_backward_parameters_cuda(
        input,
        offset,
        gradOutput,
        gradWeight,
        columns,
        ones,
        kW,
        kH,
        dW,
        dH,
        padW,
        padH,
        dilationW,
        dilationH,
        group,
        deformable_group,
        scale,
        im2col_step);
#else
    AT_ERROR("Not compiled with GPU support");
#endif
  }
  AT_ERROR("Not implemented on the CPU");
}

inline void modulated_deform_conv_forward(
    at::Tensor input,
    at::Tensor weight,
    at::Tensor bias,
    at::Tensor ones,
    at::Tensor offset,
    at::Tensor mask,
    at::Tensor output,
    at::Tensor columns,
    int kernel_h,
    int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    const int dilation_h,
    const int dilation_w,
    const int group,
    const int deformable_group,
    const bool with_bias) {
  if (input.is_cuda()) {
#if defined(WITH_CUDA) || defined(WITH_HIP)
    TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!");
    TORCH_CHECK(bias.is_cuda(), "bias tensor is not on GPU!");
    TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!");
    return modulated_deform_conv_cuda_forward(
        input,
        weight,
        bias,
        ones,
        offset,
        mask,
        output,
        columns,
        kernel_h,
        kernel_w,
        stride_h,
        stride_w,
        pad_h,
        pad_w,
        dilation_h,
        dilation_w,
        group,
        deformable_group,
        with_bias);
#else
    AT_ERROR("Not compiled with GPU support");
#endif
  }
  AT_ERROR("Not implemented on the CPU");
}

inline void modulated_deform_conv_backward(
    at::Tensor input,
    at::Tensor weight,
    at::Tensor bias,
    at::Tensor ones,
    at::Tensor offset,
    at::Tensor mask,
    at::Tensor columns,
    at::Tensor grad_input,
    at::Tensor grad_weight,
    at::Tensor grad_bias,
    at::Tensor grad_offset,
    at::Tensor grad_mask,
    at::Tensor grad_output,
    int kernel_h,
    int kernel_w,
    int stride_h,
    int stride_w,
    int pad_h,
    int pad_w,
    int dilation_h,
    int dilation_w,
    int group,
    int deformable_group,
    const bool with_bias) {
  if (grad_output.is_cuda()) {
#if defined(WITH_CUDA) || defined(WITH_HIP)
    TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!");
    TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!");
    TORCH_CHECK(bias.is_cuda(), "bias tensor is not on GPU!");
    TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!");
    return modulated_deform_conv_cuda_backward(
        input,
        weight,
        bias,
        ones,
        offset,
        mask,
        columns,
        grad_input,
        grad_weight,
        grad_bias,
        grad_offset,
        grad_mask,
        grad_output,
        kernel_h,
        kernel_w,
        stride_h,
        stride_w,
        pad_h,
        pad_w,
        dilation_h,
        dilation_w,
        group,
        deformable_group,
        with_bias);
#else
    AT_ERROR("Not compiled with GPU support");
#endif
  }
  AT_ERROR("Not implemented on the CPU");
}

} // namespace detectron2
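These wrappers all follow one dispatch pattern: check whether the tensors live on the GPU, call the CUDA kernel when the extension was built with GPU support, and fail loudly otherwise, because no CPU kernel exists. A minimal Python sketch of that pattern, for illustration only (`_deform_conv_cuda` is a hypothetical stand-in, not part of detectron2's API):

import torch


def _deform_conv_cuda(x: torch.Tensor) -> torch.Tensor:
    # Hypothetical stand-in for the compiled CUDA kernel.
    return x


def deform_conv_dispatch(x: torch.Tensor) -> torch.Tensor:
    # GPU tensors go to the CUDA kernel; everything else is rejected,
    # mirroring the AT_ERROR("Not implemented on the CPU") branch above.
    if x.is_cuda:
        return _deform_conv_cuda(x)
    raise NotImplementedError("Not implemented on the CPU")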
spaces/CVPR/regionclip-demo/detectron2/structures/tsv_file.py
DELETED
@@ -1,352 +0,0 @@
import logging
import os
import json
import os.path as op
import numpy as np
from typing import List, Union
from collections import OrderedDict


def generate_lineidx(filein, idxout):
    idxout_tmp = idxout + '.tmp'
    with open(filein, 'r') as tsvin, open(idxout_tmp, 'w') as tsvout:
        fsize = os.fstat(tsvin.fileno()).st_size
        fpos = 0
        while fpos != fsize:
            tsvout.write(str(fpos) + "\n")
            tsvin.readline()
            fpos = tsvin.tell()
    os.rename(idxout_tmp, idxout)


def read_to_character(fp, c):
    result = []
    while True:
        s = fp.read(32)
        assert s != ''
        if c in s:
            result.append(s[: s.index(c)])
            break
        else:
            result.append(s)
    return ''.join(result)


class TSVFile(object):
    # NOTE: the flag is named if_generate_lineidx (as in TSVFileNew below) so it
    # does not shadow the module-level generate_lineidx function.
    def __init__(self, tsv_file, if_generate_lineidx=False):
        self.tsv_file = tsv_file
        self.lineidx = op.splitext(tsv_file)[0] + '.lineidx'
        self._fp = None
        self._lineidx = None
        # keep the pid of the process which opened the file;
        # if it is not equal to the current pid, we re-open the file.
        self.pid = None
        # generate lineidx if it does not exist
        if not op.isfile(self.lineidx) and if_generate_lineidx:
            generate_lineidx(self.tsv_file, self.lineidx)

    def __del__(self):
        if self._fp:
            self._fp.close()

    def __str__(self):
        return "TSVFile(tsv_file='{}')".format(self.tsv_file)

    def __repr__(self):
        return str(self)

    def num_rows(self):
        self._ensure_lineidx_loaded()
        return len(self._lineidx)

    def seek(self, idx):
        self._ensure_tsv_opened()
        self._ensure_lineidx_loaded()
        try:
            pos = self._lineidx[idx]
        except Exception:
            logging.info('{}-{}'.format(self.tsv_file, idx))
            raise
        self._fp.seek(pos)
        return [s.strip() for s in self._fp.readline().split('\t')]

    def seek_first_column(self, idx):
        self._ensure_tsv_opened()
        self._ensure_lineidx_loaded()
        pos = self._lineidx[idx]
        self._fp.seek(pos)
        return read_to_character(self._fp, '\t')

    def get_key(self, idx):
        return self.seek_first_column(idx)

    def __getitem__(self, index):
        return self.seek(index)

    def __len__(self):
        return self.num_rows()

    def _ensure_lineidx_loaded(self):
        if self._lineidx is None:
            # print('loading lineidx: {}'.format(self.lineidx))
            with open(self.lineidx, 'r') as fp:
                self._lineidx = [int(i.strip()) for i in fp.readlines()]

    def _ensure_tsv_opened(self):
        if self._fp is None:
            self._fp = open(self.tsv_file, 'r')
            self.pid = os.getpid()

        if self.pid != os.getpid():
            # print('re-open {} because the process id changed'.format(self.tsv_file))
            self._fp = open(self.tsv_file, 'r')
            self.pid = os.getpid()


class TSVFileNew(object):
    def __init__(self,
                 tsv_file: str,
                 if_generate_lineidx: bool = False,
                 lineidx: str = None,
                 class_selector: List[str] = None):
        self.tsv_file = tsv_file
        self.lineidx = op.splitext(tsv_file)[0] + '.lineidx' \
            if not lineidx else lineidx
        self.linelist = op.splitext(tsv_file)[0] + '.linelist'
        self.chunks = op.splitext(tsv_file)[0] + '.chunks'
        self._fp = None
        self._lineidx = None
        self._sample_indices = None
        self._class_boundaries = None
        self._class_selector = class_selector
        # keep the pid of the process which opened the file;
        # if it is not equal to the current pid, we re-open the file.
        self.pid = None
        # generate lineidx if it does not exist
        if not op.isfile(self.lineidx) and if_generate_lineidx:
            generate_lineidx(self.tsv_file, self.lineidx)

    def __del__(self):
        if self._fp:
            self._fp.close()

    def __str__(self):
        return "TSVFile(tsv_file='{}')".format(self.tsv_file)

    def __repr__(self):
        return str(self)

    def get_class_boundaries(self):
        return self._class_boundaries

    def num_rows(self):
        self._ensure_lineidx_loaded()
        return len(self._sample_indices)

    def seek(self, idx: int):
        self._ensure_tsv_opened()
        self._ensure_lineidx_loaded()
        try:
            pos = self._lineidx[self._sample_indices[idx]]
        except Exception:
            logging.info('=> {}-{}'.format(self.tsv_file, idx))
            raise
        self._fp.seek(pos)
        return [s.strip() for s in self._fp.readline().split('\t')]

    def seek_first_column(self, idx: int):
        self._ensure_tsv_opened()
        self._ensure_lineidx_loaded()
        pos = self._lineidx[idx]
        self._fp.seek(pos)
        return read_to_character(self._fp, '\t')

    def get_key(self, idx: int):
        return self.seek_first_column(idx)

    def __getitem__(self, index: int):
        return self.seek(index)

    def __len__(self):
        return self.num_rows()

    def _ensure_lineidx_loaded(self):
        if self._lineidx is None:
            # print('=> loading lineidx: {}'.format(self.lineidx))
            with open(self.lineidx, 'r') as fp:
                lines = fp.readlines()
                lines = [line.strip() for line in lines]
                self._lineidx = [int(line) for line in lines]

            # read the line list if it exists
            linelist = None
            if op.isfile(self.linelist):
                with open(self.linelist, 'r') as fp:
                    linelist = sorted(
                        [
                            int(line.strip())
                            for line in fp.readlines()
                        ]
                    )
            if op.isfile(self.chunks) and self._class_selector:
                self._sample_indices = []
                self._class_boundaries = []
                class_boundaries = json.load(open(self.chunks, 'r'))
                for class_name, boundary in class_boundaries.items():
                    start = len(self._sample_indices)
                    if class_name in self._class_selector:
                        for idx in range(boundary[0], boundary[1] + 1):
                            # NOTE: potentially slow when linelist is long; try to speed it up
                            if linelist and idx not in linelist:
                                continue
                            self._sample_indices.append(idx)
                    end = len(self._sample_indices)
                    self._class_boundaries.append((start, end))
            else:
                if linelist:
                    self._sample_indices = linelist
                else:
                    self._sample_indices = list(range(len(self._lineidx)))

    def _ensure_tsv_opened(self):
        if self._fp is None:
            self._fp = open(self.tsv_file, 'r')
            self.pid = os.getpid()

        if self.pid != os.getpid():
            logging.debug('=> re-open {} because the process id changed'.format(self.tsv_file))
            self._fp = open(self.tsv_file, 'r')
            self.pid = os.getpid()


class LRU(OrderedDict):
    """Limit size, evicting the least recently looked-up key when full.
    https://docs.python.org/3/library/collections.html#collections.OrderedDict
    """

    def __init__(self, maxsize=4, *args, **kwds):
        self.maxsize = maxsize
        super().__init__(*args, **kwds)

    def __getitem__(self, key):
        value = super().__getitem__(key)
        self.move_to_end(key)
        return value

    def __setitem__(self, key, value):
        if key in self:
            self.move_to_end(key)
        super().__setitem__(key, value)
        if len(self) > self.maxsize:
            oldest = next(iter(self))
            del self[oldest]


class CompositeTSVFile:
    def __init__(self,
                 file_list: Union[str, list],
                 root: str = '.',
                 class_selector: List[str] = None):
        if isinstance(file_list, str):
            self.file_list = load_list_file(file_list)
        else:
            assert isinstance(file_list, list)
            self.file_list = file_list

        self.root = root
        self.cache = LRU()
        self.tsvs = None
        self.chunk_sizes = None
        self.accum_chunk_sizes = None
        self._class_selector = class_selector
        self._class_boundaries = None
        self.initialized = False
        self.initialize()

    def get_key(self, index: int):
        idx_source, idx_row = self._calc_chunk_idx_row(index)
        k = self.tsvs[idx_source].get_key(idx_row)
        return '_'.join([self.file_list[idx_source], k])

    def get_class_boundaries(self):
        return self._class_boundaries

    def get_chunk_size(self):
        return self.chunk_sizes

    def num_rows(self):
        return sum(self.chunk_sizes)

    def _calc_chunk_idx_row(self, index: int):
        idx_chunk = 0
        idx_row = index
        while index >= self.accum_chunk_sizes[idx_chunk]:
            idx_chunk += 1
            idx_row = index - self.accum_chunk_sizes[idx_chunk - 1]
        return idx_chunk, idx_row

    def __getitem__(self, index: int):
        idx_source, idx_row = self._calc_chunk_idx_row(index)
        if idx_source not in self.cache:
            self.cache[idx_source] = TSVFileNew(
                op.join(self.root, self.file_list[idx_source]),
                class_selector=self._class_selector
            )
        return self.cache[idx_source].seek(idx_row)

    def __len__(self):
        return sum(self.chunk_sizes)

    def initialize(self):
        """
        This function has to be called in __init__ if cache_policy is
        enabled. Thus, let's always call it in __init__ to keep it simple.
        """
        if self.initialized:
            return
        tsvs = [
            TSVFileNew(
                op.join(self.root, f),
                class_selector=self._class_selector
            ) for f in self.file_list
        ]
        logging.info("Calculating chunk sizes ...")
        self.chunk_sizes = [len(tsv) for tsv in tsvs]

        self.accum_chunk_sizes = [0]
        for size in self.chunk_sizes:
            self.accum_chunk_sizes += [self.accum_chunk_sizes[-1] + size]
        self.accum_chunk_sizes = self.accum_chunk_sizes[1:]

        if (
            self._class_selector
            and all([tsv.get_class_boundaries() for tsv in tsvs])
        ):
            # Note: when using CompositeTSVFile, make sure that the classes contained
            # in each tsv file do not overlap. Otherwise, the class boundaries won't
            # be correct.
            self._class_boundaries = []
            offset = 0
            for tsv in tsvs:
                boundaries = tsv.get_class_boundaries()
                for bound in boundaries:
                    self._class_boundaries.append((bound[0] + offset, bound[1] + offset))
                offset += len(tsv)
        # NOTE: in the current setting, get_key is not used during training,
        # so we delete tsvs to save memory
        del tsvs
        self.initialized = True


def load_list_file(fname: str) -> List[str]:
    with open(fname, 'r') as fp:
        lines = fp.readlines()
    result = [line.strip() for line in lines]
    if len(result) > 0 and result[-1] == '':
        result = result[:-1]
    return result
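A minimal usage sketch for the classes above; `demo.tsv` is a made-up file created on the spot, and everything else uses only what this module defines. `generate_lineidx` records one byte offset per row, which is what lets `seek` jump straight to a line, and `LRU` caps how many file handles `CompositeTSVFile` keeps open:

# Build a tiny two-row TSV to index (hypothetical demo data).
with open("demo.tsv", "w") as f:
    f.write("img_0\tfirst row\n")
    f.write("img_1\tsecond row\n")

tsv = TSVFile("demo.tsv", if_generate_lineidx=True)  # writes demo.lineidx
print(len(tsv))        # 2
print(tsv.get_key(1))  # img_1  (first column only, via read_to_character)
print(tsv[0])          # ['img_0', 'first row']

# LRU eviction: a lookup refreshes a key, inserting past maxsize evicts.
cache = LRU(maxsize=2)
cache["a"] = 1
cache["b"] = 2
_ = cache["a"]      # "a" becomes most recently used
cache["c"] = 3      # evicts "b", the least recently used key
print(list(cache))  # ['a', 'c']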
spaces/CVPR/transfiner/configs/common/coco_schedule.py
DELETED
@@ -1,47 +0,0 @@
from fvcore.common.param_scheduler import MultiStepParamScheduler

from detectron2.config import LazyCall as L
from detectron2.solver import WarmupParamScheduler


def default_X_scheduler(num_X):
    """
    Returns the config for a default multi-step LR scheduler such as "1x", "3x",
    commonly referred to in papers, where every 1x has the total length of 1440k
    training images (~12 COCO epochs). LR is decayed twice at the end of training
    following the strategy defined in "Rethinking ImageNet Pretraining", Sec 4.

    Args:
        num_X: a positive real number

    Returns:
        DictConfig: configs that define the multiplier for LR during training
    """
    # total number of iterations assuming 16 batch size, using 1440000/16=90000
    total_steps_16bs = num_X * 90000

    if num_X <= 2:
        scheduler = L(MultiStepParamScheduler)(
            values=[1.0, 0.1, 0.01],
            # note that scheduler is scale-invariant. This is equivalent to
            # milestones=[6, 8, 9]
            milestones=[60000, 80000, 90000],
        )
    else:
        scheduler = L(MultiStepParamScheduler)(
            values=[1.0, 0.1, 0.01],
            milestones=[total_steps_16bs - 60000, total_steps_16bs - 20000, total_steps_16bs],
        )
    return L(WarmupParamScheduler)(
        scheduler=scheduler,
        warmup_length=1000 / total_steps_16bs,
        warmup_method="linear",
        warmup_factor=0.001,
    )


lr_multiplier_1x = default_X_scheduler(1)
lr_multiplier_2x = default_X_scheduler(2)
lr_multiplier_3x = default_X_scheduler(3)
lr_multiplier_6x = default_X_scheduler(6)
lr_multiplier_9x = default_X_scheduler(9)
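For context, the objects above are LazyCall configs, not schedulers yet. A brief usage sketch, assuming detectron2's LazyConfig machinery is available: `instantiate` materializes the config into a real fvcore scheduler that maps a training fraction in [0, 1) to an LR multiplier:

from detectron2.config import instantiate

# Materialize the "1x" schedule defined above into a callable scheduler.
lr_multiplier = instantiate(lr_multiplier_1x)

# With milestones at 60k/80k out of 90k iterations, the first decay
# lands at 2/3 of the run, so these should print 1.0 and 0.1.
print(lr_multiplier(0.5))
print(lr_multiplier(0.7))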
spaces/Cahlil/Speech-Recognition-with-Speaker-Segmentation/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Speech Recognition with Speaker Segmentation
emoji: 🚀
colorFrom: purple
colorTo: green
sdk: gradio
sdk_version: 2.8.14
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/inference.py
DELETED
@@ -1,98 +0,0 @@
from typing import Tuple, List

import cv2
import numpy as np
import supervision as sv
import torch
from PIL import Image
from torchvision.ops import box_convert

import groundingdino.datasets.transforms as T
from groundingdino.models import build_model
from groundingdino.util.misc import clean_state_dict
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import get_phrases_from_posmap


def preprocess_caption(caption: str) -> str:
    result = caption.lower().strip()
    if result.endswith("."):
        return result
    return result + "."


def load_model(model_config_path: str, model_checkpoint_path: str, device: str = "cuda"):
    args = SLConfig.fromfile(model_config_path)
    args.device = device
    model = build_model(args)
    checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
    model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
    model.eval()
    return model


def load_image(image_path: str) -> Tuple[np.array, torch.Tensor]:
    transform = T.Compose(
        [
            T.RandomResize([800], max_size=1333),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    image_source = Image.open(image_path).convert("RGB")
    image = np.asarray(image_source)
    image_transformed, _ = transform(image_source, None)
    return image, image_transformed


def predict(
    model,
    image: torch.Tensor,
    caption: str,
    box_threshold: float,
    text_threshold: float,
    device: str = "cuda"
) -> Tuple[torch.Tensor, torch.Tensor, List[str]]:
    caption = preprocess_caption(caption=caption)

    model = model.to(device)
    image = image.to(device)

    with torch.no_grad():
        outputs = model(image[None], captions=[caption])

    prediction_logits = outputs["pred_logits"].cpu().sigmoid()[0]  # prediction_logits.shape = (nq, 256)
    prediction_boxes = outputs["pred_boxes"].cpu()[0]  # prediction_boxes.shape = (nq, 4)

    mask = prediction_logits.max(dim=1)[0] > box_threshold
    logits = prediction_logits[mask]  # logits.shape = (n, 256)
    boxes = prediction_boxes[mask]  # boxes.shape = (n, 4)

    tokenizer = model.tokenizer
    tokenized = tokenizer(caption)

    phrases = [
        get_phrases_from_posmap(logit > text_threshold, tokenized, tokenizer).replace('.', '')
        for logit
        in logits
    ]

    return boxes, logits.max(dim=1)[0], phrases


def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor, phrases: List[str]) -> np.ndarray:
    h, w, _ = image_source.shape
    boxes = boxes * torch.Tensor([w, h, w, h])
    xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
    detections = sv.Detections(xyxy=xyxy)

    labels = [
        f"{phrase} {logit:.2f}"
        for phrase, logit
        in zip(phrases, logits)
    ]

    box_annotator = sv.BoxAnnotator()
    annotated_frame = cv2.cvtColor(image_source, cv2.COLOR_RGB2BGR)
    annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels)
    return annotated_frame
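An end-to-end sketch of how these helpers chain together. The paths below are placeholders, and the 0.35/0.25 thresholds are the values commonly used in GroundingDINO demos rather than anything mandated by this module:

# Hypothetical paths; point these at a real config, checkpoint, and image.
model = load_model("GroundingDINO_SwinT_OGC.py", "groundingdino_swint_ogc.pth")
image_source, image = load_image("example.jpg")

boxes, logits, phrases = predict(
    model=model,
    image=image,
    caption="anomaly . defect",
    box_threshold=0.35,
    text_threshold=0.25,
)

# annotate() returns a BGR frame, so it can be written with OpenCV directly.
annotated_frame = annotate(image_source, boxes, logits, phrases)
cv2.imwrite("annotated.jpg", annotated_frame)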
spaces/ChandraMohanNayal/AutoGPT/ui/utils.py
DELETED
@@ -1,31 +0,0 @@
import os
import re


def format_directory(directory):
    output = []

    def helper(directory, level, output):
        files = os.listdir(directory)
        for i, item in enumerate(files):
            is_folder = os.path.isdir(os.path.join(directory, item))
            joiner = "├── " if i < len(files) - 1 else "└── "
            item_html = item + "/" if is_folder else f"<a href='file={os.path.join(directory, item)}'>{item}</a>"
            output.append("│ " * level + joiner + item_html)
            if is_folder:
                helper(os.path.join(directory, item), level + 1, output)

    output.append(os.path.basename(directory) + "/")
    helper(directory, 1, output)
    return "\n".join(output)


DOWNLOAD_OUTPUTS_JS = """
() => {
    const a = document.createElement('a');
    a.href = 'file=outputs.zip';
    a.download = 'outputs.zip';
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
}"""


def remove_color(text):
    ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
    return ansi_escape.sub('', text)
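Two quick illustrative checks for the helpers above (the colored string is made up): `remove_color` strips ANSI escape sequences from captured terminal output, and `format_directory` renders a folder as a tree whose files are HTML links:

colored = "\x1b[31mERROR\x1b[0m: task failed"
print(remove_color(colored))  # prints: ERROR: task failed

# Render the current directory; folders recurse, files become <a> links.
print(format_directory("."))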
spaces/CognitiveLabs/GPT-auto-webscraping/chains/output_format/templates.py
DELETED
@@ -1,30 +0,0 @@
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate, PromptTemplate

# prompt templates
system_template_output_format = PromptTemplate(
    input_variables=['html_content'],
    template='''You are a helpful assistant that helps people extract JSON information from HTML content.

The input is HTML content.

The expected output is a JSON with the relevant information in the following html: {html_content}

Try to extract as much information as possible, including images, links, etc.

The assistant answer should ONLY contain the JSON information without any additional word or character.

The JSON output must have 1 depth level at most.

The expected output format is an array of objects.
''')

human_template_output_format = PromptTemplate(
    input_variables=['html_content'],
    template='this is the html content: {html_content}'
)

# chat prompt objects
system_message_prompt = SystemMessagePromptTemplate.from_template(system_template_output_format.template)
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template_output_format.template)
output_format_chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
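A small usage sketch, assuming a langchain version where `ChatPromptTemplate.format_messages` is available: it substitutes the `{html_content}` placeholder into both templates and returns the system/human message pair to send to a chat model (the HTML snippet is made up):

messages = output_format_chat_prompt.format_messages(
    html_content="<ul><li><a href='/a'>First</a></li><li><a href='/b'>Second</a></li></ul>"
)
for message in messages:
    print(type(message).__name__, "->", message.content[:60])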