Commit 88d6db9
Parent(s): c05d885
Update parquet files (step 21 of 397)
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/GTA IV EFLC Crack Razor1911 Download Get the Full Experience of GTA 4 with this Easy and Safe Method.md +0 -113
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gta Eflc Crack [UPD]-razor1911 Update Patch V1.1.1.0.epub.md +0 -39
- spaces/1gistliPinn/ChatGPT4/Derivations In Physics Class 12 Cbse Pdf Download !!BETTER!!.md +0 -80
- spaces/1gistliPinn/ChatGPT4/Examples/Anthony De Mello The Way To Love Pdf.md +0 -7
- spaces/1gistliPinn/ChatGPT4/Examples/Descargar Libro Ecuaciones Diferenciales Moises Lazaro LINK.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Explorations In Basic Biology 12th Edition Answer 45 VERIFIED.md +0 -15
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Warpath Mod Apk A Review of the Best Strategy Game with Unlimited Money and Gems.md +0 -101
- spaces/1phancelerku/anime-remove-background/Download SmartGaga Garena 3.0 The Best Free Fire Emulator for Low End PC.md +0 -129
- spaces/7hao/bingo/src/components/ui/voice/index.tsx +0 -28
- spaces/A00001/bingothoo/src/components/ui/dialog.tsx +0 -128
- spaces/AI-Hobbyist/Hoyo-RVC/Changelog_EN.md +0 -83
- spaces/AI-Hobbyist/Hoyo-RVC/export_onnx.py +0 -54
- spaces/AIConsultant/MusicGen/audiocraft/models/audiogen.py +0 -276
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/emotion/params_data.py +0 -29
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/vocoder/bigvgan/alias_free_torch/filter.py +0 -95
- spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/txt_processors/en.py +0 -78
- spaces/ALSv/FSW/CONTRIBUTING.md +0 -25
- spaces/ARTeLab/DTM_Estimation_SRandD/models/modelNetA.py +0 -381
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192/__init__.py +0 -0
- spaces/ATang0729/Forecast4Muses/Model/Model6/extensions/dataset_info.py +0 -104
- spaces/Abhay1210/prompt-generator_V1/README.md +0 -13
- spaces/Abhilashvj/planogram-compliance/models/experimental.py +0 -147
- spaces/AgentVerse/agentVerse/agentverse_command/main_tasksolving_cli.py +0 -34
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/rotate/Factory.js +0 -11
- spaces/AlanMars/QYL-AI-Space/modules/models/StableLM.py +0 -93
- spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/sanskrit.py +0 -62
- spaces/Ameaou/academic-chatgpt3.1/Dockerfile +0 -20
- spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/libJPG/jpge.h +0 -172
- spaces/Andy1621/uniformer_image_detection/configs/fcos/README.md +0 -35
- spaces/Andy1621/uniformer_image_detection/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py +0 -17
- spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py +0 -7
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/defaults.py +0 -74
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/moderations.py +0 -68
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/perplexity_colors/script.py +0 -309
- spaces/Apex-X/GODROOP/roop/processors/frame/__init__.py +0 -0
- spaces/Archan/ArXivAudio/search.py +0 -21
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_file.py +0 -552
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py +0 -130
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/version.py +0 -504
- spaces/BartPoint/VoiceChange_Beta/infer_pack/attentions.py +0 -417
- spaces/Benson/text-generation/Examples/1happybirthday.com En Descarga Tamil.md +0 -72
- spaces/Benson/text-generation/Examples/Descarga Quickbooks 2017 Premier.md +0 -96
- spaces/BernardoOlisan/vqganclip/CLIP/clip/model.py +0 -432
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/codec.py +0 -112
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/install_scripts.py +0 -70
- spaces/BilalSardar/Text-To-image-AllModels/app.py +0 -44
- spaces/Boadiwaa/Recipes/openai/api_resources/answer.py +0 -12
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/structures.py +0 -578
- spaces/CVPR/LIVE/thrust/thrust/mr/detail/config.h +0 -36
- spaces/CVPR/ml-talking-face/translator/v3.py +0 -58
spaces/1acneusushi/gradio-2dmoleculeeditor/data/GTA IV EFLC Crack Razor1911 Download Get the Full Experience of GTA 4 with this Easy and Safe Method.md
DELETED
@@ -1,113 +0,0 @@
-
-<h1>GTA IV EFLC Crack Razor1911 Download</h1>
-<p>Grand Theft Auto IV: Episodes from Liberty City (GTA IV EFLC) is a standalone expansion pack for the popular open-world action game Grand Theft Auto IV. It contains two separate stories, The Lost and Damned and The Ballad of Gay Tony, that add new characters, missions, weapons, vehicles, and music to the original game. However, to play GTA IV EFLC, you need to have a valid disc or online activation, which can be inconvenient or problematic for some players. That's why many people look for a crack that can bypass these requirements and let them enjoy the game without any hassle.</p>
-<h2>gta iv eflc crack razor1911 download</h2><br /><p><b><b>Download File</b> ★★★★★ <a href="https://byltly.com/2uKzb7">https://byltly.com/2uKzb7</a></b></p><br /><br />
-<p>One of the most famous and reliable cracks for GTA IV EFLC is made by Razor1911, a group of skilled hackers and programmers who have cracked many games in the past. Their crack replaces some of the original game files with modified ones that allow you to run the game without needing a disc or online activation. It also removes the need for Rockstar Games Social Club or Windows Live, which are often considered annoying or unnecessary by many players. With Razor1911's crack, you can play GTA IV EFLC as if it was a regular offline game.</p>
-<h2>How to download and install the crack</h2>
-<p>Downloading and installing Razor1911's crack for GTA IV EFLC is not very difficult, but you need to follow some steps carefully. First of all, you need to have GTA IV EFLC installed on your PC, preferably with patch 1.0.2.0. You also need to have enough space on your hard drive to backup the original files and copy the cracked ones. Here are the steps you need to take:</p>
-<ol>
-<li>Download the crack file from one of these sources: <br>
-<a href="https://libertycity.net/files/gta-4/62708-krjak-ot-razor1911-dlja-jepizodov.html">https://libertycity.net/files/gta-4/62708-krjak-ot-razor1911-dlja-jepizodov.html</a> <br>
-<a href="https://archive.org/details/gta4-Razor1911">https://archive.org/details/gta4-Razor1911</a> <br>
-The file size is about 4.4 MB and it contains two files: data.dll and LaunchEFLC.exe.</li>
-<li>Extract the crack file using a program like WinRAR or 7-Zip. You should see two files: data.dll and LaunchEFLC.exe.</li>
-<li>Go to your GTA IV EFLC installation folder, which is usually located at C:\Program Files (x86)\Rockstar Games\EFLC or C:\Program Files\Rockstar Games\EFLC.</li>
-<li>Make a backup of your original files by copying them to another folder or renaming them. The files you need to backup are data.dll and LaunchEFLC.exe.</li>
-<li>Copy the cracked files (data.dll and LaunchEFLC.exe) from the extracted folder to your GTA IV EFLC installation folder and overwrite the original files.</li>
-<li>Run the game by double-clicking on LaunchEFLC.exe. You should see a message saying "Razor 1911" before the game starts.</li>
-</ol>
-<p>Congratulations! You have successfully installed Razor1911's crack for GTA IV EFLC. You can now play the game without needing a disc or online activation.</p>
-<h2>Benefits of using the crack</h2>
-<p>Using Razor1911's crack for GTA IV EFLC has several benefits that can enhance your gaming experience. Here are some of them:</p>
-<ul>
-<li>You don't need a disc or online activation to play the game. This means you can play it anytime, anywhere, without worrying about losing or damaging your disc or having internet connection issues.</li>
-<li>You don't need Rockstar Games Social Club or Windows Live to play the game. This means you can avoid creating accounts, logging in, updating, or dealing with any errors or bugs that these programs may cause.</li>
-<li>You can improve your game performance and compatibility with mods by using DXVK & DxWrapper, which are available in "_Wrappers" folder in your GTA IV EFLC installation folder. These programs can fix stuttering, choppiness, low FPS, and other issues that plague GTA IV EFLC on modern PCs. They can also make your game work better with mods that add new features or graphics enhancements.</li>
-<li>You can switch between old and new radio stations with Radio Downgrader, which is included in your GTA IV EFLC installation folder as "Radio Downgrader.exe". This program allows you to choose whether you want to listen to the original radio stations from GTA IV EFLC or the updated ones that were added in later versions of the game. Some players prefer the old radio stations because they have more songs and less ads.</li>
-</ul>
-<h2>Risks and drawbacks of using the crack</h2>
-<p>While using Razor1911's crack for GTA IV EFLC has many benefits, it also has some risks and drawbacks that you should be aware of before using it. Here are some of them:</p>
-<ul>
-<li>You may face legal issues or malware infections if you download the crack from untrusted sources or use it for piracy purposes. The crack is meant for personal use only and not for distributing or selling copies of GTA IV EFLC without permission from Rockstar Games. You should also scan the crack file with an antivirus program before installing it to make sure it is safe and clean.</li>
-<li>You may lose some original radio tracks and online features if you use the crack. The crack removes some songs from certain radio stations due to licensing issues, which may affect your immersion or enjoyment of the game's atmosphere. The crack also disables online multiplayer mode, which means you won't be able to play with other players online or access any online content such as leaderboards or achievements.</li>
-<li>You may encounter incompatibility issues with some patches and updates if you use the crack. The crack is designed for patch 1.0.2.0 of GTA IV EFLC, which is not the latest version available for the game. If you try to install newer patches or updates on top of the crack, you may break your game or lose some features or fixes that were added in later versions.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Razor1911's crack for GTA IV EFLC is a useful tool that can help you play GTA IV EFLC without needing a disc or online activation. It also offers some advantages such as improved game performance and compatibility with mods, as well as the ability to switch between old and new radio stations with Radio Downgrader. However, it also comes with some risks and drawbacks such as possible legal issues and malware infections, loss of some original radio tracks and online features, and incompatibility with some patches and updates.</p>
-<p>gta iv episodes from liberty city razor1911 crack download<br />
-how to install gta iv eflc crack by razor1911<br />
-gta iv eflc razor1911 crack fix<br />
-gta iv eflc crack razor1911 free download<br />
-gta iv eflc razor1911 crack only<br />
-download gta iv eflc crack razor1911 rar<br />
-gta iv eflc crack razor1911 working 100<br />
-gta iv eflc crack razor1911 windows 10<br />
-gta iv eflc crack razor1911 kickass<br />
-gta iv eflc crack razor1911 no cd<br />
-gta iv eflc crack razor1911 steam<br />
-gta iv eflc crack razor1911 update<br />
-gta iv eflc crack razor1911 error<br />
-gta iv eflc crack razor1911 tutorial<br />
-gta iv eflc crack razor1911 mega<br />
-gta iv eflc crack razor1911 mediafire<br />
-gta iv eflc crack razor1911 skidrow<br />
-gta iv eflc crack razor1911 reloaded<br />
-gta iv eflc crack razor1911 patch<br />
-gta iv eflc crack razor1911 direct link<br />
-gta iv eflc crack razor1911 iso<br />
-gta iv eflc crack razor1911 torrent<br />
-gta iv eflc crack razor1911 full version<br />
-gta iv eflc crack razor1911 online<br />
-gta iv eflc crack razor1911 multiplayer<br />
-gta iv eflc crack razor1911 mods<br />
-gta iv eflc crack razor1911 cheats<br />
-gta iv eflc crack razor1911 trainer<br />
-gta iv eflc crack razor1911 gameplay<br />
-gta iv eflc crack razor1911 graphics<br />
-gta iv eflc crack razor1911 sound fix<br />
-gta iv eflc crack razor1911 lag fix<br />
-gta iv eflc crack razor1911 activation bypass<br />
-gta iv eflc crack razor1911 serial key<br />
-gta iv eflc crack razor1911 product key<br />
-gta iv eflc crack razor1911 license key<br />
-gta iv eflc crack razor1911 keygen<br />
-gta iv eflc crack razor1911 generator<br />
-gta iv eflc crack razor1911 download pc<br />
-gta iv eflc crack razor1911 download mac<br />
-gta iv eflc crack razor1911 download android<br />
-gta iv eflc crack razor1911 download ios<br />
-gta iv eflc crack razor1911 download apk<br />
-gta iv eflc crack razor1911 download zip<br />
-gta iv eflc crack razor1911 download utorrent<br />
-gta iv eflc crack razor1911 download google drive<br />
-gta iv eflc crack razor1911 download dropbox<br />
-gta iv eflc crack razor1911 download zippyshare<br />
-gta iv eflc crack razor1911 download 4shared</p>
-<p>If you decide to use Razor1911's crack for GTA IV EFLC, make sure you download it from trusted sources, scan it with an antivirus program, backup your original files, follow the installation instructions carefully, and use it at your own risk. Also, remember that this article is not an endorsement or promotion of piracy or illegal activities; it is only meant for informational purposes.</p>
-<p>We hope this article has helped you understand what Razor1911's crack for GTA IV EFLC does and how to use it properly. If you have any feedback or questions about this topic, feel free to leave a comment below.</p>
-<h3>Frequently Asked Questions</h3>
-<ol>
-<li><b>What is GTA IV EFLC?</b><br>
-GTA IV EFLC stands for Grand Theft Auto IV: Episodes from Liberty City, which is a standalone expansion pack for Grand Theft Auto IV that contains two separate stories: The Lost and Damned and The Ballad of Gay Tony.</li>
-<li><b>What is Razor1911?</b><br>
-Here is the rest of the article: Razor1911 is a group of skilled hackers and programmers who have cracked many games in the past. Their crack for GTA IV EFLC replaces some of the original game files with modified ones that allow you to run the game without needing a disc or online activation.</li>
-<li><b>How to download and install Razor1911's crack for GTA IV EFLC?</b><br>
-You can download Razor1911's crack for GTA IV EFLC from one of these sources: <br>
-<a href="https://libertycity.net/files/gta-4/62708-krjak-ot-razor1911-dlja-jepizodov.html">https://libertycity.net/files/gta-4/62708-krjak-ot-razor1911-dlja-jepizodov.html</a> <br>
-<a href="https://archive.org/details/gta4-Razor1911">https://archive.org/details/gta4-Razor1911</a> <br>
-The file size is about 4.4 MB and it contains two files: data.dll and LaunchEFLC.exe. To install the crack, you need to backup your original files and replace them with the cracked ones. Then, you can launch the game by double-clicking on LaunchEFLC.exe.</li>
-<li><b>What are the benefits of using Razor1911's crack for GTA IV EFLC?</b><br>
-Some of the benefits of using Razor1911's crack for GTA IV EFLC are: <br>
-- You don't need a disc or online activation to play the game. <br>
-- You don't need Rockstar Games Social Club or Windows Live to play the game. <br>
-- You can improve your game performance and compatibility with mods by using DXVK & DxWrapper. <br>
-- You can switch between old and new radio stations with Radio Downgrader.</li>
-<li><b>What are the risks and drawbacks of using Razor1911's crack for GTA IV EFLC?</b><br>
-Some of the risks and drawbacks of using Razor1911's crack for GTA IV EFLC are: <br>
-- You may face legal issues or malware infections if you download the crack from untrusted sources or use it for piracy purposes. <br>
-- You may lose some original radio tracks and online features if you use the crack. <br>
-- You may encounter incompatibility issues with some patches and updates if you use the crack.</li>
-</ol>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gta Eflc Crack [UPD]-razor1911 Update Patch V1.1.1.0.epub.md
DELETED
@@ -1,39 +0,0 @@
-
-<h1>How to Install GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0</h1>
-<p>GTA EFLC is a standalone expansion pack for the popular Grand Theft Auto IV game, which features two new stories: The Lost and Damned and The Ballad of Gay Tony. However, some players may encounter issues with the game's performance or compatibility, especially on newer systems. That's why you may need to install the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0, which fixes some bugs and improves the game's stability.</p>
-<p>In this article, we will show you how to download and install the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0 in a few simple steps.</p>
-<h2>Gta Eflc Crack-razor1911 Update Patch V1.1.1.0.epub</h2><br /><p><b><b>DOWNLOAD</b> ✶✶✶ <a href="https://byltly.com/2uKvok">https://byltly.com/2uKvok</a></b></p><br /><br />
-<h2>Step 1: Download the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0</h2>
-<p>The first thing you need to do is to download the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0 from a reliable source. You can find it on various torrent sites or file-sharing platforms, but make sure you scan it for viruses before opening it.</p>
-<p>The file name should be <em>Gta Eflc Crack-razor1911 Update Patch V1.1.1.0.epub</em>, and it should be around 200 MB in size.</p>
-<h2>Step 2: Extract the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0</h2>
-<p>Once you have downloaded the file, you need to extract it using a program like WinRAR or 7-Zip. You can right-click on the file and choose <em>Extract Here</em> or <em>Extract to Gta Eflc Crack-razor1911 Update Patch V1.1.1.0</em>.</p>
-<p>You should see a folder named <em>Gta Eflc Crack-razor1911 Update Patch V1.1.1.0</em>, which contains several files and subfolders.</p>
-<h2>Step 3: Copy the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0 Files</h2>
-<p>The next step is to copy the files from the extracted folder to your GTA EFLC installation directory. You can find it by right-clicking on the GTA EFLC shortcut on your desktop and choosing <em>Open File Location</em>.</p>
-<p>You should see a folder named <em>EFLC</em>, which contains the game's files and folders.</p>
-<p>Now, you need to copy all the files and folders from the <em>Gta Eflc Crack-razor1911 Update Patch V1.1.1.0</em> folder and paste them into the <em>EFLC</em> folder, replacing any existing files if prompted.</p>
-<p></p>
-<h2>Step 4: Run the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0 Launcher</h2>
-<p>The final step is to run the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0 Launcher, which will apply the patch and launch the game.</p>
-<p>You can find it in the <em>EFLC</em> folder, and it should be named <em>EFLC.exe</em>.</p>
-<p>Double-click on it and wait for it to load.</p>
-<p>You should see a message saying <em>GTA IV: Episodes from Liberty City v 1110 (Razor19111)</em>, followed by the game's menu.</p>
-<h2>Congratulations!</h2>
-<p>You have successfully installed the GTA EFLC Crack-Razor19111 Update Patch V1110 on your PC.</p>
-<p>Now you can enjoy playing GTA EFLC with improved performance and stability.</p>
-
-<h2>Troubleshooting</h2>
-<p>If you encounter any problems with the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0, here are some possible solutions:</p>
-<ul>
-<li>Make sure you have the latest version of GTA EFLC installed on your PC. You can check it by launching the game and looking at the bottom-right corner of the screen. It should say <em>Version 1.1.3.0</em>. If not, you can download and install the official patch from Rockstar Games.</li>
-<li>Make sure you have a compatible system. The minimum requirements for GTA EFLC are: Windows XP/Vista/7, Intel Core 2 Duo 1.8 GHz or AMD Athlon X2 64 2.4 GHz, 1.5 GB of RAM, 16 GB of free hard disk space, NVIDIA GeForce 7900 or ATI Radeon X1900 with 256 MB of VRAM, DirectX 9.0c compatible sound card.</li>
-<li>Make sure you have disabled any antivirus or firewall programs that may interfere with the game's files or online features.</li>
-<li>Make sure you have run the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0 Launcher as an administrator. You can do this by right-clicking on the <em>EFLC.exe</em> file and choosing <em>Run as administrator</em>.</li>
-<li>If you still have issues, you can contact the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0 support team at their website or forum.</li>
-</ul>
-<h2>Disclaimer</h2>
-<p>This article is for educational purposes only. We do not condone or encourage piracy or illegal downloading of any software or content. We are not affiliated with or endorsed by Rockstar Games, Razor1911, or any other parties involved in the development or distribution of GTA EFLC or GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0.</p>
-<p>Use this article and the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0 at your own risk. We are not responsible for any damage or loss that may occur as a result of following this article or using the GTA EFLC Crack-Razor1911 Update Patch V1.1.1.0.</p> 7b8c122e87<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Derivations In Physics Class 12 Cbse Pdf Download !!BETTER!!.md
DELETED
@@ -1,80 +0,0 @@
-## Derivations In Physics Class 12 Cbse Pdf Download
-
-
-
-
-
-
-
-
-
-
-
-**Derivations In Physics Class 12 Cbse Pdf Download [https://lomasmavi.blogspot.com/?c=2txmNI](https://lomasmavi.blogspot.com/?c=2txmNI)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-# How to Master Derivations in Physics Class 12 CBSE with PDF Download
-
-
-
-Physics is one of the most important subjects for students who are preparing for the CBSE board exams. Physics requires a lot of conceptual understanding and problem-solving skills, especially when it comes to derivations. Derivations are the mathematical expressions that show how a physical quantity or law is derived from the basic principles of physics. Derivations are essential for scoring high marks in physics, as they test your knowledge of the concepts and their applications.
-
-
-
-However, many students find derivations difficult and confusing, and often struggle to remember them or write them correctly in the exams. If you are one of those students who want to master derivations in physics class 12 CBSE, then this article is for you. In this article, we will provide you with some tips and tricks to learn and practice derivations effectively, as well as a PDF download link where you can access all the important derivations for physics class 12 CBSE.
-
-
-
-## Why are Derivations Important for Physics Class 12 CBSE?
-
-
-
-Derivations are important for physics class 12 CBSE for several reasons. Some of them are:
-
-
-
-- Derivations help you understand the concepts and principles of physics in depth. By deriving a formula or a law, you can see how it is related to the fundamental concepts and assumptions of physics, and how it can be applied to different situations.
-
-- Derivations help you improve your analytical and logical thinking skills. By deriving a formula or a law, you can learn how to use mathematical tools such as calculus, trigonometry, algebra, and vectors to manipulate physical quantities and equations.
-
-- Derivations help you score high marks in the exams. Derivations are often asked in the CBSE board exams, either directly or indirectly. By knowing how to derive a formula or a law, you can answer any question related to it with confidence and accuracy.
-
-
-
-## How to Learn Derivations in Physics Class 12 CBSE?
-
-
-
-Learning derivations in physics class 12 CBSE is not as hard as it may seem. You just need to follow some simple steps and strategies. Here are some of them:
-
-
-
-1. Understand the concept behind the derivation. Before you start deriving a formula or a law, make sure you understand what it means and what it is used for. Read the theory carefully and try to visualize the physical situation or phenomenon that it describes. For example, if you want to derive the expression for the electric potential due to a point charge, you should first understand what electric potential is, how it is measured, and how it depends on the charge and distance.
-
-2. Write down the given information and the required result. Before you start deriving a formula or a law, write down what is given and what is required in the derivation. This will help you organize your thoughts and plan your steps. For example, if you want to derive the expression for the electric potential due to a point charge, you should write down that the given information is the charge Q and the distance r from the charge, and the required result is the electric potential V at that point.
-
-3. Identify the basic principles and equations that are relevant to the derivation. Before you start deriving a formula or a law, identify which basic principles and equations of physics are relevant to the derivation. This will help you choose the right approach and avoid unnecessary complications. For example, if you want to derive the expression for the electric potential due to a point charge, you should identify that the relevant principle is Coulomb's law, which gives the force between two point charges, and the relevant equation is V = W/q0,
-where W is the work done by an external agent in bringing a unit positive charge from infinity to that point.
-
-4. Follow a logical sequence of steps to derive
-the formula or law. Use appropriate symbols and units for all physical quantities and equations. Show all your calculations clearly and neatly. Explain each step with words if necessary. For example, if you want to dfd1c89656
-
-
-
-
-
-
-
-
-
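The point-charge derivation that the deleted file outlines in its steps 1-4 (Coulomb's law plus V = W/q0) can be completed in three lines. A worked sketch in LaTeX, assuming SI units, a source charge Q, a test charge q0 brought quasi-statically in from infinity to a distance r, and k = 1/(4πε₀):

```latex
% Electric field of the point charge Q (from Coulomb's law):
\[
E(r') = \frac{1}{4\pi\varepsilon_0}\,\frac{Q}{r'^{2}}
\]
% Work done by an external agent moving the test charge q_0 from infinity
% to r, against the field (hence the minus sign on the field's work):
\[
W = -\int_{\infty}^{r} q_0\,E(r')\,\mathrm{d}r'
  = -\frac{q_0 Q}{4\pi\varepsilon_0}\left[-\frac{1}{r'}\right]_{\infty}^{r}
  = \frac{q_0 Q}{4\pi\varepsilon_0}\,\frac{1}{r}
\]
% Divide out the test charge, V = W/q_0:
\[
V(r) = \frac{W}{q_0} = \frac{1}{4\pi\varepsilon_0}\,\frac{Q}{r}
\]
```

For a positive source charge the work, and hence the potential, comes out positive, matching the standard class 12 result V = kQ/r.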
spaces/1gistliPinn/ChatGPT4/Examples/Anthony De Mello The Way To Love Pdf.md
DELETED
@@ -1,7 +0,0 @@
-
-<p>anthony de mello was born in 1920 in cork, ireland. in his twenties he trained as a jesuit priest and spent his career at various jesuit institutions in india and west africa. he is now retired and lives in new delhi. he has authored more than twenty books in english and spanish including <em>the way to love</em> and <em>wake up!</em> (both published in the usa by doubleday), <em>the light of asia</em>, <em>the river of love</em>, <em>the seed</em>, <em>the heart of love</em>, and <em>the unfolding of love</em>.</p>
-<p> <b>the way to love</b> is the title given to the first of three books of spiritual reflections by anthony de mello, a renowned spiritual teacher and author. <b>the way to love</b> (doubleday) is a compilation of many of his teachings on spirituality, including insights on spirituality in the catholic church, the spiritual life and writing, the art of prayer, and the development of the spiritual director. <b>the way to love</b> is a convenient and easy read with anecdotes, stories, and short meditations, and a subtle blend of humor, intellectual depth, and simplicity.</p>
-<h2>Anthony De Mello The Way To Love Pdf</h2><br /><p><b><b>Download</b> ✺✺✺ <a href="https://imgfil.com/2uxYP2">https://imgfil.com/2uxYP2</a></b></p><br /><br />
-<p> <b>no more anxiety</b> is the second book in the trilogy of spiritual reflections by anthony de mello. <b>no more anxiety</b> (doubleday) is a compilation of many of his teachings on spirituality, including insights on spirituality in the catholic church, the spiritual life and writing, the art of prayer, and the development of the spiritual director. <b>no more anxiety</b> is a convenient and easy read with anecdotes, stories, and short meditations, and a subtle blend of humor, intellectual depth, and simplicity.</p> 899543212b<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Descargar Libro Ecuaciones Diferenciales Moises Lazaro LINK.md
DELETED
@@ -1,6 +0,0 @@
-<h2>descargar libro ecuaciones diferenciales moises lazaro</h2><br /><p><b><b>Download</b> <a href="https://imgfil.com/2uxXtP">https://imgfil.com/2uxXtP</a></b></p><br /><br />
-
-Descargar Libro Ecuaciones Diferenciales Moises Lazaro March 27th, 2019 - Descargar Solucionario Del Libro Calculo Integral Moises Lazaro Descargar libros ... 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Explorations In Basic Biology 12th Edition Answer 45 VERIFIED.md
DELETED
@@ -1,15 +0,0 @@
-<br />
-<h1>How to Find the Answer to Exercise 45 in Explorations in Basic Biology 12th Edition</h1>
-<p>Explorations in Basic Biology is a self-contained laboratory manual designed for one- or two-semester introductory biology courses for non-biology and mixed biology majors. The exercises are appropriate for three-hour laboratory sessions, but are also adaptable to a two-hour laboratory format. The manual covers various topics, such as microscopy, cell structure, enzymes, photosynthesis, genetics, evolution, ecology, and animal diversity.</p>
-<p>One of the exercises in the manual is Exercise 45: The Human Heart. This exercise introduces the students to the structure and function of the human heart, and guides them through the dissection of a sheep heart. The exercise also includes questions that test the students' understanding of the concepts and skills learned.</p>
-<h2>explorations in basic biology 12th edition answer 45</h2><br /><p><b><b>Download File</b> ☆☆☆☆☆ <a href="https://imgfil.com/2uxZ5v">https://imgfil.com/2uxZ5v</a></b></p><br /><br />
-<p>One of the questions in Exercise 45 is question 45: "What is the function of the chordae tendineae?" This question refers to the thin strands of connective tissue that attach the atrioventricular valves to the papillary muscles in the ventricles. The function of the chordae tendineae is to prevent the valves from being pushed back into the atria when the ventricles contract. This ensures that blood flows in one direction through the heart.</p>
-<p>To find the answer to this question, students can refer to Figure 45.5 in the manual, which shows a diagram of a sheep heart with labels for the chordae tendineae and other structures. Students can also consult other sources, such as textbooks, websites, or videos, that explain the anatomy and physiology of the heart. For example, one possible source is [^4^], which provides a PDF version of Explorations in Basic Biology 12th Edition that can be downloaded for free.</p>
-<p>By using these resources, students can learn more about the human heart and its function, and be able to answer question 45 and other questions in Exercise 45.</p>
-
-<p>In addition to question 45, Exercise 45 also includes questions that ask students to compare the sheep heart and the human heart, to identify the blood vessels that enter and exit the heart, to trace the pathway of blood through the heart, and to measure the heart rate and blood pressure of a human subject. These questions help students to apply their knowledge of the heart to different scenarios and contexts.</p>
-<p>Exercise 45 is one of the many exercises in Explorations in Basic Biology 12th Edition that provide students with a hands-on experience of learning biology. By performing laboratory activities, students not only learn basic biological information but also gain experience practicing laboratory techniques. The manual also provides clear background information and directions for conducting laboratory activities, as well as guidelines for writing a scientific paper.</p>
-<p></p>
-<p>Explorations in Basic Biology 12th Edition is a useful resource for students who want to learn more about biology and its applications. The manual can be used as a standalone text or as a supplement to other biology texts. The manual is also suitable for instructors who want to offer their students a variety of options and activities for learning biology.</p> d5da3c52bf<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Warpath Mod Apk A Review of the Best Strategy Game with Unlimited Money and Gems.md
DELETED
@@ -1,101 +0,0 @@
-
-<h1>Clash of Warpath Mod APK (Unlimited Money and Gems)</h1>
-<p>Are you looking for a thrilling and addictive strategy game that will keep you hooked for hours? Do you want to experience the thrill of building your own base, army, and empire in a fantasy world? Do you want to have unlimited resources and access to all the premium features and items in the game? If you answered yes to any of these questions, then you should definitely try <strong>Clash of Warpath Mod APK</strong>.</p>
-<h2>What is Clash of Warpath?</h2>
-<p>Clash of Warpath is a real-time strategy game that lets you create your own kingdom, recruit powerful heroes, train your troops, and fight against other players from around the world. You can also join alliances, chat with other players, trade resources, and participate in events and quests. The game has stunning graphics, smooth gameplay, and a rich storyline that will immerse you in a fantasy world full of magic, adventure, and war.</p>
-<h2>clash of warpath mod apk (unlimited money and gems)</h2><br /><p><b><b>Download File</b> --->>> <a href="https://urlin.us/2uSXLH">https://urlin.us/2uSXLH</a></b></p><br /><br />
-<h3>Features of Clash of Warpath</h3>
-<h4>Build your base and army</h4>
-<p>In Clash of Warpath, you can build your own base from scratch, customize it with various buildings, defenses, traps, and decorations. You can also upgrade your buildings to improve their functions and efficiency. You can also recruit and train different types of troops, such as infantry, cavalry, archers, mages, and siege units. Each troop has its own strengths, weaknesses, and skills that you can use strategically in battle.</p>
-<h4>Fight epic battles and conquer territories</h4>
-<p>Clash of Warpath is not just about building your base and army, but also about fighting epic battles and conquering territories. You can attack other players' bases, loot their resources, destroy their defenses, and capture their heroes. You can also defend your own base from enemy attacks, set up traps, deploy your troops, and use your heroes' abilities to repel them. You can also explore the map, capture resource points, occupy strategic locations, and expand your territory.</p>
-<h4>Join alliances and chat with other players</h4>
-<p>Clash of Warpath is more fun when you play with other players. You can join or create an alliance, chat with other members, share resources, help each other out, and coordinate attacks. You can also compete with other alliances in alliance wars, alliance tournaments, alliance quests, and alliance events. You can also make friends or enemies with other players, send them messages, gifts, or challenges.</p>
-<h3>Why download Clash of Warpath Mod APK?</h3>
-<h4>Unlimited money and gems</h4>
-<p>One of the main reasons why you should download Clash of Warpath Mod APK is because it gives you unlimited money and gems. Money and gems are the main currencies in the game that you can use to buy various items, such as buildings, troops, heroes, equipment, chests, boosts, etc. However, money and gems are not easy to come by in the game. You have to complete tasks, win battles, or spend real money to get them. But with Clash of Warpath Mod APK, you don't have to worry about that anymore. You can have as much money and gems as you want without spending a dime.</p>
-<h4>Unlock premium features and items</h4>
-<p>Another reason why you should download Clash of Warpath Mod APK is because it unlocks all the premium features and items in the game. <p>Some of the premium features and items that you can unlock with Clash of Warpath Mod APK are:</p>
-<ul>
-<li>VIP level: You can get the highest VIP level in the game, which gives you various benefits, such as faster building, training, and research times, more daily rewards, more alliance help, etc.</li>
-<li>Heroes: You can unlock all the heroes in the game, each with their own unique skills, attributes, and equipment. You can also upgrade your heroes to their maximum level and rank.</li>
-<li>Equipment: You can unlock all the equipment in the game, which can enhance your heroes' performance and appearance. You can also craft and enchant your equipment to make them more powerful.</li>
-<li>Chests: You can unlock all the chests in the game, which contain various items, such as money, gems, resources, heroes, equipment, etc. You can also open as many chests as you want without waiting for the cooldown time.</li>
-<li>Boosts: You can unlock all the boosts in the game, which can increase your production, speed, attack, defense, etc. You can also use as many boosts as you want without any limit.</li>
-</ul>
-<h4>Enjoy the game without ads or restrictions</h4>
-<p>The last reason why you should download Clash of Warpath Mod APK is because it lets you enjoy the game without ads or restrictions. Ads can be annoying and distracting when you are playing the game. They can also consume your data and battery. But with Clash of Warpath Mod APK, you don't have to see any ads in the game. You can play the game smoothly and uninterrupted. Moreover, Clash of Warpath Mod APK also removes any restrictions or limitations in the game. You don't have to worry about running out of resources, energy, or time. You can play the game as much as you want and how you want.</p>
-<h2>How to download and install Clash of Warpath Mod APK?</h2>
-<p>If you are convinced by now that Clash of Warpath Mod APK is the best version of the game for you, then you might be wondering how to download and install it on your device. Don't worry, it's very easy and simple. Just follow these steps:</p>
-<p>* Warpath mod apk download free with unlimited money and gems<br />
-* How to install Warpath mod apk + OBB for Android devices<br />
-* Warpath hack mod apk latest version v6.20.41 with unlimited resources<br />
-* Warpath mod apk unlimited everything: money, gems, gold, oil, etc.<br />
-* Warpath cheats mod apk: tips and tricks to win every battle<br />
-* Warpath mod apk offline mode: play without internet connection<br />
-* Warpath mod apk no root required: easy and safe installation<br />
-* Warpath mod apk unlimited troops: recruit and upgrade your army<br />
-* Warpath mod apk unlimited weapons: unlock and customize your arsenal<br />
-* Warpath mod apk unlimited skills: activate and use special abilities<br />
-* Warpath mod apk unlimited missions: explore and conquer the world map<br />
-* Warpath mod apk unlimited events: participate and win rewards<br />
-* Warpath mod apk unlimited alliances: join and cooperate with other players<br />
-* Warpath mod apk unlimited chat: communicate and chat with friends<br />
-* Warpath mod apk unlimited fun: enjoy the best real-time strategy game<br />
-* Warpath modded apk free download for Android phones and tablets<br />
-* Warpath cracked apk with unlimited money and gems for free<br />
-* Warpath premium apk with all features unlocked and unlimited resources<br />
-* Warpath pro apk with advanced settings and options for better performance<br />
-* Warpath full apk with all content and updates available for download<br />
-* Warpath hacked apk with cheat codes and mods for easy gameplay<br />
-* Warpath patched apk with bug fixes and improvements for smooth experience<br />
-* Warpath unlocked apk with all items and characters accessible for use<br />
-* Warpath updated apk with new features and enhancements for more fun<br />
-* Warpath latest apk with the most recent version and changes for download<br />
-* Download Warpath mod apk (unlimited money and gems) from Reddit<br />
-* Download Warpath mod apk (unlimited money and gems) from APKPure<br />
-* Download Warpath mod apk (unlimited money and gems) from APKMirror<br />
-* Download Warpath mod apk (unlimited money and gems) from APKMody<br />
-* Download Warpath mod apk (unlimited money and gems) from APKDone<br />
-* Download Warpath mod apk (unlimited money and gems) from ModDroid<br />
-* Download Warpath mod apk (unlimited money and gems) from HappyMod<br />
-* Download Warpath mod apk (unlimited money and gems) from PandaHelper<br />
-* Download Warpath mod apk (unlimited money and gems) from ACMarket<br />
-* Download Warpath mod apk (unlimited money and gems) from APKCombo</p>
-<h3>Step 1: Download the APK file from a trusted source</h3>
-<p>The first step is to download the APK file of Clash of Warpath Mod APK from a trusted source. There are many websites that offer modded APK files for various games and apps, but not all of them are safe and reliable. Some of them might contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should be careful and choose a reputable website that provides authentic and updated modded APK files. One such website is [clashofwarpathmodapk.com], where you can find the latest version of Clash of Warpath Mod APK with unlimited money and gems.</p>
-<h3>Step 2: Enable unknown sources on your device</h3>
-<p>The second step is to enable unknown sources on your device. This is necessary because Clash of Warpath Mod APK is not available on the official app store, so you have to install it from an external source. To do this, you have to allow your device to install apps from unknown sources. This is a simple process that varies depending on your device model and operating system. But generally, you can follow these steps:</p>
-<ul>
-<li>Go to your device settings and look for security or privacy options.</li>
-<li>Find the option that says unknown sources or allow installation from unknown sources and toggle it on.</li>
-<li>A warning message might pop up asking you to confirm your action. Just tap on OK or Yes to proceed.</li>
-</ul>
-<h3>Step 3: Install the APK file and launch the game</h3>
-<p>The third step is to install the APK file and launch the game. This is also very easy and quick. Just follow these steps:</p>
-<ul>
-<li>Locate the downloaded APK file on your device storage using a file manager app or your browser's download history.</li>
-<li>Tap on the APK file and follow the instructions on the screen to install it.</li>
-<li>Wait for a few seconds until the installation is complete.</li>
-<li>Tap on the game icon on your home screen or app drawer to launch it.</li>
-</ul>
-<h3>Step 4: Enjoy the mod features and have fun</h3>
-<p>The final step is to enjoy the mod features and have fun. Once you launch the game, you will notice that you have unlimited money and gems in your account. You can use them to buy anything you want in the game, such as buildings, troops, heroes, equipment, chests, boosts, etc. You will also notice that all the premium features and items are unlocked and available for you to use. You will also enjoy the game without any ads or restrictions. You can play the game as much as you want and how you want. You can build your base and army, fight epic battles and conquer territories, join alliances and chat with other players, and have fun with the game.</p>
-<h2>Conclusion</h2>
-<p>Clash of Warpath is a real-time strategy game that lets you create your own kingdom, recruit powerful heroes, train your troops, and fight against other players from around the world. It is a thrilling and addictive game that will keep you hooked for hours. However, if you want to have more fun and excitement with the game, you should download Clash of Warpath Mod APK. This modded version of the game gives you unlimited money and gems, unlocks all the premium features and items, and lets you enjoy the game without ads or restrictions. It is easy and simple to download and install on your device. Just follow the steps above and you will be ready to play the game with the mod features. So what are you waiting for? Download Clash of Warpath Mod APK now and have fun!</p>
-<h3>FAQs</h3>
-<p>Here are some frequently asked questions about Clash of Warpath Mod APK:</p>
-<ul>
-<li><strong>Q: Is Clash of Warpath Mod APK safe to use?</strong></li>
-<li>A: Yes, Clash of Warpath Mod APK is safe to use as long as you download it from a trusted source, such as [clashofwarpathmodapk.com]. It does not contain any viruses, malware, or spyware that can harm your device or steal your personal information.</li>
-<li><strong>Q: Is Clash of Warpath Mod APK compatible with my device?</strong></li>
-<li>A: Clash of Warpath Mod APK is compatible with most Android devices that run on Android 4.4 or higher. However, some devices might not support some of the mod features or run smoothly due to hardware limitations.</li>
-<li><strong>Q: Will Clash of Warpath Mod APK affect my game progress?</strong></li>
-<li>A: No, Clash of Warpath Mod APK will not affect your game progress. You can still play the game with your original account and save your data on the cloud. However, you should be careful not to use the mod features in a way that might get you banned by the game developers.</li>
-<li><strong>Q: Can I update Clash of Warpath Mod APK?</strong></li>
-<li>A: Yes, you can update Clash of Warpath Mod APK whenever there is a new version available. However, you should always check the website where you downloaded it from for the latest version and download it from there. Do not update it from the official app store or other sources, as it might overwrite the mod features or cause errors.</li>
-<li><strong>Q: Can I play Clash of Warpath Mod APK offline?</strong></li>
-<li>A: No, you cannot play Clash of Warpath Mod APK offline. You need an internet connection to play the game, as it is a multiplayer online game that requires constant communication with the server and other players.</li>
-</ul></p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Download SmartGaga Garena 3.0 The Best Free Fire Emulator for Low End PC.md
DELETED
@@ -1,129 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Smartgaga Garena 3.0: The Best Emulator for Free Fire on Low-End PC</h1>
|
3 |
-
<p>If you are a fan of Free Fire, one of the most popular battle royale games on mobile devices, you might want to play it on your PC for a better gaming experience. However, not all PCs can run Free Fire smoothly, especially if they have low-end specifications. That's why you need an emulator that can optimize Free Fire for your PC without compromising the performance or quality.</p>
|
4 |
-
<h2>download smartgaga garena 3.0</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://jinyurl.com/2uNSb3">https://jinyurl.com/2uNSb3</a></b></p><br /><br />
|
5 |
-
<p>One of the best emulators that you can use for Free Fire is Smartgaga Garena 3.0. This is a special version of Smartgaga that is designed specifically for Free Fire players who have low-end PCs. In this article, we will tell you everything you need to know about Smartgaga Garena 3.0, including its features, how to download and install it, how to optimize it for Free Fire, and its pros and cons.</p>
|
6 |
-
<h2>Features of Smartgaga Garena 3.0</h2>
|
7 |
-
<p>Smartgaga Garena 3.0 is not just an ordinary emulator. It is a super lite and fast emulator that can run Free Fire smoothly on any PC, even if it has only 1GB RAM or no graphics card. Here are some of the features that make Smartgaga Garena 3.0 stand out from other emulators:</p>
|
8 |
-
<ul>
|
9 |
-
<li><b>Super lite and fast performance:</b> Smartgaga Garena 3.0 uses only a small amount of memory and CPU resources, which means it can run faster and smoother than other emulators. It also has a built-in turbo engine that can boost the speed and performance of Free Fire.</li>
|
10 |
-
<li><b>Smooth and stable gameplay:</b> Smartgaga Garena 3.0 can run Free Fire at high frame rates without any lag or stuttering. It also has a low latency mode that can reduce the ping and improve the network stability of Free Fire.</li>
|
11 |
-
<li><b>Customizable settings and controls:</b> Smartgaga Garena 3.0 allows you to customize the settings and controls of Free Fire according to your preference. You can adjust the resolution, frame rate, graphics quality, sensitivity, and more. You can also map the keyboard and mouse buttons for better control and accuracy.</li>
|
12 |
-
<li><b>Compatible with Windows 7, 8, 10, and 11:</b> Smartgaga Garena 3.0 can run on any Windows operating system, from Windows 7 to Windows 11. It also supports both 32-bit and 64-bit systems. You don't need to worry about compatibility issues or errors when using Smartgaga Garena 3.0.</li>
|
13 |
-
</ul>
|
14 |
-
<h2>How to Download and Install Smartgaga Garena 3.0</h2>
|
15 |
-
<p>Downloading and installing Smartgaga Garena 3.0 is very easy and simple. You just need to follow these steps:</p>
|
16 |
-
<ol>
|
17 |
-
<li><b>Step 1: Go to the official website of Smartgaga Garena 3.0:</b> The official website of Smartgaga Garena 3.0 is <a href="">https://smartgagagarena.com/</a>. You can also search for "Smartgaga Garena" on Google or any other search engine and click on the first result.</li>
|
18 |
-
<li><b>Step 2: Click on the download button and wait for the file to be downloaded:</b> On the homepage of the website, you will see a big download button that says "Download Smartgaga Garena". Click on it and wait for the file to be downloaded. The file size is about 300 MB, so it may take some time depending on your internet speed.</li>
|
19 |
-
<li><b>Step 3: Run the installer and follow the instructions on the screen:</b> After the file is downloaded, locate it on your PC and double-click on it to run the installer. Follow the instructions on the screen to install Smartgaga Garena 3.0 on your PC. The installation process is very quick and easy.</li>
|
20 |
-
<li><b>Step 4: Launch the emulator and log in with your Google account:</b> After the installation is completed, launch the emulator from your desktop or start menu. You will see a welcome screen that asks you to log in with your Google account. Log in with your Google account to access the Play Store and other Google services.</li>
|
21 |
-
<li><b>Step 5: Download Free Fire from the Play Store or use the APK file:</b> Once you are logged in, you can download Free Fire from the Play Store or use the APK file if you have it. To download Free Fire from the Play Store, click on the Play Store icon on the emulator's home screen and search for "Free Fire". Click on the install button and wait for the game to be downloaded and installed. To use the APK file, drag and drop it onto the emulator's window and follow the prompts to install it.</li>
|
22 |
-
</ol>
<h2>How to Optimize Smartgaga Garena 3.0 for Free Fire</h2>
<p>To get the best experience with Free Fire on Smartgaga Garena 3.0, you should tune a few settings and controls to match your PC's specifications and your preferences. Here are the steps:</p>
<ol>
<li><b>Step 1: Adjust the resolution and frame rate according to your PC specifications:</b> Click the gear icon in the top right corner of the emulator's window and go to "Settings". Under "Display", choose a resolution and frame rate that suit your hardware. Higher values give better graphics quality but require more resources, so pick a balance between quality and performance that works for you.</li>
<li><b>Step 2: Enable or disable virtualization technology (VT) depending on your CPU support:</b> In "Settings", under "Engine", you will find an option called "Enable VT". VT can improve the performance and stability of the emulator, but not every CPU supports it. To check, you can use a tool such as <a href="">https://www.intel.com/content/www/us/en/support/articles/000005486/processors.html</a>, or try the command-line check sketched after this list. If your CPU supports VT, enable it both in Smartgaga Garena 3.0 and in your BIOS settings. If it does not, disable the option and the emulator will still work.</li>
<li><b>Step 3: Change the graphics quality and sensitivity settings according to your preference:</b> Launch the game and go to "Settings". Under "Graphics", choose between graphics modes such as Smooth, Standard, Ultra, and HDR, and adjust brightness, shadows, anti-aliasing, and other options. Under "Sensitivity", adjust the mouse and touch-screen sensitivity for different scenarios such as general, red dot, scope, and vehicle.</li>
<li><b>Step 4: Map the keyboard and mouse buttons for better control and accuracy:</b> Click the keyboard icon on the right side of the emulator's window and go to "Keymapping". You will see a list of preset keys for in-game actions such as move, aim, shoot, jump, crouch, and reload. You can modify these keys or add new ones, and save separate profiles for different games or modes.</li>
</ol>
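<p>If you prefer checking VT support from the command line rather than a separate tool, a minimal Python sketch like the one below can parse the output of Windows' built-in <code>systeminfo</code> command. This helper is purely illustrative and not part of Smartgaga Garena; the exact "Virtualization Enabled In Firmware" wording it searches for can vary by Windows version, so treat it as a rough check only.</p>
<pre><code># vt_check.py - rough, illustrative check for virtualization support on Windows
import subprocess

def vt_status() -> str:
    # systeminfo ships with Windows; its Hyper-V section reports firmware VT state.
    out = subprocess.run(["systeminfo"], capture_output=True, text=True).stdout
    for line in out.splitlines():
        if "Virtualization Enabled In Firmware" in line:
            return "enabled" if "Yes" in line else "disabled (enable VT in BIOS)"
    return "unknown - check your BIOS or a CPU vendor tool"

if __name__ == "__main__":
    print("VT status:", vt_status())
</code></pre>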
<h2>Pros and Cons of Smartgaga Garena 3.0</h2>
<p>Smartgaga Garena 3.0 is undoubtedly one of the best emulators for Free Fire on low-end PCs, but it also has some drawbacks that you should be aware of:</p>
<table>
<tr>
<th>Pros</th>
<th>Cons</th>
</tr>
<tr>
<td><ul>
<li>Lightweight, fast, stable, customizable, compatible, and free</li>
<li>Runs Free Fire smoothly on PCs with low-end specifications</li>
<li>Turbo engine that boosts the speed and performance of Free Fire</li>
<li>Low latency mode that reduces ping and improves network stability</li>
<li>Lets you customize Free Fire's settings and controls to your preference</li>
<li>Runs on any Windows operating system from Windows 7 to Windows 11</li>
<li>Supports both 32-bit and 64-bit systems</li>
</ul></td>
<td><ul>
<li>May occasionally encounter bugs or errors</li>
<li>May require VT on some PCs to run smoothly</li>
<li>May not support some games or apps other than Free Fire</li>
</ul></td>
</tr>
</table>
<h2>Conclusion</h2>
<p>In conclusion, Smartgaga Garena 3.0 is a great emulator for Free Fire players with low-end PCs. It runs Free Fire smoothly and stably with minimal resources, and its features, such as customizable settings and controls, a turbo engine, and a low latency mode, further enhance the gaming experience. It is also compatible with every Windows operating system from Windows 7 to Windows 11.</p>
<p>If you want to play Free Fire on your PC without lag or stuttering, download Smartgaga Garena 3.0 from its official website and follow the steps provided in this article. You will not regret it!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about Smartgaga Garena 3.0:</p>
<ul>
<li><b>Q1: What is the difference between Smartgaga and Smartgaga Garena?</b></li>
<li>A1: Smartgaga is a general emulator that can run various Android games and apps on PC. Smartgaga Garena is a special version of Smartgaga that is optimized for Free Fire players with low-end PCs.</li>
<li><b>Q2: Is Smartgaga Garena safe to use?</b></li>
<li>A2: Yes. It does not contain any malware or viruses that could harm your PC or data, and it does not violate the terms or policies of Free Fire or Google Play.</li>
<li><b>Q3: How can I update Smartgaga Garena to the latest version?</b></li>
<li>A3: Download the latest file from the official website of Smartgaga Garena. You can also check for updates in the emulator's settings and click the update button if one is available.</li>
<li><b>Q4: How can I fix the blue screen or black screen issue on Smartgaga Garena?</b></li>
<li>A4: The blue screen or black screen issue is usually caused by compatibility or driver problems. Try the following steps:</li>
<ul>
<li>Restart your PC and run Smartgaga Garena again.</li>
<li>Update your graphics card driver and DirectX to the latest version.</li>
<li>Enable or disable VT in your BIOS settings and in Smartgaga Garena settings, depending on your CPU support.</li>
<li>Lower the resolution and frame rate of Free Fire in Smartgaga Garena settings.</li>
<li>Contact the Smartgaga Garena support team if none of the above steps work.</li>
</ul>
<li><b>Q5: How can I contact the support team of Smartgaga Garena?</b></li>
<li>A5: Send an email to <a href="">[email protected]</a>, or visit their Facebook page or YouTube channel for more information and updates.</li>
</ul>
spaces/7hao/bingo/src/components/ui/voice/index.tsx
DELETED
@@ -1,28 +0,0 @@
import './index.scss'
import type { HTMLAttributes } from 'react'

// Props for the animated voice indicator; extra div attributes are spread onto the wrapper.
export interface VoiceProps extends HTMLAttributes<HTMLDivElement> {
  num?: number;
  duration?: number;
}

export default function Voice({ duration = 400, num = 7, ...others }: VoiceProps) {
  return (
    <div className="voice-button" {...others}>
      {Array.from({ length: num }).map((_, index) => {
        // Randomize each bar's animation so the bars do not pulse in sync.
        const randomDuration = Math.random() * 100 + duration
        const initialDelay = Math.random() * 2 * duration
        // Scale the bars along a sine curve so the middle ones are the tallest.
        const initialScale = Math.sin((index + 1) * Math.PI / num)
        return (
          <div
            className="voice-button-item"
            key={index}
            style={{
              animationDelay: initialDelay + 'ms',
              animationDuration: randomDuration + 'ms',
              transform: `scale(${initialScale})`
            }}
          />
        )
      })}
    </div>
  )
}
spaces/A00001/bingothoo/src/components/ui/dialog.tsx
DELETED
@@ -1,128 +0,0 @@
'use client'

import * as React from 'react'
import * as DialogPrimitive from '@radix-ui/react-dialog'

import { cn } from '@/lib/utils'
import { IconClose } from '@/components/ui/icons'

const Dialog = DialogPrimitive.Root

const DialogTrigger = DialogPrimitive.Trigger

const DialogPortal = ({
  className,
  children,
  ...props
}: DialogPrimitive.DialogPortalProps) => (
  <DialogPrimitive.Portal className={cn(className)} {...props}>
    <div className="fixed inset-0 z-50 flex items-start justify-center sm:items-center">
      {children}
    </div>
  </DialogPrimitive.Portal>
)
DialogPortal.displayName = DialogPrimitive.Portal.displayName

const DialogOverlay = React.forwardRef<
  React.ElementRef<typeof DialogPrimitive.Overlay>,
  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Overlay>
>(({ className, ...props }, ref) => (
  <DialogPrimitive.Overlay
    ref={ref}
    className={cn(
      'bg-white fixed inset-0 z-50 bg-background/80 backdrop-blur-sm transition-all duration-100 data-[state=closed]:animate-out data-[state=closed]:fade-out data-[state=open]:fade-in',
      className
    )}
    {...props}
  />
))
DialogOverlay.displayName = DialogPrimitive.Overlay.displayName

const DialogContent = React.forwardRef<
  React.ElementRef<typeof DialogPrimitive.Content>,
  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Content>
>(({ className, children, ...props }, ref) => (
  <DialogPortal>
    <DialogOverlay />
    <DialogPrimitive.Content
      ref={ref}
      className={cn(
        'fixed z-50 grid w-full gap-4 rounded-b-lg border bg-background p-6 shadow-sm animate-in data-[state=open]:fade-in-90 data-[state=open]:slide-in-from-bottom-10 sm:max-w-lg sm:rounded-lg sm:zoom-in-90 data-[state=open]:sm:slide-in-from-bottom-0',
        className
      )}
      {...props}
    >
      {children}
      <DialogPrimitive.Close className="absolute right-4 top-4 rounded-sm opacity-70 ring-offset-background transition-opacity hover:opacity-100 focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:pointer-events-none data-[state=open]:bg-accent data-[state=open]:text-muted-foreground">
        <IconClose />
        <span className="sr-only">Close</span>
      </DialogPrimitive.Close>
    </DialogPrimitive.Content>
  </DialogPortal>
))
DialogContent.displayName = DialogPrimitive.Content.displayName

const DialogHeader = ({
  className,
  ...props
}: React.HTMLAttributes<HTMLDivElement>) => (
  <div
    className={cn(
      'flex flex-col space-y-1.5 text-center sm:text-left',
      className
    )}
    {...props}
  />
)
DialogHeader.displayName = 'DialogHeader'

const DialogFooter = ({
  className,
  ...props
}: React.HTMLAttributes<HTMLDivElement>) => (
  <div
    className={cn(
      'flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2',
      className
    )}
    {...props}
  />
)
DialogFooter.displayName = 'DialogFooter'

const DialogTitle = React.forwardRef<
  React.ElementRef<typeof DialogPrimitive.Title>,
  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Title>
>(({ className, ...props }, ref) => (
  <DialogPrimitive.Title
    ref={ref}
    className={cn(
      'text-lg font-semibold leading-none tracking-tight',
      className
    )}
    {...props}
  />
))
DialogTitle.displayName = DialogPrimitive.Title.displayName

const DialogDescription = React.forwardRef<
  React.ElementRef<typeof DialogPrimitive.Description>,
  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Description>
>(({ className, ...props }, ref) => (
  <DialogPrimitive.Description
    ref={ref}
    className={cn('text-sm text-muted-foreground', className)}
    {...props}
  />
))
DialogDescription.displayName = DialogPrimitive.Description.displayName

export {
  Dialog,
  DialogTrigger,
  DialogContent,
  DialogHeader,
  DialogFooter,
  DialogTitle,
  DialogDescription
}
spaces/AI-Hobbyist/Hoyo-RVC/Changelog_EN.md
DELETED
@@ -1,83 +0,0 @@
### 2023-06-18
- New pretrained v2 models: 32k and 48k
- Fix non-f0 model inference errors
- For training sets exceeding 1 hour, run automatic minibatch-kmeans to reduce the feature shape, so that index training, adding, and searching are much faster
- Provide a toy vocal2guitar huggingface space
- Automatically delete outlier short audio clips from the training set
- Onnx export tab

Failed experiments:
- ~~Feature retrieval: add temporal feature retrieval: not effective~~
- ~~Feature retrieval: add PCAR dimensionality reduction: searching is even slower~~
- ~~Random data augmentation when training: not effective~~

todolist:
- Vocos-RVC (tiny vocoder)
- Crepe support for training
- Half-precision crepe inference
- F0 editor support

### 2023-05-28
- Add v2 jupyter notebook and korean changelog, fix some environment requirements
- Add voiceless consonant and breath protection mode
- Support crepe-full pitch detection
- UVR5 vocal separation: support dereverb models and de-echo models
- Add experiment name and version to the name of the index
- Allow users to manually select the export format of output audios during batch voice conversion and UVR5 vocal separation
- v1 32k model training is no longer supported

### 2023-05-13
- Clear the redundant code of the old runtime in the one-click package: infer_pack and uvr5_pack
- Fix pseudo multiprocessing bug in training-set preprocessing
- Add median filtering radius adjustment for the harvest pitch recognition algorithm
- Support post-processing resampling for exported audio
- The multiprocessing "n_cpu" setting for training now covers "data preprocessing and f0 extraction" instead of only "f0 extraction"
- Automatically detect index paths under the logs folder and provide them in a drop-down list
- Add "Frequently Asked Questions and Answers" on the tab page (you can also refer to the github RVC wiki)
- During inference, the harvest pitch is cached when the same input audio path is used (purpose: with harvest pitch extraction, the entire pipeline goes through a long and repetitive pitch-extraction process; without caching, users experimenting with different timbre, index, and pitch median filtering radius settings would face a very painful wait after the first inference)

### 2023-05-14
- Use the volume envelope of the input to mix with or replace the volume envelope of the output (alleviates the problem of "input muting producing small-amplitude output noise"; not recommended if the input audio has high background noise; off by default (a value of 1 is treated as off))
- Support saving extracted small models at a specified frequency (practical if you want to inspect performance at different epochs without saving all large checkpoints and manually extracting small models via ckpt processing every time)
- Resolve "connection errors" caused by the server's global proxy by setting environment variables
- Support pretrained v2 models (currently only the 40k version is publicly available for testing; the other two sampling rates are not fully trained yet)
- Limit excessive volume exceeding 1 before inference
- Slightly adjusted the training-set preprocessing settings


#######################

History changelogs:

### 2023-04-09
- Fixed training parameters to improve GPU utilization: A100 increased from 25% to around 90%, V100 from 50% to around 90%, 2060S from 60% to around 85%, P40 from 25% to around 95%; significantly improved training speed
- Changed parameter: total batch_size is now per-GPU batch_size
- Changed total_epoch: maximum limit increased from 100 to 1000; default increased from 10 to 20
- Fixed issue of ckpt extraction recognizing pitch incorrectly, causing abnormal inference
- Fixed issue of distributed training saving a ckpt for each rank
- Applied nan feature filtering in feature extraction
- Fixed issue of silent input/output producing random consonants or noise (old models need to be retrained with a new dataset)

### 2023-04-16 Update
- Added a local real-time voice-changing mini-GUI; start it by double-clicking go-realtime-gui.bat
- Applied filtering for frequency bands below 50Hz during training and inference
- Lowered the minimum pitch extraction of pyworld from the default 80 to 50 for training and inference, so that male low-pitched voices between 50-80Hz are not muted
- WebUI supports changing languages according to the system locale (currently supporting en_US, ja_JP, zh_CN, zh_HK, zh_SG, zh_TW; defaults to en_US if not supported)
- Fixed recognition of some GPUs (e.g., V100-16G recognition failure, P4 recognition failure)

### 2023-04-28 Update
- Upgraded faiss index settings for faster speed and higher quality
- Removed dependency on total_npy; future model sharing will not require a total_npy input
- Unlocked restrictions for the 16-series GPUs, providing 4GB inference settings for 4GB VRAM GPUs
- Fixed a bug in UVR5 vocal/accompaniment separation for certain audio formats
- The real-time voice-changing mini-GUI now supports non-40k and non-lazy-pitch models

### Future Plans:
Features:
- Add option: extract small models at each epoch save
- Add option: export an additional mp3 to a specified path during inference
- Support a multi-person training tab (up to 4 people)

Base model:
- Collect breathing wav files to add to the training dataset to fix the issue of distorted breath sounds
- We are currently training a base model with an extended singing dataset, which will be released in the future
spaces/AI-Hobbyist/Hoyo-RVC/export_onnx.py
DELETED
@@ -1,54 +0,0 @@
from infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
import torch

if __name__ == "__main__":
    MoeVS = True  # Whether the model is for MoeVoiceStudio (formerly MoeSS)

    ModelPath = "Shiroha/shiroha.pth"  # model path
    ExportedPath = "model.onnx"  # output path
    hidden_channels = 256  # hidden_channels; set in preparation for the 768-dim vector version
    cpt = torch.load(ModelPath, map_location="cpu")
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
    print(*cpt["config"])

    test_phone = torch.rand(1, 200, hidden_channels)  # hidden units
    test_phone_lengths = torch.tensor([200]).long()  # hidden unit length (seemingly unused)
    test_pitch = torch.randint(size=(1, 200), low=5, high=255)  # fundamental frequency (in Hz)
    test_pitchf = torch.rand(1, 200)  # NSF fundamental frequency
    test_ds = torch.LongTensor([0])  # speaker ID
    test_rnd = torch.rand(1, 192, 200)  # noise (adds a random factor)

    device = "cpu"  # device used for export (does not affect how the model is used)

    net_g = SynthesizerTrnMsNSFsidM(
        *cpt["config"], is_half=False
    )  # fp32 export (fp16 support in C++ requires manual memory reordering, so fp16 is not used for now)
    net_g.load_state_dict(cpt["weight"], strict=False)
    input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"]
    output_names = [
        "audio",
    ]
    # net_g.construct_spkmixmap(n_speaker)  # export with a multi-speaker mixing track
    torch.onnx.export(
        net_g,
        (
            test_phone.to(device),
            test_phone_lengths.to(device),
            test_pitch.to(device),
            test_pitchf.to(device),
            test_ds.to(device),
            test_rnd.to(device),
        ),
        ExportedPath,
        dynamic_axes={
            "phone": [1],
            "pitch": [1],
            "pitchf": [1],
            "rnd": [2],
        },
        do_constant_folding=False,
        opset_version=16,
        verbose=False,
        input_names=input_names,
        output_names=output_names,
    )
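A quick way to verify the exported graph is to load it with onnxruntime and feed it dummy tensors shaped like the export inputs above. The sketch below is illustrative and not part of the original script; in particular, the integer dtypes for phone_lengths, pitch, and ds are assumptions based on the torch tensors used during export.

# sanity_check_onnx.py - minimal sketch (assumes onnxruntime is installed)
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
feeds = {
    "phone": np.random.rand(1, 200, 256).astype(np.float32),
    "phone_lengths": np.array([200], dtype=np.int64),
    "pitch": np.random.randint(5, 255, size=(1, 200)).astype(np.int64),
    "pitchf": np.random.rand(1, 200).astype(np.float32),
    "ds": np.array([0], dtype=np.int64),
    "rnd": np.random.rand(1, 192, 200).astype(np.float32),
}
audio, = sess.run(["audio"], feeds)  # output names match the export above
print("output shape:", audio.shape)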
spaces/AIConsultant/MusicGen/audiocraft/models/audiogen.py
DELETED
@@ -1,276 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""
Main model for using AudioGen. This will combine all the required components
and provide easy access to the generation API.
"""

import typing as tp

import torch

from .encodec import CompressionModel
from .lm import LMModel
from .builders import get_debug_compression_model, get_debug_lm_model
from .loaders import load_compression_model, load_lm_model
from ..data.audio_utils import convert_audio
from ..modules.conditioners import ConditioningAttributes
from ..utils.autocast import TorchAutocast


class AudioGen:
    """AudioGen main model with convenient generation API.

    Args:
        name (str): name of the model.
        compression_model (CompressionModel): Compression model
            used to map audio to invertible discrete representations.
        lm (LMModel): Language model over discrete representations.
        max_duration (float, optional): maximum duration the model can produce,
            otherwise, inferred from the training params.
    """
    def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,
                 max_duration: tp.Optional[float] = None):
        self.name = name
        self.compression_model = compression_model
        self.lm = lm
        if max_duration is None:
            if hasattr(lm, 'cfg'):
                max_duration = lm.cfg.dataset.segment_duration  # type: ignore
            else:
                raise ValueError("You must provide max_duration when building directly AudioGen")
        assert max_duration is not None
        self.max_duration: float = max_duration
        self.device = next(iter(lm.parameters())).device
        self.generation_params: dict = {}
        self.set_generation_params(duration=5)  # 5 seconds by default
        self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None
        if self.device.type == 'cpu':
            self.autocast = TorchAutocast(enabled=False)
        else:
            self.autocast = TorchAutocast(
                enabled=True, device_type=self.device.type, dtype=torch.float16)

    @property
    def frame_rate(self) -> float:
        """Roughly the number of AR steps per seconds."""
        return self.compression_model.frame_rate

    @property
    def sample_rate(self) -> int:
        """Sample rate of the generated audio."""
        return self.compression_model.sample_rate

    @property
    def audio_channels(self) -> int:
        """Audio channels of the generated audio."""
        return self.compression_model.channels

    @staticmethod
    def get_pretrained(name: str = 'facebook/audiogen-medium', device=None):
        """Return pretrained model, we provide a single model for now:
        - facebook/audiogen-medium (1.5B), text to sound,
          # see: https://huggingface.co/facebook/audiogen-medium
        """
        if device is None:
            if torch.cuda.device_count():
                device = 'cuda'
            else:
                device = 'cpu'

        if name == 'debug':
            # used only for unit tests
            compression_model = get_debug_compression_model(device, sample_rate=16000)
            lm = get_debug_lm_model(device)
            return AudioGen(name, compression_model, lm, max_duration=10)

        compression_model = load_compression_model(name, device=device)
        lm = load_lm_model(name, device=device)
        assert 'self_wav' not in lm.condition_provider.conditioners, \
            "AudioGen do not support waveform conditioning for now"
        return AudioGen(name, compression_model, lm)

    def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,
                              top_p: float = 0.0, temperature: float = 1.0,
                              duration: float = 10.0, cfg_coef: float = 3.0,
                              two_step_cfg: bool = False, extend_stride: float = 2):
        """Set the generation parameters for AudioGen.

        Args:
            use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.
            top_k (int, optional): top_k used for sampling. Defaults to 250.
            top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.
            temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.
            duration (float, optional): Duration of the generated waveform. Defaults to 10.0.
            cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.
            two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,
                instead of batching together the two. This has some impact on how things
                are padded but seems to have little impact in practice.
            extend_stride: when doing extended generation (i.e. more than 10 seconds), by how much
                should we extend the audio each time. Larger values will mean less context is
                preserved, and shorter value will require extra computations.
        """
        assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration."
        self.extend_stride = extend_stride
        self.duration = duration
        self.generation_params = {
            'use_sampling': use_sampling,
            'temp': temperature,
            'top_k': top_k,
            'top_p': top_p,
            'cfg_coef': cfg_coef,
            'two_step_cfg': two_step_cfg,
        }

    def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):
        """Override the default progress callback."""
        self._progress_callback = progress_callback

    def generate(self, descriptions: tp.List[str], progress: bool = False) -> torch.Tensor:
        """Generate samples conditioned on text.

        Args:
            descriptions (list of str): A list of strings used as text conditioning.
            progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
        """
        attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)
        assert prompt_tokens is None
        return self._generate_tokens(attributes, prompt_tokens, progress)

    def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,
                              descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,
                              progress: bool = False) -> torch.Tensor:
        """Generate samples conditioned on audio prompts.

        Args:
            prompt (torch.Tensor): A batch of waveforms used for continuation.
                Prompt should be [B, C, T], or [C, T] if only one sample is generated.
            prompt_sample_rate (int): Sampling rate of the given audio waveforms.
            descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.
            progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
        """
        if prompt.dim() == 2:
            prompt = prompt[None]
        if prompt.dim() != 3:
            raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).")
        prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)
        if descriptions is None:
            descriptions = [None] * len(prompt)
        attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)
        assert prompt_tokens is not None
        return self._generate_tokens(attributes, prompt_tokens, progress)

    @torch.no_grad()
    def _prepare_tokens_and_attributes(
            self,
            descriptions: tp.Sequence[tp.Optional[str]],
            prompt: tp.Optional[torch.Tensor],
    ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:
        """Prepare model inputs.

        Args:
            descriptions (list of str): A list of strings used as text conditioning.
            prompt (torch.Tensor): A batch of waveforms used for continuation.
        """
        attributes = [
            ConditioningAttributes(text={'description': description})
            for description in descriptions]

        if prompt is not None:
            if descriptions is not None:
                assert len(descriptions) == len(prompt), "Prompt and nb. descriptions doesn't match"
            prompt = prompt.to(self.device)
            prompt_tokens, scale = self.compression_model.encode(prompt)
            assert scale is None
        else:
            prompt_tokens = None
        return attributes, prompt_tokens

    def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],
                         prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:
        """Generate discrete audio tokens given audio prompt and/or conditions.

        Args:
            attributes (list of ConditioningAttributes): Conditions used for generation (here text).
            prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.
            progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
        Returns:
            torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.
        """
        i = 0
        prompt_list = attributes[0].text['description']
        total_gen_len = int(self.duration * self.frame_rate)
        max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)
        current_gen_offset: int = 0

        def _progress_callback(generated_tokens: int, tokens_to_generate: int):
            generated_tokens += current_gen_offset
            if self._progress_callback is not None:
                # Note that total_gen_len might be quite wrong depending on the
                # codebook pattern used, but with delay it is almost accurate.
                self._progress_callback(generated_tokens, total_gen_len)
            else:
                print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r')

        if prompt_tokens is not None:
            assert max_prompt_len >= prompt_tokens.shape[-1], \
                "Prompt is longer than audio to generate"

        callback = None
        if progress:
            callback = _progress_callback

        if self.duration <= self.max_duration:
            # generate by sampling from LM, simple case.
            with self.autocast:
                attributes[0].text['description'] = prompt_list[0]
                gen_tokens = self.lm.generate(
                    prompt_tokens, attributes,
                    callback=callback, max_gen_len=total_gen_len, **self.generation_params)

        else:
            all_tokens = []
            if prompt_tokens is None:
                prompt_length = 0
            else:
                all_tokens.append(prompt_tokens)
                prompt_length = prompt_tokens.shape[-1]

            stride_tokens = int(self.frame_rate * self.extend_stride)

            while current_gen_offset + prompt_length < total_gen_len:
                time_offset = current_gen_offset / self.frame_rate
                chunk_duration = min(self.duration - time_offset, self.max_duration)
                max_gen_len = int(chunk_duration * self.frame_rate)
                with self.autocast:
                    if i >= len(prompt_list):
                        i = len(prompt_list) - 1
                    attributes[0].text['description'] = prompt_list[i]
                    gen_tokens = self.lm.generate(
                        prompt_tokens, attributes,
                        callback=callback, max_gen_len=max_gen_len, **self.generation_params)
                    i = i + 1
                if prompt_tokens is None:
                    all_tokens.append(gen_tokens)
                else:
                    all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])
                prompt_tokens = gen_tokens[:, :, stride_tokens:]
                prompt_length = prompt_tokens.shape[-1]
                current_gen_offset += stride_tokens

            gen_tokens = torch.cat(all_tokens, dim=-1)

        # generate audio
        assert gen_tokens.dim() == 3
        with torch.no_grad():
            gen_audio = self.compression_model.decode(gen_tokens, None)
        return gen_audio

    def to(self, device: str):
        self.compression_model.to(device)
        self.lm.to(device)
        return self
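The class above exposes a small generation API (get_pretrained, set_generation_params, generate). A minimal usage sketch follows; note that this modified copy of _generate_tokens treats each attribute's 'description' as a list of prompts (one per extension chunk, indexed by i), so the sketch passes a list rather than a plain string. Saving via torchaudio is an assumption here, not something this file prescribes.

# generate_demo.py - illustrative sketch, assuming the audiocraft package layout above
import torchaudio
from audiocraft.models.audiogen import AudioGen

model = AudioGen.get_pretrained('facebook/audiogen-medium')
model.set_generation_params(duration=5, top_k=250, cfg_coef=3.0)

# One item in the batch; its description is a list of per-chunk prompts
# to match this copy's _generate_tokens behavior.
wav = model.generate([["a dog barking in the distance"]], progress=True)  # [B, C, T]
torchaudio.save("dog_bark.wav", wav[0].cpu(), model.sample_rate)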
spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/emotion/params_data.py
DELETED
@@ -1,29 +0,0 @@
## Mel-filterbank
mel_window_length = 25  # In milliseconds
mel_window_step = 10    # In milliseconds
mel_n_channels = 40


## Audio
sampling_rate = 16000
# Number of spectrogram frames in a partial utterance
partials_n_frames = 160     # 1600 ms
# Number of spectrogram frames at inference
inference_n_frames = 80     # 800 ms


## Voice Activation Detection
# Window size of the VAD. Must be either 10, 20 or 30 milliseconds.
# This sets the granularity of the VAD. Should not need to be changed.
vad_window_length = 30  # In milliseconds
# Number of frames to average together when performing the moving average smoothing.
# The larger this value, the larger the VAD variations must be to not get smoothed out.
vad_moving_average_width = 8
# Maximum number of consecutive silent frames a segment can have.
vad_max_silence_length = 6


## Audio volume normalization
audio_norm_target_dBFS = -30
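The frame counts above are tied to the mel hop size: each spectrogram frame advances mel_window_step milliseconds, so a partial utterance spans partials_n_frames * mel_window_step = 1600 ms. A tiny sketch (a hypothetical helper, not in the original file, and assuming the module is importable as params_data) makes that relationship explicit:

# duration_check.py - hypothetical helper illustrating the frame/ms relationship
import params_data as p

def frames_to_ms(n_frames: int) -> int:
    # Each frame hop advances the analysis window by mel_window_step milliseconds.
    return n_frames * p.mel_window_step

assert frames_to_ms(p.partials_n_frames) == 1600  # matches the "# 1600 ms" comment
assert frames_to_ms(p.inference_n_frames) == 800  # matches the "# 800 ms" comment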
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/vocoder/bigvgan/alias_free_torch/filter.py
DELETED
@@ -1,95 +0,0 @@
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
#   LICENSE is in incl_licenses directory.

import torch
import torch.nn as nn
import torch.nn.functional as F
import math

if 'sinc' in dir(torch):
    sinc = torch.sinc
else:
    # This code is adopted from adefossez's julius.core.sinc under the MIT License
    # https://adefossez.github.io/julius/julius/core.html
    #   LICENSE is in incl_licenses directory.
    def sinc(x: torch.Tensor):
        """
        Implementation of sinc, i.e. sin(pi * x) / (pi * x)
        __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
        """
        return torch.where(x == 0,
                           torch.tensor(1., device=x.device, dtype=x.dtype),
                           torch.sin(math.pi * x) / math.pi / x)


# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License
# https://adefossez.github.io/julius/julius/lowpass.html
#   LICENSE is in incl_licenses directory.
def kaiser_sinc_filter1d(cutoff, half_width, kernel_size):  # return filter [1,1,kernel_size]
    even = (kernel_size % 2 == 0)
    half_size = kernel_size // 2

    # For kaiser window
    delta_f = 4 * half_width
    A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
    if A > 50.:
        beta = 0.1102 * (A - 8.7)
    elif A >= 21.:
        beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21.)
    else:
        beta = 0.
    window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)

    # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio
    if even:
        time = (torch.arange(-half_size, half_size) + 0.5)
    else:
        time = torch.arange(kernel_size) - half_size
    if cutoff == 0:
        filter_ = torch.zeros_like(time)
    else:
        filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
        # Normalize filter to have sum = 1, otherwise we will have a small leakage
        # of the constant component in the input signal.
        filter_ /= filter_.sum()
    filter = filter_.view(1, 1, kernel_size)

    return filter


class LowPassFilter1d(nn.Module):
    def __init__(self,
                 cutoff=0.5,
                 half_width=0.6,
                 stride: int = 1,
                 padding: bool = True,
                 padding_mode: str = 'replicate',
                 kernel_size: int = 12):
        # kernel_size should be even number for stylegan3 setup,
        # in this implementation, odd number is also possible.
        super().__init__()
        if cutoff < -0.:
            raise ValueError("Minimum cutoff must be larger than zero.")
        if cutoff > 0.5:
            raise ValueError("A cutoff above 0.5 does not make sense.")
        self.kernel_size = kernel_size
        self.even = (kernel_size % 2 == 0)
        self.pad_left = kernel_size // 2 - int(self.even)
        self.pad_right = kernel_size // 2
        self.stride = stride
        self.padding = padding
        self.padding_mode = padding_mode
        filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
        self.register_buffer("filter", filter)

    # input [B, C, T]
    def forward(self, x):
        _, C, _ = x.shape

        if self.padding:
            x = F.pad(x, (self.pad_left, self.pad_right),
                      mode=self.padding_mode)
        out = F.conv1d(x, self.filter.expand(C, -1, -1),
                       stride=self.stride, groups=C)

        return out
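Because the module filters each channel independently (groups=C) and pads by kernel_size-1 samples in total, the output length equals the input length when padding is on and stride is 1. A quick smoke-test sketch with assumed shapes:

# smoke_test.py - minimal sketch exercising LowPassFilter1d on random audio-like data
import torch

lpf = LowPassFilter1d(cutoff=0.25, half_width=0.6, kernel_size=12)
x = torch.randn(2, 4, 16000)   # [B, C, T]: 2 clips, 4 channels, 1 s at 16 kHz
y = lpf(x)
print(x.shape, "->", y.shape)  # with padding=True and stride=1, T is preserved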
spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/txt_processors/en.py
DELETED
@@ -1,78 +0,0 @@
import re
import unicodedata

from g2p_en import G2p
from g2p_en.expand import normalize_numbers
from nltk import pos_tag
from nltk.tokenize import TweetTokenizer

from text_to_speech.data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor, register_txt_processors
from text_to_speech.utils.text.text_encoder import PUNCS, is_sil_phoneme


class EnG2p(G2p):
    word_tokenize = TweetTokenizer().tokenize

    def __call__(self, text):
        # preprocessing
        words = EnG2p.word_tokenize(text)
        tokens = pos_tag(words)  # tuples of (word, tag)

        # steps
        prons = []
        for word, pos in tokens:
            if re.search("[a-z]", word) is None:
                pron = [word]
            elif word in self.homograph2features:  # Check homograph
                pron1, pron2, pos1 = self.homograph2features[word]
                if pos.startswith(pos1):
                    pron = pron1
                else:
                    pron = pron2
            elif word in self.cmu:  # lookup CMU dict
                pron = self.cmu[word][0]
            else:  # predict for oov
                pron = self.predict(word)

            prons.extend(pron)
            prons.extend([" "])

        return prons[:-1]


@register_txt_processors('en')
class TxtProcessor(BaseTxtProcessor):
    g2p = EnG2p()

    @staticmethod
    def preprocess_text(text):
        text = normalize_numbers(text)
        text = ''.join(char for char in unicodedata.normalize('NFD', text)
                       if unicodedata.category(char) != 'Mn')  # Strip accents
        text = text.lower()
        text = re.sub("[\'\"()]+", "", text)
        text = re.sub("[-]+", " ", text)
        text = re.sub(f"[^ a-z{PUNCS}]", "", text)
        text = re.sub(f" ?([{PUNCS}]) ?", r"\1", text)  # ' ! ' -> '!'
        text = re.sub(f"([{PUNCS}])+", r"\1", text)  # !! -> !
        text = text.replace("i.e.", "that is")
        text = text.replace("etc.", "etc")
        text = re.sub(f"([{PUNCS}])", r" \1 ", text)
        text = re.sub(rf"\s+", r" ", text)
        return text

    @classmethod
    def process(cls, txt, preprocess_args):
        txt = cls.preprocess_text(txt).strip()
        phs = cls.g2p(txt)
        txt_struct = [[w, []] for w in txt.split(" ")]
        i_word = 0
        for p in phs:
            if p == ' ':
                i_word += 1
            else:
                txt_struct[i_word][1].append(p)
        txt_struct = cls.postprocess(txt_struct, preprocess_args)
        return txt_struct, txt
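A rough end-to-end sketch of how this processor might be invoked; note that postprocess lives in the base class not shown here, so the shape and required keys of preprocess_args are assumptions:

# g2p_demo.py - illustrative only; requires g2p_en and the nltk taggers it depends on
from text_to_speech.data_gen.tts.txt_processors.en import TxtProcessor

clean = TxtProcessor.preprocess_text("Caf\u00e9 costs 12 dollars, i.e. a lot!")
print(clean)  # accents stripped, numbers spelled out, punctuation spaced

# process() additionally runs g2p and the base-class postprocess step;
# an empty preprocess_args is an assumption and may need real keys.
txt_struct, txt = TxtProcessor.process("hello world!", preprocess_args={})
print(txt_struct)  # roughly [["hello", [...phonemes...]], ["world", [...]], ["!", [...]]]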
spaces/ALSv/FSW/CONTRIBUTING.md
DELETED
@@ -1,25 +0,0 @@
## Pull Requests

Before submitting a pull request, please align with us first, as we need to establish both technical and business requirements.


### Do

- ...consider fixing bugs over adding features
- ...open one pull request per feature or improvement
- ...consult us about implementation details
- ...test your code properly before you submit it
- ...resolve failed CI pipelines


### Don't

- ...introduce fundamental changes in terms of software architecture
- ...introduce OOP - we accept functional programming only
- ...ignore given requirements or try to work around them
- ...submit code to a development branch without consulting us
- ...submit massive amounts of code changes
- ...submit a proof of concept
- ...submit code that is using undocumented and private APIs
- ...solve third party issues in our project
- ...comment what your code does - use proper naming instead
spaces/ARTeLab/DTM_Estimation_SRandD/models/modelNetA.py
DELETED
@@ -1,381 +0,0 @@
# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
# ==============================================================================

# ==============================================================================
# File description: Realize the model definition function.
# ==============================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch import Tensor

__all__ = [
    "ResidualDenseBlock", "ResidualResidualDenseBlock",
    "Discriminator", "Generator",
    "DownSamplingNetwork"
]


class ResidualDenseBlock(nn.Module):
    """Achieves densely connected convolutional layers.
    `Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.

    Args:
        channels (int): The number of channels in the input image.
        growths (int): The number of channels that increase in each layer of convolution.
    """

    def __init__(self, channels: int, growths: int) -> None:
        super(ResidualDenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1, 1), (1, 1))
        self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1, 1), (1, 1))
        self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1, 1), (1, 1))
        self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1, 1), (1, 1))
        self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1, 1), (1, 1))

        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.identity = nn.Identity()

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        # Each conv sees the input concatenated with all previous outputs (dense connectivity).
        out1 = self.leaky_relu(self.conv1(x))
        out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
        out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
        out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
        out5 = self.identity(self.conv5(torch.cat([x, out1, out2, out3, out4], 1)))
        out = out5 * 0.2 + identity

        return out


class MiniResidualDenseBlock(nn.Module):
    """Achieves densely connected convolutional layers.
    `Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.

    Args:
        channels (int): The number of channels in the input image.
        growths (int): The number of channels that increase in each layer of convolution.
    """

    def __init__(self, channels: int, growths: int) -> None:
        super(MiniResidualDenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1, 1), (1, 1))
        self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1, 1), (1, 1))
        self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1, 1), (1, 1))
        self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1, 1), (1, 1))
        self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1, 1), (1, 1))

        self.leaky_relu = nn.LeakyReLU(0.2, True)

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out1 = self.leaky_relu(self.conv1(x))
        out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
        out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
        out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
        out5 = self.leaky_relu(self.conv5(torch.cat([x, out1, out2, out3, out4], 1)))
        out = out5 * 0.2 + identity

        return out


class ResidualResidualDenseBlock(nn.Module):
    """Multi-layer residual dense convolution block.

    Args:
        channels (int): The number of channels in the input image.
        growths (int): The number of channels that increase in each layer of convolution.
    """

    def __init__(self, channels: int, growths: int) -> None:
        super(ResidualResidualDenseBlock, self).__init__()
        self.rdb1 = ResidualDenseBlock(channels, growths)
        self.rdb2 = ResidualDenseBlock(channels, growths)
        self.rdb3 = ResidualDenseBlock(channels, growths)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        identity = x

        out = self.rdb1(x)
        out = self.rdb2(out)
        out = self.rdb3(out)
        out = out * 0.2 + identity

        return out


class MiniResidualResidualDenseBlock(nn.Module):
    """Multi-layer residual dense convolution block.

    Args:
        channels (int): The number of channels in the input image.
        growths (int): The number of channels that increase in each layer of convolution.
    """

    def __init__(self, channels: int, growths: int) -> None:
        super(MiniResidualResidualDenseBlock, self).__init__()
        self.M_rdb1 = MiniResidualDenseBlock(channels, growths)
        self.M_rdb2 = MiniResidualDenseBlock(channels, growths)
        self.M_rdb3 = MiniResidualDenseBlock(channels, growths)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        identity = x
        out = self.M_rdb1(x)
        out = self.M_rdb2(out)
        out = self.M_rdb3(out)
        out = out * 0.2 + identity
        return out


class Discriminator(nn.Module):
    def __init__(self) -> None:
        super(Discriminator, self).__init__()
        self.features = nn.Sequential(
            # input size. (2) x 512 x 512
            nn.Conv2d(2, 32, (3, 3), (1, 1), (1, 1), bias=True),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(32, 64, (4, 4), (2, 2), (1, 1), bias=False),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1), bias=False),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, True),
            # state size. (128) x 256 x 256
            nn.Conv2d(64, 128, (4, 4), (2, 2), (1, 1), bias=False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1), bias=False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, True),
            # state size. (256) x 64 x 64
            nn.Conv2d(128, 256, (4, 4), (2, 2), (1, 1), bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(256, 256, (4, 4), (2, 2), (1, 1), bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, True),
            # state size. (512) x 16 x 16
            nn.Conv2d(256, 256, (4, 4), (2, 2), (1, 1), bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, True),

            nn.Conv2d(256, 256, (4, 4), (2, 2), (1, 1), bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, True),
            # state size. (512) x 8 x 8
        )

        self.classifier = nn.Sequential(
            nn.Linear(256 * 8 * 8, 100),
            nn.LeakyReLU(0.2, True),
            nn.Linear(100, 1),
        )

    def forward(self, x: Tensor) -> Tensor:
        out = self.features(x)
        out = torch.flatten(out, 1)
        out = self.classifier(out)
        return out


class Generator(nn.Module):
    def __init__(self) -> None:
        super(Generator, self).__init__()
        # RLNet
        self.RLNetconv_block1 = nn.Conv2d(1, 64, (3, 3), (1, 1), (1, 1))
        RLNettrunk = []
        for _ in range(4):
            RLNettrunk += [ResidualResidualDenseBlock(64, 32)]
        self.RLNettrunk = nn.Sequential(*RLNettrunk)
        self.RLNetconv_block2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
        self.RLNetconv_block3 = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True)
        )
        self.RLNetconv_block4 = nn.Sequential(
            nn.Conv2d(64, 1, (3, 3), (1, 1), (1, 1)),
            nn.Tanh()
        )

        #############################################################################
        # Generator
        self.conv_block1 = nn.Conv2d(1, 64, (3, 3), (1, 1), (1, 1))

        trunk = []
        for _ in range(16):
            trunk += [ResidualResidualDenseBlock(64, 32)]
        self.trunk = nn.Sequential(*trunk)

        # After the feature extraction network, reconnect a layer of convolutional blocks.
        self.conv_block2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))

        # Upsampling convolutional layer.
        self.upsampling = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
|
272 |
-
nn.LeakyReLU(0.2, True)
|
273 |
-
)
|
274 |
-
|
275 |
-
# Reconnect a layer of convolution block after upsampling.
|
276 |
-
self.conv_block3 = nn.Sequential(
|
277 |
-
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
|
278 |
-
nn.LeakyReLU(0.2, True)
|
279 |
-
)
|
280 |
-
|
281 |
-
self.conv_block4 = nn.Sequential(
|
282 |
-
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
|
283 |
-
#nn.Sigmoid()
|
284 |
-
)
|
285 |
-
|
286 |
-
self.conv_block0_branch0 = nn.Sequential(
|
287 |
-
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
|
288 |
-
nn.LeakyReLU(0.2, True),
|
289 |
-
nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1)),
|
290 |
-
nn.LeakyReLU(0.2, True),
|
291 |
-
nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)),
|
292 |
-
nn.LeakyReLU(0.2, True),
|
293 |
-
nn.Conv2d(128, 64, (3, 3), (1, 1), (1, 1)),
|
294 |
-
nn.Tanh()
|
295 |
-
)
|
296 |
-
|
297 |
-
self.conv_block0_branch1 = nn.Sequential(
|
298 |
-
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
|
299 |
-
nn.LeakyReLU(0.2, True),
|
300 |
-
nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1)),
|
301 |
-
nn.LeakyReLU(0.2, True),
|
302 |
-
nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)),
|
303 |
-
nn.LeakyReLU(0.2, True),
|
304 |
-
nn.Conv2d(128, 64, (3, 3), (1, 1), (1, 1)),
|
305 |
-
nn.Tanh()
|
306 |
-
)
|
307 |
-
|
308 |
-
self.conv_block1_branch0 = nn.Sequential(
|
309 |
-
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
|
310 |
-
nn.LeakyReLU(0.2, True),
|
311 |
-
nn.Conv2d(64, 1, (3, 3), (1, 1), (1, 1)),
|
312 |
-
#nn.LeakyReLU(0.2, True),
|
313 |
-
#nn.Conv2d(32, 1, (3, 3), (1, 1), (1, 1)),
|
314 |
-
nn.Sigmoid()
|
315 |
-
)
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
self.conv_block1_branch1 = nn.Sequential(
|
320 |
-
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
|
321 |
-
nn.LeakyReLU(0.2, True),
|
322 |
-
nn.Conv2d(64, 1, (3, 3), (1, 1), (1, 1)),
|
323 |
-
nn.Sigmoid())
|
324 |
-
|
325 |
-
|
326 |
-
|
327 |
-
|
328 |
-
def _forward_impl(self, x: Tensor) -> Tensor:
|
329 |
-
#RLNet
|
330 |
-
out1 = self.RLNetconv_block1(x)
|
331 |
-
out = self.RLNettrunk(out1)
|
332 |
-
out2 = self.RLNetconv_block2(out)
|
333 |
-
out = out1 + out2
|
334 |
-
out = self.RLNetconv_block3(out)
|
335 |
-
out = self.RLNetconv_block4(out)
|
336 |
-
rlNet_out = out + x
|
337 |
-
|
338 |
-
#Generator
|
339 |
-
out1 = self.conv_block1(rlNet_out)
|
340 |
-
out = self.trunk(out1)
|
341 |
-
out2 = self.conv_block2(out)
|
342 |
-
out = out1 + out2
|
343 |
-
out = self.upsampling(F.interpolate(out, scale_factor=2, mode="bicubic"))
|
344 |
-
out = self.upsampling(F.interpolate(out, scale_factor=2, mode="bicubic"))
|
345 |
-
out = self.conv_block3(out)
|
346 |
-
#
|
347 |
-
out = self.conv_block4(out)
|
348 |
-
|
349 |
-
#demResidual = out[:, 1:2, :, :]
|
350 |
-
#grayResidual = out[:, 0:1, :, :]
|
351 |
-
|
352 |
-
# out = self.trunkRGB(out_4)
|
353 |
-
#
|
354 |
-
# out_dem = out[:, 3:4, :, :] * 0.2 + demResidual # DEM images extracted
|
355 |
-
# out_rgb = out[:, 0:3, :, :] * 0.2 + rgbResidual # RGB images extracted
|
356 |
-
|
357 |
-
#ra0
|
358 |
-
#out_rgb= rgbResidual + self.conv_block0_branch0(rgbResidual)
|
359 |
-
|
360 |
-
out_dem = out + self.conv_block0_branch1(out) #out+ tanh()
|
361 |
-
out_gray = out + self.conv_block0_branch0(out) #out+ tanh()
|
362 |
-
|
363 |
-
out_gray = self.conv_block1_branch0(out_gray) #sigmoid()
|
364 |
-
out_dem = self.conv_block1_branch1(out_dem) #sigmoid()
|
365 |
-
|
366 |
-
return out_gray, out_dem, rlNet_out
|
367 |
-
|
368 |
-
|
369 |
-
def forward(self, x: Tensor) -> Tensor:
|
370 |
-
return self._forward_impl(x)
|
371 |
-
|
372 |
-
def _initialize_weights(self) -> None:
|
373 |
-
for m in self.modules():
|
374 |
-
if isinstance(m, nn.Conv2d):
|
375 |
-
nn.init.kaiming_normal_(m.weight)
|
376 |
-
if m.bias is not None:
|
377 |
-
nn.init.constant_(m.bias, 0)
|
378 |
-
m.weight.data *= 0.1
|
379 |
-
elif isinstance(m, nn.BatchNorm2d):
|
380 |
-
nn.init.constant_(m.weight, 1)
|
381 |
-
m.weight.data *= 0.1
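A minimal smoke test for the blocks above, assuming the imports at the top of this file (`torch`, `nn`, `F`, `Tensor`) and the `ResidualDenseBlock` defined earlier; shapes are read off the layer definitions, not taken from the original repo:

```python
import torch

# Sketch only: the generator takes a 1-channel image and upscales 4x (two 2x steps),
# returning a grayscale head, a DEM head, and the RLNet pre-cleaning residual.
g = Generator()
d = Discriminator()

x = torch.randn(1, 1, 128, 128)
out_gray, out_dem, rlnet_out = g(x)
print(out_gray.shape, out_dem.shape, rlnet_out.shape)  # (1,1,512,512), (1,1,512,512), (1,1,128,128)

# The discriminator expects the two heads stacked as a 2-channel 512x512 image.
score = d(torch.cat([out_gray, out_dem], dim=1))
print(score.shape)  # (1, 1)
```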
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192/__init__.py
DELETED
File without changes
spaces/ATang0729/Forecast4Muses/Model/Model6/extensions/dataset_info.py
DELETED
@@ -1,104 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np


class DatasetInfo:

    def __init__(self, dataset_info):
        self._dataset_info = dataset_info
        self.dataset_name = self._dataset_info['dataset_name']
        self.paper_info = self._dataset_info['paper_info']
        self.keypoint_info = self._dataset_info['keypoint_info']
        self.skeleton_info = self._dataset_info['skeleton_info']
        self.joint_weights = np.array(
            self._dataset_info['joint_weights'], dtype=np.float32)[:, None]

        self.sigmas = np.array(self._dataset_info['sigmas'])

        self._parse_keypoint_info()
        self._parse_skeleton_info()

    def _parse_skeleton_info(self):
        """Parse skeleton information.

        - link_num (int): number of links.
        - skeleton (list((2,))): list of links (id).
        - skeleton_name (list((2,))): list of links (name).
        - pose_link_color (np.ndarray): the color of the link for
          visualization.
        """
        self.link_num = len(self.skeleton_info.keys())
        self.pose_link_color = []

        self.skeleton_name = []
        self.skeleton = []
        for skid in self.skeleton_info.keys():
            link = self.skeleton_info[skid]['link']
            self.skeleton_name.append(link)
            self.skeleton.append([
                self.keypoint_name2id[link[0]], self.keypoint_name2id[link[1]]
            ])
            self.pose_link_color.append(self.skeleton_info[skid].get(
                'color', [255, 128, 0]))
        self.pose_link_color = np.array(self.pose_link_color)

    def _parse_keypoint_info(self):
        """Parse keypoint information.

        - keypoint_num (int): number of keypoints.
        - keypoint_id2name (dict): mapping keypoint id to keypoint name.
        - keypoint_name2id (dict): mapping keypoint name to keypoint id.
        - upper_body_ids (list): a list of keypoints that belong to the
          upper body.
        - lower_body_ids (list): a list of keypoints that belong to the
          lower body.
        - flip_index (list): list of flip index (id)
        - flip_pairs (list((2,))): list of flip pairs (id)
        - flip_index_name (list): list of flip index (name)
        - flip_pairs_name (list((2,))): list of flip pairs (name)
        - pose_kpt_color (np.ndarray): the color of the keypoint for
          visualization.
        """

        self.keypoint_num = len(self.keypoint_info.keys())
        self.keypoint_id2name = {}
        self.keypoint_name2id = {}

        self.pose_kpt_color = []
        self.upper_body_ids = []
        self.lower_body_ids = []

        self.flip_index_name = []
        self.flip_pairs_name = []

        for kid in self.keypoint_info.keys():

            keypoint_name = self.keypoint_info[kid]['name']
            self.keypoint_id2name[kid] = keypoint_name
            self.keypoint_name2id[keypoint_name] = kid
            self.pose_kpt_color.append(self.keypoint_info[kid].get(
                'color', [255, 128, 0]))

            type = self.keypoint_info[kid].get('type', '')
            if type == 'upper':
                self.upper_body_ids.append(kid)
            elif type == 'lower':
                self.lower_body_ids.append(kid)
            else:
                pass

            swap_keypoint = self.keypoint_info[kid].get('swap', '')
            if swap_keypoint == keypoint_name or swap_keypoint == '':
                self.flip_index_name.append(keypoint_name)
            else:
                self.flip_index_name.append(swap_keypoint)
                if [swap_keypoint, keypoint_name] not in self.flip_pairs_name:
                    self.flip_pairs_name.append([keypoint_name, swap_keypoint])

        self.flip_pairs = [[
            self.keypoint_name2id[pair[0]], self.keypoint_name2id[pair[1]]
        ] for pair in self.flip_pairs_name]
        self.flip_index = [
            self.keypoint_name2id[name] for name in self.flip_index_name
        ]
        self.pose_kpt_color = np.array(self.pose_kpt_color)
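A toy, hypothetical `dataset_info` dict is enough to exercise the parser above; every field name below is one that `__init__` actually reads:

```python
toy = dict(
    dataset_name='toy',
    paper_info={},
    keypoint_info={
        0: dict(name='left_eye', type='upper', swap='right_eye', color=[0, 255, 0]),
        1: dict(name='right_eye', type='upper', swap='left_eye', color=[255, 0, 0]),
    },
    skeleton_info={0: dict(link=('left_eye', 'right_eye'), color=[0, 0, 255])},
    joint_weights=[1.0, 1.0],
    sigmas=[0.025, 0.025],
)
info = DatasetInfo(toy)
print(info.flip_pairs)   # [[0, 1]] -- the pair is recorded once, not mirrored
print(info.flip_index)   # [1, 0]  -- each keypoint maps to its horizontal swap
```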
spaces/Abhay1210/prompt-generator_V1/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Prompt-generator V1
emoji: 🏆
colorFrom: purple
colorTo: yellow
sdk: gradio
sdk_version: 3.38.0
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Abhilashvj/planogram-compliance/models/experimental.py
DELETED
@@ -1,147 +0,0 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Experimental modules
"""
import math

import numpy as np
import torch
import torch.nn as nn

from utils.downloads import attempt_download


class Sum(nn.Module):
    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
    def __init__(self, n, weight=False):  # n: number of inputs
        super().__init__()
        self.weight = weight  # apply weights boolean
        self.iter = range(n - 1)  # iter object
        if weight:
            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights

    def forward(self, x):
        y = x[0]  # no weight
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            for i in self.iter:
                y = y + x[i + 1] * w[i]
        else:
            for i in self.iter:
                y = y + x[i + 1]
        return y


class MixConv2d(nn.Module):
    # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
        super().__init__()
        n = len(k)  # number of convolutions
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, n - 1e-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * n
            a = np.eye(n + 1, n, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b

        self.m = nn.ModuleList([
            nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False)
            for k, c_ in zip(k, c_)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU()

    def forward(self, x):
        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))


class Ensemble(nn.ModuleList):
    # Ensemble of models
    def __init__(self):
        super().__init__()

    def forward(self, x, augment=False, profile=False, visualize=False):
        y = [module(x, augment, profile, visualize)[0] for module in self]
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.stack(y).mean(0)  # mean ensemble
        y = torch.cat(y, 1)  # nms ensemble
        return y, None  # inference, train output


def attempt_load(weights, device=None, inplace=True, fuse=True):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    from models.yolo import Detect, Model

    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        ckpt = torch.load(attempt_download(w), map_location="cpu")  # load
        ckpt = (ckpt.get("ema") or ckpt["model"]).to(device).float()  # FP32 model

        # Model compatibility updates
        if not hasattr(ckpt, "stride"):
            ckpt.stride = torch.tensor([32.0])
        if hasattr(ckpt, "names") and isinstance(ckpt.names, (list, tuple)):
            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict

        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, "fuse") else ckpt.eval())  # model in eval mode

    # Module compatibility updates
    for m in model.modules():
        t = type(m)
        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
            m.inplace = inplace  # torch 1.7.0 compatibility
            if t is Detect and not isinstance(m.anchor_grid, list):
                delattr(m, "anchor_grid")
                setattr(m, "anchor_grid", [torch.zeros(1)] * m.nl)
        elif t is nn.Upsample and not hasattr(m, "recompute_scale_factor"):
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

    # Return model
    if len(model) == 1:
        return model[-1]

    # Return detection ensemble
    print(f"Ensemble created with {weights}\n")
    for k in "names", "nc", "yaml":
        setattr(model, k, getattr(model[0], k))
    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
    assert all(model[0].nc == m.nc for m in model), f"Models have different class counts: {[m.nc for m in model]}"
    return model
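A sketch of the typical call path for `attempt_load`, assuming a checkpoint name that `attempt_download` can resolve (the filename below is illustrative, not part of this file):

```python
import torch

# Single weights path -> a single fused model, not an Ensemble.
model = attempt_load('yolov5s.pt', device=torch.device('cpu'), fuse=True)
model.eval()
with torch.inference_mode():
    pred = model(torch.zeros(1, 3, 640, 640))  # dummy 640x640 RGB frame
```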
spaces/AgentVerse/agentVerse/agentverse_command/main_tasksolving_cli.py
DELETED
@@ -1,34 +0,0 @@
import os
import logging

# from agentverse.agentverse import AgentVerse
from agentverse.tasksolving import TaskSolving
from agentverse.gui import GUI
from agentverse.logging import logger
from argparse import ArgumentParser

parser = ArgumentParser()

parser.add_argument(
    "--task",
    type=str,
    default="tasksolving/brainstorming",
)
parser.add_argument("--debug", action="store_true")
parser.add_argument(
    "--tasks_dir",
    type=str,
    default=os.path.join(os.path.dirname(__file__), "..", "agentverse", "tasks"),
)
args = parser.parse_args()

logger.set_level(logging.DEBUG if args.debug else logging.INFO)


def cli_main():
    agentversepipeline = TaskSolving.from_task(args.task, args.tasks_dir)
    agentversepipeline.run()


if __name__ == "__main__":
    cli_main()
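The same pipeline can be driven without argparse; a sketch mirroring the CLI defaults (the tasks directory path is an assumption about the repo layout):

```python
from agentverse.tasksolving import TaskSolving

pipeline = TaskSolving.from_task("tasksolving/brainstorming", "agentverse/tasks")
pipeline.run()
```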
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/rotate/Factory.js
DELETED
@@ -1,11 +0,0 @@
import Rotate from './Rotate.js';
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('rotate', function (config) {
    return new Rotate(this.scene, config);
});

SetValue(window, 'RexPlugins.UI.Rotate', Rotate);

export default Rotate;
spaces/AlanMars/QYL-AI-Space/modules/models/StableLM.py
DELETED
@@ -1,93 +0,0 @@
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
import time
import numpy as np
from torch.nn import functional as F
import os
from .base_model import BaseLLMModel
from threading import Thread

STABLELM_MODEL = None
STABLELM_TOKENIZER = None


class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [50278, 50279, 50277, 1, 0]
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False


class StableLM_Client(BaseLLMModel):
    def __init__(self, model_name, user_name="") -> None:
        super().__init__(model_name=model_name, user=user_name)
        global STABLELM_MODEL, STABLELM_TOKENIZER
        print("Starting to load StableLM to memory")
        if model_name == "StableLM":
            model_name = "stabilityai/stablelm-tuned-alpha-7b"
        else:
            model_name = f"models/{model_name}"
        if STABLELM_MODEL is None:
            STABLELM_MODEL = AutoModelForCausalLM.from_pretrained(
                model_name, torch_dtype=torch.float16).cuda()
        if STABLELM_TOKENIZER is None:
            STABLELM_TOKENIZER = AutoTokenizer.from_pretrained(model_name)
        self.generator = pipeline(
            'text-generation', model=STABLELM_MODEL, tokenizer=STABLELM_TOKENIZER, device=0)
        print("Successfully loaded StableLM to the memory")
        self.system_prompt = """StableAssistant
- StableAssistant is a helpful and harmless Open Source AI Language Model developed by Stability and CarperAI.
- StableAssistant is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- StableAssistant is more than just an information source, StableAssistant is also able to write poetry, short stories, and make jokes.
- StableAssistant will refuse to participate in anything that could harm a human."""
        self.max_generation_token = 1024
        self.top_p = 0.95
        self.temperature = 1.0

    def _get_stablelm_style_input(self):
        history = self.history + [{"role": "assistant", "content": ""}]
        print(history)
        messages = self.system_prompt + \
            "".join(["".join(["<|USER|>" + history[i]["content"], "<|ASSISTANT|>" + history[i + 1]["content"]])
                     for i in range(0, len(history), 2)])
        return messages

    def _generate(self, text, bad_text=None):
        stop = StopOnTokens()
        result = self.generator(text, max_new_tokens=self.max_generation_token, num_return_sequences=1, num_beams=1, do_sample=True,
                                temperature=self.temperature, top_p=self.top_p, top_k=1000, stopping_criteria=StoppingCriteriaList([stop]))
        return result[0]["generated_text"].replace(text, "")

    def get_answer_at_once(self):
        messages = self._get_stablelm_style_input()
        return self._generate(messages), len(messages)

    def get_answer_stream_iter(self):
        stop = StopOnTokens()
        messages = self._get_stablelm_style_input()

        # model_inputs = tok([messages], return_tensors="pt")['input_ids'].cuda()[:, :4096-1024]
        model_inputs = STABLELM_TOKENIZER(
            [messages], return_tensors="pt").to("cuda")
        streamer = TextIteratorStreamer(
            STABLELM_TOKENIZER, timeout=10., skip_prompt=True, skip_special_tokens=True)
        generate_kwargs = dict(
            model_inputs,
            streamer=streamer,
            max_new_tokens=self.max_generation_token,
            do_sample=True,
            top_p=self.top_p,
            top_k=1000,
            temperature=self.temperature,
            num_beams=1,
            stopping_criteria=StoppingCriteriaList([stop])
        )
        t = Thread(target=STABLELM_MODEL.generate, kwargs=generate_kwargs)
        t.start()

        partial_text = ""
        for new_text in streamer:
            partial_text += new_text
            yield partial_text
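A hedged sketch of how this client is meant to be driven from the chat layer; it assumes `BaseLLMModel` exposes a `history` list in the format `_get_stablelm_style_input` expects, and it requires a CUDA GPU:

```python
client = StableLM_Client("StableLM")  # loads stabilityai/stablelm-tuned-alpha-7b onto the GPU
client.history = [{"role": "user", "content": "Write a haiku about GPUs."}]

# Blocking, single-shot answer:
answer, prompt_len = client.get_answer_at_once()

# Or stream partial text as it is generated:
for partial in client.get_answer_stream_iter():
    print(partial, end="\r")
```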
spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/sanskrit.py
DELETED
@@ -1,62 +0,0 @@
import re
from indic_transliteration import sanscript


# List of (iast, ipa) pairs:
_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
    ('a', 'ə'),
    ('ā', 'aː'),
    ('ī', 'iː'),
    ('ū', 'uː'),
    ('ṛ', 'ɹ`'),
    ('ṝ', 'ɹ`ː'),
    ('ḷ', 'l`'),
    ('ḹ', 'l`ː'),
    ('e', 'eː'),
    ('o', 'oː'),
    ('k', 'k⁼'),
    ('k⁼h', 'kʰ'),
    ('g', 'g⁼'),
    ('g⁼h', 'gʰ'),
    ('ṅ', 'ŋ'),
    ('c', 'ʧ⁼'),
    ('ʧ⁼h', 'ʧʰ'),
    ('j', 'ʥ⁼'),
    ('ʥ⁼h', 'ʥʰ'),
    ('ñ', 'n^'),
    ('ṭ', 't`⁼'),
    ('t`⁼h', 't`ʰ'),
    ('ḍ', 'd`⁼'),
    ('d`⁼h', 'd`ʰ'),
    ('ṇ', 'n`'),
    ('t', 't⁼'),
    ('t⁼h', 'tʰ'),
    ('d', 'd⁼'),
    ('d⁼h', 'dʰ'),
    ('p', 'p⁼'),
    ('p⁼h', 'pʰ'),
    ('b', 'b⁼'),
    ('b⁼h', 'bʰ'),
    ('y', 'j'),
    ('ś', 'ʃ'),
    ('ṣ', 's`'),
    ('r', 'ɾ'),
    ('l̤', 'l`'),
    ('h', 'ɦ'),
    ("'", ''),
    ('~', '^'),
    ('ṃ', '^')
]]


def devanagari_to_ipa(text):
    text = text.replace('ॐ', 'ओम्')
    text = re.sub(r'\s*।\s*$', '.', text)
    text = re.sub(r'\s*।\s*', ', ', text)
    text = re.sub(r'\s*॥', '.', text)
    text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST)
    for regex, replacement in _iast_to_ipa:
        text = re.sub(regex, replacement, text)
    text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0)[:-1] + 'h' + x.group(1) + '*', text)
    return text
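A quick sanity check; the expected output below is hand-traced through the rule list (rules apply in order, so 'a'→'ə', 'o'→'oː' and 'g'→'g⁼' fire before 'y'→'j') and should be verified locally:

```python
print(devanagari_to_ipa('योग'))  # expected (hand-traced): 'joːg⁼ə'
```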
spaces/Ameaou/academic-chatgpt3.1/Dockerfile
DELETED
@@ -1,20 +0,0 @@
# This Dockerfile is for builds without local models; to use local models such as ChatGLM, see docs/Dockerfile+ChatGLM
# How to build: edit `config.py` first, then run: docker build -t gpt-academic .
# How to run: docker run --rm -it --net=host gpt-academic
FROM python:3.11

RUN echo '[global]' > /etc/pip.conf && \
    echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
    echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf


WORKDIR /gpt
COPY requirements.txt .
RUN pip3 install -r requirements.txt

COPY . .

# Optional step: pre-warm the modules
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

CMD ["python3", "-u", "main.py"]
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/libJPG/jpge.h
DELETED
@@ -1,172 +0,0 @@
// jpge.h - C++ class for JPEG compression.
// Public domain, Rich Geldreich <[email protected]>
// Alex Evans: Added RGBA support, linear memory allocator.
#ifndef JPEG_ENCODER_H
#define JPEG_ENCODER_H

#include <stdint.h>

namespace jpge
{
  typedef unsigned char  uint8;
  typedef signed short   int16;
  typedef signed int     int32;
  typedef unsigned short uint16;
  typedef unsigned int   uint32;
  typedef unsigned int   uint;

  // JPEG chroma subsampling factors. Y_ONLY (grayscale images) and H2V2 (color images) are the most common.
  enum subsampling_t { Y_ONLY = 0, H1V1 = 1, H2V1 = 2, H2V2 = 3 };

  // JPEG compression parameters structure.
  struct params
  {
    inline params() : m_quality(85), m_subsampling(H2V2), m_no_chroma_discrim_flag(false), m_two_pass_flag(false) { }

    inline bool check_valid() const
    {
      if ((m_quality < 1) || (m_quality > 100)) return false;
      if ((uint)m_subsampling > (uint)H2V2) return false;
      return true;
    }

    // Quality: 1-100, higher is better. Typical values are around 50-95.
    int m_quality;

    // m_subsampling:
    // 0 = Y (grayscale) only
    // 1 = YCbCr, no subsampling (H1V1, YCbCr 1x1x1, 3 blocks per MCU)
    // 2 = YCbCr, H2V1 subsampling (YCbCr 2x1x1, 4 blocks per MCU)
    // 3 = YCbCr, H2V2 subsampling (YCbCr 4x1x1, 6 blocks per MCU-- very common)
    subsampling_t m_subsampling;

    // Disables CbCr discrimination - only intended for testing.
    // If true, the Y quantization table is also used for the CbCr channels.
    bool m_no_chroma_discrim_flag;

    bool m_two_pass_flag;
  };

  // Writes JPEG image to a file.
  // num_channels must be 1 (Y) or 3 (RGB), image pitch must be width*num_channels.
  bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());

  // Writes JPEG image to memory buffer.
  // On entry, buf_size is the size of the output buffer pointed at by pBuf, which should be at least ~1024 bytes.
  // If return value is true, buf_size will be set to the size of the compressed data.
  bool compress_image_to_jpeg_file_in_memory(void *pBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());

  // Output stream abstract class - used by the jpeg_encoder class to write to the output stream.
  // put_buf() is generally called with len==JPGE_OUT_BUF_SIZE bytes, but for headers it'll be called with smaller amounts.
  class output_stream
  {
  public:
    virtual ~output_stream() { };
    virtual bool put_buf(const void* Pbuf, int64_t len) = 0;
    template<class T> inline bool put_obj(const T& obj) { return put_buf(&obj, sizeof(T)); }
  };

  // Lower level jpeg_encoder class - useful if more control is needed than the above helper functions.
  class jpeg_encoder
  {
  public:
    jpeg_encoder();
    ~jpeg_encoder();

    // Initializes the compressor.
    // pStream: The stream object to use for writing compressed data.
    // params - Compression parameters structure, defined above.
    // width, height - Image dimensions.
    // channels - May be 1, or 3. 1 indicates grayscale, 3 indicates RGB source data.
    // Returns false on out of memory or if a stream write fails.
    bool init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params = params());

    const params &get_params() const { return m_params; }

    // Deinitializes the compressor, freeing any allocated memory. May be called at any time.
    void deinit();

    uint get_total_passes() const { return m_params.m_two_pass_flag ? 2 : 1; }
    inline uint get_cur_pass() { return m_pass_num; }

    // Call this method with each source scanline.
    // width * src_channels bytes per scanline is expected (RGB or Y format).
    // You must call with NULL after all scanlines are processed to finish compression.
    // Returns false on out of memory or if a stream write fails.
    bool process_scanline(const void* pScanline);

  private:
    jpeg_encoder(const jpeg_encoder &);
    jpeg_encoder &operator =(const jpeg_encoder &);

    typedef int32 sample_array_t;

    output_stream *m_pStream;
    params m_params;
    uint8 m_num_components;
    uint8 m_comp_h_samp[3], m_comp_v_samp[3];
    int m_image_x, m_image_y, m_image_bpp, m_image_bpl;
    int m_image_x_mcu, m_image_y_mcu;
    int m_image_bpl_xlt, m_image_bpl_mcu;
    int m_mcus_per_row;
    int m_mcu_x, m_mcu_y;
    uint8 *m_mcu_lines[16];
    uint8 m_mcu_y_ofs;
    sample_array_t m_sample_array[64];
    int16 m_coefficient_array[64];
    int32 m_quantization_tables[2][64];
    uint m_huff_codes[4][256];
    uint8 m_huff_code_sizes[4][256];
    uint8 m_huff_bits[4][17];
    uint8 m_huff_val[4][256];
    uint32 m_huff_count[4][256];
    int m_last_dc_val[3];
    enum { JPGE_OUT_BUF_SIZE = 2048 };
    uint8 m_out_buf[JPGE_OUT_BUF_SIZE];
    uint8 *m_pOut_buf;
    uint m_out_buf_left;
    uint32 m_bit_buffer;
    uint m_bits_in;
    uint8 m_pass_num;
    bool m_all_stream_writes_succeeded;

    void optimize_huffman_table(int table_num, int table_len);
    void emit_byte(uint8 i);
    void emit_word(uint i);
    void emit_marker(int marker);
    void emit_jfif_app0();
    void emit_dqt();
    void emit_sof();
    void emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag);
    void emit_dhts();
    void emit_sos();
    void emit_markers();
    void compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val);
    void compute_quant_table(int32 *dst, int16 *src);
    void adjust_quant_table(int32 *dst, int32 *src);
    void first_pass_init();
    bool second_pass_init();
    bool jpg_open(int p_x_res, int p_y_res, int src_channels);
    void load_block_8_8_grey(int x);
    void load_block_8_8(int x, int y, int c);
    void load_block_16_8(int x, int c);
    void load_block_16_8_8(int x, int c);
    void load_quantized_coefficients(int component_num);
    void flush_output_buffer();
    void put_bits(uint bits, uint len);
    void code_coefficients_pass_one(int component_num);
    void code_coefficients_pass_two(int component_num);
    void code_block(int component_num);
    void process_mcu_row();
    bool terminate_pass_one();
    bool terminate_pass_two();
    bool process_end_of_image();
    void load_mcu(const void* src);
    void clear();
    void init();
  };

} // namespace jpge

#endif // JPEG_ENCODER
spaces/Andy1621/uniformer_image_detection/configs/fcos/README.md
DELETED
@@ -1,35 +0,0 @@
# FCOS: Fully Convolutional One-Stage Object Detection

## Introduction

[ALGORITHM]

```latex
@article{tian2019fcos,
  title={FCOS: Fully Convolutional One-Stage Object Detection},
  author={Tian, Zhi and Shen, Chunhua and Chen, Hao and He, Tong},
  journal={arXiv preprint arXiv:1904.01355},
  year={2019}
}
```

## Results and Models

| Backbone | Style | GN | MS train | Tricks | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
|:---------:|:-------:|:-------:|:--------:|:-------:|:-------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
| R-50 | caffe | Y | N | N | N | 1x | 3.6 | 22.7 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/20201227_180009.log.json) |
| R-50 | caffe | Y | N | Y | N | 1x | 3.7 | - | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/20210105_135818.log.json)|
| R-50 | caffe | Y | N | Y | Y | 1x | 3.8 | - | 42.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/20210105_224556.log.json)|
| R-101 | caffe | Y | N | N | N | 1x | 5.5 | 17.3 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/20210103_155046.log.json) |

| Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
|:---------:|:-------:|:-------:|:--------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
| R-50 | caffe | Y | Y | 2x | 2.6 | 22.9 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20201227_161900.log.json) |
| R-101 | caffe | Y | Y | 2x | 5.5 | 17.3 | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20210103_155046.log.json) |
| X-101 | pytorch | Y | Y | 2x | 10.0 | 9.7 | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/20210114_133041.log.json) |

**Notes:**

- The X-101 backbone is X-101-64x4d.
- Tricks means setting `norm_on_bbox`, `centerness_on_reg`, `center_sampling` as `True`.
- DCN means using `DCNv2` in both backbone and head.
spaces/Andy1621/uniformer_image_detection/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py
DELETED
@@ -1,17 +0,0 @@
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
    type='GFL',
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, False, True, True),
        norm_eval=True,
        style='pytorch'))
spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py
DELETED
@@ -1,7 +0,0 @@
_base_ = [
    '../_base_/models/gcnet_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/defaults.py
DELETED
@@ -1,74 +0,0 @@
import copy

# Slightly different defaults for OpenAI's API
# Data type is important, Ex. use 0.0 for a float 0
default_req_params = {
    'max_new_tokens': 16,  # 'Inf' for chat
    'auto_max_new_tokens': False,
    'max_tokens_second': 0,
    'temperature': 1.0,
    'top_p': 1.0,
    'top_k': 1,  # choose 20 for chat in absence of another default
    'repetition_penalty': 1.18,
    'repetition_penalty_range': 0,
    'encoder_repetition_penalty': 1.0,
    'suffix': None,
    'stream': False,
    'echo': False,
    'seed': -1,
    # 'n' : default(body, 'n', 1),  # 'n' doesn't have a direct map
    'truncation_length': 2048,  # first use shared.settings value
    'add_bos_token': True,
    'do_sample': True,
    'typical_p': 1.0,
    'epsilon_cutoff': 0.0,  # In units of 1e-4
    'eta_cutoff': 0.0,  # In units of 1e-4
    'tfs': 1.0,
    'top_a': 0.0,
    'min_length': 0,
    'no_repeat_ngram_size': 0,
    'num_beams': 1,
    'penalty_alpha': 0.0,
    'length_penalty': 1.0,
    'early_stopping': False,
    'mirostat_mode': 0,
    'mirostat_tau': 5.0,
    'mirostat_eta': 0.1,
    'grammar_string': '',
    'guidance_scale': 1,
    'negative_prompt': '',
    'ban_eos_token': False,
    'custom_token_bans': '',
    'skip_special_tokens': True,
    'custom_stopping_strings': '',
    # 'logits_processor' - conditionally passed
    # 'stopping_strings' - temporarily used
    # 'logprobs' - temporarily used
    # 'requested_model' - temporarily used
}


def get_default_req_params():
    return copy.deepcopy(default_req_params)


def default(dic, key, default):
    '''
    Little helper to get defaults if the arg is present but None, and should be the same type as default.
    '''
    val = dic.get(key, default)
    if not isinstance(val, type(default)):
        # maybe it's just something like 1 instead of 1.0
        try:
            v = type(default)(val)
            if type(val)(v) == val:  # if it's the same value passed in, it's ok.
                return v
        except:
            pass

        val = default
    return val


def clamp(value, minvalue, maxvalue):
    return max(minvalue, min(value, maxvalue))
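A small illustration of the type-coercion behavior of `default()`, using a hypothetical request body:

```python
body = {'temperature': 1, 'top_p': None, 'max_tokens': 'oops'}

req = get_default_req_params()
req['temperature'] = default(body, 'temperature', req['temperature'])  # int 1 -> float 1.0 (coerced)
req['top_p'] = default(body, 'top_p', req['top_p'])                    # None  -> 1.0 (falls back to default)
req['max_new_tokens'] = clamp(default(body, 'max_tokens', 16), 1, 4096)  # 'oops' -> 16, then clamped
```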
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/moderations.py
DELETED
@@ -1,68 +0,0 @@
import time

import numpy as np
from extensions.openai.embeddings import get_embeddings
from numpy.linalg import norm

moderations_disabled = False  # return 0/false
category_embeddings = None
antonym_embeddings = None
categories = ["sexual", "hate", "harassment", "self-harm", "sexual/minors", "hate/threatening", "violence/graphic", "self-harm/intent", "self-harm/instructions", "harassment/threatening", "violence"]
flag_threshold = 0.5


def get_category_embeddings() -> dict:
    global category_embeddings, categories
    if category_embeddings is None:
        embeddings = get_embeddings(categories).tolist()
        category_embeddings = dict(zip(categories, embeddings))

    return category_embeddings


def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    return np.dot(a, b) / (norm(a) * norm(b))


# seems most openai like with all-mpnet-base-v2
def mod_score(a: np.ndarray, b: np.ndarray) -> float:
    return 2.0 * np.dot(a, b)


def moderations(input):
    global category_embeddings, categories, flag_threshold, moderations_disabled
    results = {
        "id": f"modr-{int(time.time()*1e9)}",
        "model": "text-moderation-001",
        "results": [],
    }

    if moderations_disabled:
        results['results'] = [{
            'categories': dict([(C, False) for C in categories]),
            'category_scores': dict([(C, 0.0) for C in categories]),
            'flagged': False,
        }]
        return results

    category_embeddings = get_category_embeddings()

    # input, string or array
    if isinstance(input, str):
        input = [input]

    for in_str in input:
        for ine in get_embeddings([in_str]):
            category_scores = dict([(C, mod_score(category_embeddings[C], ine)) for C in categories])
            category_flags = dict([(C, bool(category_scores[C] > flag_threshold)) for C in categories])
            flagged = any(category_flags.values())

            results['results'].extend([{
                'flagged': flagged,
                'categories': category_flags,
                'category_scores': category_scores,
            }])

    print(results)

    return results
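The expected shape of a call, assuming the sibling embeddings extension is loaded; the actual scores depend entirely on the embedding model, so treat the flag as illustrative:

```python
out = moderations("I want to hurt someone")
r = out['results'][0]
print(r['flagged'])  # True/False depending on the embedding-model scores
print(sorted(r['category_scores'],  # the three highest-scoring categories
             key=r['category_scores'].get, reverse=True)[:3])
```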
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/perplexity_colors/script.py
DELETED
@@ -1,309 +0,0 @@
import time

import gradio
import numpy as np
import torch
from transformers import LogitsProcessor

from modules import html_generator, shared

params = {
    'active': True,
    'color_by_perplexity': False,
    'color_by_probability': False,
    'ppl_scale': 15.0,  # No slider for this right now, because I don't think it really needs to be changed. Very large perplexity scores don't show up often.
    'probability_dropdown': False,
    'verbose': False  # For debugging mostly
}


class PerplexityLogits(LogitsProcessor):
    def __init__(self, verbose=False):
        self.generated_token_ids = []
        self.selected_probs = []
        self.top_token_ids_list = []
        self.top_probs_list = []
        self.perplexities_list = []
        self.last_probs = None
        self.verbose = verbose

    def __call__(self, input_ids, scores):
        # t0 = time.time()
        probs = torch.softmax(scores, dim=-1, dtype=torch.float)
        log_probs = torch.nan_to_num(torch.log(probs))  # Note: This is to convert log(0) nan to 0, but probs*log_probs makes this 0 not affect the perplexity.
        entropy = -torch.sum(probs * log_probs)
        entropy = entropy.cpu().numpy()
        perplexity = round(float(np.exp(entropy)), 4)
        self.perplexities_list.append(perplexity)
        last_token_id = int(input_ids[0][-1].cpu().numpy().item())
        # Store the generated tokens (not sure why this isn't accessible in the output endpoint!)
        self.generated_token_ids.append(last_token_id)
        # Get last probability, and add to the list if it wasn't there
        if len(self.selected_probs) > 0:
            # Is the selected token in the top tokens?
            if self.verbose:
                print('Probs: Token after', shared.tokenizer.decode(last_token_id))
                print('Probs:', [shared.tokenizer.decode(token_id) for token_id in self.top_token_ids_list[-1][0]])
                print('Probs:', [round(float(prob), 4) for prob in self.top_probs_list[-1][0]])
            if last_token_id in self.top_token_ids_list[-1][0]:
                idx = self.top_token_ids_list[-1][0].index(last_token_id)
                self.selected_probs.append(self.top_probs_list[-1][0][idx])
            else:
                self.top_token_ids_list[-1][0].append(last_token_id)
                last_prob = round(float(self.last_probs[last_token_id]), 4)
                self.top_probs_list[-1][0].append(last_prob)
                self.selected_probs.append(last_prob)
        else:
            self.selected_probs.append(1.0)  # Placeholder for the last token of the prompt

        if self.verbose:
            pplbar = "-"
            if not np.isnan(perplexity):
                pplbar = "*" * round(perplexity)
            print(f"PPL: Token after {shared.tokenizer.decode(last_token_id)}\t{perplexity:.2f}\t{pplbar}")

        # Get top 5 probabilities
        top_tokens_and_probs = torch.topk(probs, 5)
        top_probs = top_tokens_and_probs.values.cpu().numpy().astype(float).tolist()
        top_token_ids = top_tokens_and_probs.indices.cpu().numpy().astype(int).tolist()

        self.top_token_ids_list.append(top_token_ids)
        self.top_probs_list.append(top_probs)

        probs = probs.cpu().numpy().flatten()
        self.last_probs = probs  # Need to keep this as a reference for top probs

        # t1 = time.time()
        # print(f"PPL Processor: {(t1-t0):.3f} s")
        # About 1 ms, though occasionally up to around 100 ms, not sure why...
        # Doesn't actually modify the logits!
        return scores


# Stores the perplexity and top probabilities
ppl_logits_processor = None


def logits_processor_modifier(logits_processor_list, input_ids):
    global ppl_logits_processor
    if params['active']:
        ppl_logits_processor = PerplexityLogits(verbose=params['verbose'])
        logits_processor_list.append(ppl_logits_processor)


def output_modifier(text):
    global ppl_logits_processor
    # t0 = time.time()

    if not params['active']:
        return text

    # TODO: It's probably more efficient to do this above rather than modifying all these lists
    # Remove last element of perplexities_list, top_token_ids_list, top_tokens_list, top_probs_list since everything is off by one because this extension runs before generation
    perplexities = ppl_logits_processor.perplexities_list[:-1]
    top_token_ids_list = ppl_logits_processor.top_token_ids_list[:-1]
    top_tokens_list = [[shared.tokenizer.decode(token_id) for token_id in top_token_ids[0]] for top_token_ids in top_token_ids_list]
    top_probs_list = ppl_logits_processor.top_probs_list[:-1]
    # Remove first element of generated_token_ids, generated_tokens, selected_probs because they are for the last token of the prompt
    gen_token_ids = ppl_logits_processor.generated_token_ids[1:]
    gen_tokens = [shared.tokenizer.decode(token_id) for token_id in gen_token_ids]
    sel_probs = ppl_logits_processor.selected_probs[1:]

    end_part = '</div></div>' if params['probability_dropdown'] else '</span>'  # Helps with finding the index after replacing part of the text.

    i = 0
    for token, prob, ppl, top_tokens, top_probs in zip(gen_tokens, sel_probs, perplexities, top_tokens_list, top_probs_list):
        color = 'ffffff'
        if params['color_by_probability'] and params['color_by_perplexity']:
            color = probability_perplexity_color_scale(prob, ppl)
        elif params['color_by_perplexity']:
            color = perplexity_color_scale(ppl)
        elif params['color_by_probability']:
            color = probability_color_scale(prob)
        if token in text[i:]:
            if params['probability_dropdown']:
                text = text[:i] + text[i:].replace(token, add_dropdown_html(token, color, top_tokens, top_probs[0], ppl), 1)
            else:
                text = text[:i] + text[i:].replace(token, add_color_html(token, color), 1)
            i += text[i:].find(end_part) + len(end_part)

    # Use full perplexity list for calculating the average here.
    print('Average perplexity:', round(np.mean(ppl_logits_processor.perplexities_list[:-1]), 4))
    # t1 = time.time()
    # print(f"Modifier: {(t1-t0):.3f} s")
    # About 50 ms
    return text


def probability_color_scale(prob):
    '''
    Green-yellow-red color scale
    '''

    rv = 0
    gv = 0
    if prob <= 0.5:
        rv = 'ff'
        gv = hex(int(255 * prob * 2))[2:]
        if len(gv) < 2:
            gv = '0' * (2 - len(gv)) + gv
    else:
        rv = hex(int(255 - 255 * (prob - 0.5) * 2))[2:]
        gv = 'ff'
        if len(rv) < 2:
            rv = '0' * (2 - len(rv)) + rv

    return rv + gv + '00'


def perplexity_color_scale(ppl):
    '''
    Red component only, white for 0 perplexity (sorry if you're not in dark mode)
    '''
    value = hex(max(int(255.0 - params['ppl_scale'] * (float(ppl) - 1.0)), 0))[2:]
    if len(value) < 2:
        value = '0' * (2 - len(value)) + value

    return 'ff' + value + value


def probability_perplexity_color_scale(prob, ppl):
    '''
    Green-yellow-red for probability and blue component for perplexity
    '''

    rv = 0
    gv = 0
    bv = hex(min(max(int(params['ppl_scale'] * (float(ppl) - 1.0)), 0), 255))[2:]
    if len(bv) < 2:
        bv = '0' * (2 - len(bv)) + bv

    if prob <= 0.5:
        rv = 'ff'
        gv = hex(int(255 * prob * 2))[2:]
        if len(gv) < 2:
            gv = '0' * (2 - len(gv)) + gv
    else:
        rv = hex(int(255 - 255 * (prob - 0.5) * 2))[2:]
        gv = 'ff'
        if len(rv) < 2:
            rv = '0' * (2 - len(rv)) + rv

    return rv + gv + bv


def add_color_html(token, color):
    return f'<span style="color: #{color}">{token}</span>'


# TODO: Major issue: Applying this to too many tokens will cause a permanent slowdown in generation speed until the messages are removed from the history.
# I think the issue is from HTML elements taking up space in the visible history, and things like history deepcopy add latency proportional to the size of the history.
# Potential solution is maybe to modify the main generation code to send just the internal text and not the visible history, to avoid moving too much around.
# I wonder if we can also avoid using deepcopy here.
def add_dropdown_html(token, color, top_tokens, top_probs, perplexity=0):
    html = f'<div class="hoverable"><span style="color: #{color}">{token}</span><div class="dropdown"><table class="dropdown-content"><tbody>'
    for token_option, prob in zip(top_tokens, top_probs):
        # TODO: Bold for selected token?
        # Using divs prevented the problem of divs inside spans causing issues.
        # Now the problem is that divs show the same whitespace of one space between every token.
        # There is probably some way to fix this in CSS that I don't know about.
        row_color = probability_color_scale(prob)
        row_class = ' class="selected"' if token_option == token else ''
        html += f'<tr{row_class}><td style="color: #{row_color}">{token_option}</td><td style="color: #{row_color}">{prob:.4f}</td></tr>'
    if perplexity != 0:
        ppl_color = perplexity_color_scale(perplexity)
        html += f'<tr><td>Perplexity:</td><td style="color: #{ppl_color}">{perplexity:.4f}</td></tr>'
|
216 |
-
html += '</tbody></table></div></div>'
|
217 |
-
return html # About 750 characters per token...
|
218 |
-
|
219 |
-
|
220 |
-
def custom_css():
|
221 |
-
return """
|
222 |
-
.dropdown {
|
223 |
-
display: none;
|
224 |
-
position: absolute;
|
225 |
-
z-index: 50;
|
226 |
-
background-color: var(--block-background-fill);
|
227 |
-
box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
|
228 |
-
width: max-content;
|
229 |
-
overflow: visible;
|
230 |
-
padding: 5px;
|
231 |
-
border-radius: 10px;
|
232 |
-
border: 1px solid var(--border-color-primary);
|
233 |
-
}
|
234 |
-
|
235 |
-
.dropdown-content {
|
236 |
-
border: none;
|
237 |
-
z-index: 50;
|
238 |
-
}
|
239 |
-
|
240 |
-
.dropdown-content tr.selected {
|
241 |
-
background-color: var(--block-label-background-fill);
|
242 |
-
}
|
243 |
-
|
244 |
-
.dropdown-content td {
|
245 |
-
color: var(--body-text-color);
|
246 |
-
}
|
247 |
-
|
248 |
-
.hoverable {
|
249 |
-
color: var(--body-text-color);
|
250 |
-
position: relative;
|
251 |
-
display: inline-block;
|
252 |
-
overflow: visible;
|
253 |
-
font-size: 15px;
|
254 |
-
line-height: 1.75;
|
255 |
-
margin: 0;
|
256 |
-
padding: 0;
|
257 |
-
}
|
258 |
-
|
259 |
-
.hoverable:hover .dropdown {
|
260 |
-
display: block;
|
261 |
-
}
|
262 |
-
|
263 |
-
pre {
|
264 |
-
white-space: pre-wrap;
|
265 |
-
}
|
266 |
-
|
267 |
-
# TODO: This makes the hover menus extend outside the bounds of the chat area, which is good.
|
268 |
-
# However, it also makes the scrollbar disappear, which is bad.
|
269 |
-
# The scroll bar needs to still be present. So for now, we can't see dropdowns that extend past the edge of the chat area.
|
270 |
-
#.chat {
|
271 |
-
# overflow-y: auto;
|
272 |
-
#}
|
273 |
-
"""
|
274 |
-
|
275 |
-
|
276 |
-
# Monkeypatch applied to html_generator.py
|
277 |
-
# We simply don't render markdown into HTML. We wrap everything in <pre> tags to preserve whitespace
|
278 |
-
# formatting. If you're coloring tokens by perplexity or probability, or especially if you're using
|
279 |
-
# the probability dropdown, you probably care more about seeing the tokens the model actually outputted
|
280 |
-
# rather than rendering ```code blocks``` or *italics*.
|
281 |
-
def convert_to_markdown(string):
|
282 |
-
return '<pre>' + string + '</pre>'
|
283 |
-
|
284 |
-
|
285 |
-
html_generator.convert_to_markdown = convert_to_markdown
|
286 |
-
|
287 |
-
|
288 |
-
def ui():
|
289 |
-
def update_active_check(x):
|
290 |
-
params.update({'active': x})
|
291 |
-
|
292 |
-
def update_color_by_ppl_check(x):
|
293 |
-
params.update({'color_by_perplexity': x})
|
294 |
-
|
295 |
-
def update_color_by_prob_check(x):
|
296 |
-
params.update({'color_by_probability': x})
|
297 |
-
|
298 |
-
def update_prob_dropdown_check(x):
|
299 |
-
params.update({'probability_dropdown': x})
|
300 |
-
|
301 |
-
active_check = gradio.Checkbox(value=True, label="Compute probabilities and perplexity scores", info="Activate this extension. Note that this extension currently does not work with exllama or llama.cpp.")
|
302 |
-
color_by_ppl_check = gradio.Checkbox(value=False, label="Color by perplexity", info="Higher perplexity is more red. If also showing probability, higher perplexity has more blue component.")
|
303 |
-
color_by_prob_check = gradio.Checkbox(value=False, label="Color by probability", info="Green-yellow-red linear scale, with 100% green, 50% yellow, 0% red.")
|
304 |
-
prob_dropdown_check = gradio.Checkbox(value=False, label="Probability dropdown", info="Hover over a token to show a dropdown of top token probabilities. Currently slightly buggy with whitespace between tokens.")
|
305 |
-
|
306 |
-
active_check.change(update_active_check, active_check, None)
|
307 |
-
color_by_ppl_check.change(update_color_by_ppl_check, color_by_ppl_check, None)
|
308 |
-
color_by_prob_check.change(update_color_by_prob_check, color_by_prob_check, None)
|
309 |
-
prob_dropdown_check.change(update_prob_dropdown_check, prob_dropdown_check, None)
|
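
For reference, the green-yellow-red mapping in probability_color_scale above is easy to check in isolation. A minimal standalone sketch (the name prob_to_hex is illustrative, not from the extension itself):

    # Sketch of the scale above: prob <= 0.5 pins red at 0xff and ramps green up;
    # prob > 0.5 pins green at 0xff and ramps red down, giving red -> yellow -> green.
    def prob_to_hex(prob: float) -> str:
        if prob <= 0.5:
            rv, gv = 0xFF, int(255 * prob * 2)
        else:
            rv, gv = int(255 - 255 * (prob - 0.5) * 2), 0xFF
        return f"{rv:02x}{gv:02x}00"

    assert prob_to_hex(0.0) == "ff0000"  # low probability -> red
    assert prob_to_hex(0.5) == "ffff00"  # mid probability -> yellow
    assert prob_to_hex(1.0) == "00ff00"  # high probability -> green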
spaces/Apex-X/GODROOP/roop/processors/frame/__init__.py
DELETED
File without changes
spaces/Archan/ArXivAudio/search.py
DELETED
@@ -1,21 +0,0 @@
-import arxiv
-
-
-def search(query="", max_results=10, sort_by="Relevance", sort_order="Descending"):
-
-    sr_by_dict = {"Relevance": arxiv.SortCriterion.Relevance, "Last Updated Date":
-                  arxiv.SortCriterion.LastUpdatedDate, "Submitted Date": arxiv.SortCriterion.SubmittedDate}
-    sr_or_dict = {"Descending": arxiv.SortOrder.Descending,
-                  "Ascending": arxiv.SortOrder.Ascending}
-
-    search = arxiv.Search(
-        query=query,
-        max_results=max_results,
-        sort_by=sr_by_dict[sort_by],
-        sort_order=sr_or_dict[sort_order])
-    src_lst = []
-    for i in search.results():
-        id = i.entry_id.split("/")
-        src_lst.append(i.title+" - " + str(id[-1]))
-
-    return src_lst
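
A minimal usage sketch for the helper above (assumes the arxiv package is installed; the query string is illustrative):

    import search as arxiv_search  # the Space imported this module locally

    # Each entry is "<title> - <arxiv id>", built from entry_id in the loop above.
    for entry in arxiv_search.search(query="text to speech", max_results=5,
                                     sort_by="Submitted Date", sort_order="Descending"):
        print(entry)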
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_file.py
DELETED
@@ -1,552 +0,0 @@
-"""
-Requirements file parsing
-"""
-
-import logging
-import optparse
-import os
-import re
-import shlex
-import urllib.parse
-from optparse import Values
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Dict,
-    Generator,
-    Iterable,
-    List,
-    Optional,
-    Tuple,
-)
-
-from pip._internal.cli import cmdoptions
-from pip._internal.exceptions import InstallationError, RequirementsFileParseError
-from pip._internal.models.search_scope import SearchScope
-from pip._internal.network.session import PipSession
-from pip._internal.network.utils import raise_for_status
-from pip._internal.utils.encoding import auto_decode
-from pip._internal.utils.urls import get_url_scheme
-
-if TYPE_CHECKING:
-    # NoReturn introduced in 3.6.2; imported only for type checking to maintain
-    # pip compatibility with older patch versions of Python 3.6
-    from typing import NoReturn
-
-    from pip._internal.index.package_finder import PackageFinder
-
-__all__ = ["parse_requirements"]
-
-ReqFileLines = Iterable[Tuple[int, str]]
-
-LineParser = Callable[[str], Tuple[str, Values]]
-
-SCHEME_RE = re.compile(r"^(http|https|file):", re.I)
-COMMENT_RE = re.compile(r"(^|\s+)#.*$")
-
-# Matches environment variable-style values in '${MY_VARIABLE_1}' with the
-# variable name consisting of only uppercase letters, digits or the '_'
-# (underscore). This follows the POSIX standard defined in IEEE Std 1003.1,
-# 2013 Edition.
-ENV_VAR_RE = re.compile(r"(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})")
-
-SUPPORTED_OPTIONS: List[Callable[..., optparse.Option]] = [
-    cmdoptions.index_url,
-    cmdoptions.extra_index_url,
-    cmdoptions.no_index,
-    cmdoptions.constraints,
-    cmdoptions.requirements,
-    cmdoptions.editable,
-    cmdoptions.find_links,
-    cmdoptions.no_binary,
-    cmdoptions.only_binary,
-    cmdoptions.prefer_binary,
-    cmdoptions.require_hashes,
-    cmdoptions.pre,
-    cmdoptions.trusted_host,
-    cmdoptions.use_new_feature,
-]
-
-# options to be passed to requirements
-SUPPORTED_OPTIONS_REQ: List[Callable[..., optparse.Option]] = [
-    cmdoptions.global_options,
-    cmdoptions.hash,
-    cmdoptions.config_settings,
-]
-
-# the 'dest' string values
-SUPPORTED_OPTIONS_REQ_DEST = [str(o().dest) for o in SUPPORTED_OPTIONS_REQ]
-
-logger = logging.getLogger(__name__)
-
-
-class ParsedRequirement:
-    def __init__(
-        self,
-        requirement: str,
-        is_editable: bool,
-        comes_from: str,
-        constraint: bool,
-        options: Optional[Dict[str, Any]] = None,
-        line_source: Optional[str] = None,
-    ) -> None:
-        self.requirement = requirement
-        self.is_editable = is_editable
-        self.comes_from = comes_from
-        self.options = options
-        self.constraint = constraint
-        self.line_source = line_source
-
-
-class ParsedLine:
-    def __init__(
-        self,
-        filename: str,
-        lineno: int,
-        args: str,
-        opts: Values,
-        constraint: bool,
-    ) -> None:
-        self.filename = filename
-        self.lineno = lineno
-        self.opts = opts
-        self.constraint = constraint
-
-        if args:
-            self.is_requirement = True
-            self.is_editable = False
-            self.requirement = args
-        elif opts.editables:
-            self.is_requirement = True
-            self.is_editable = True
-            # We don't support multiple -e on one line
-            self.requirement = opts.editables[0]
-        else:
-            self.is_requirement = False
-
-
-def parse_requirements(
-    filename: str,
-    session: PipSession,
-    finder: Optional["PackageFinder"] = None,
-    options: Optional[optparse.Values] = None,
-    constraint: bool = False,
-) -> Generator[ParsedRequirement, None, None]:
-    """Parse a requirements file and yield ParsedRequirement instances.
-
-    :param filename: Path or url of requirements file.
-    :param session: PipSession instance.
-    :param finder: Instance of pip.index.PackageFinder.
-    :param options: cli options.
-    :param constraint: If true, parsing a constraint file rather than
-        requirements file.
-    """
-    line_parser = get_line_parser(finder)
-    parser = RequirementsFileParser(session, line_parser)
-
-    for parsed_line in parser.parse(filename, constraint):
-        parsed_req = handle_line(
-            parsed_line, options=options, finder=finder, session=session
-        )
-        if parsed_req is not None:
-            yield parsed_req
-
-
-def preprocess(content: str) -> ReqFileLines:
-    """Split, filter, and join lines, and return a line iterator
-
-    :param content: the content of the requirements file
-    """
-    lines_enum: ReqFileLines = enumerate(content.splitlines(), start=1)
-    lines_enum = join_lines(lines_enum)
-    lines_enum = ignore_comments(lines_enum)
-    lines_enum = expand_env_variables(lines_enum)
-    return lines_enum
-
-
-def handle_requirement_line(
-    line: ParsedLine,
-    options: Optional[optparse.Values] = None,
-) -> ParsedRequirement:
-    # preserve for the nested code path
-    line_comes_from = "{} {} (line {})".format(
-        "-c" if line.constraint else "-r",
-        line.filename,
-        line.lineno,
-    )
-
-    assert line.is_requirement
-
-    if line.is_editable:
-        # For editable requirements, we don't support per-requirement
-        # options, so just return the parsed requirement.
-        return ParsedRequirement(
-            requirement=line.requirement,
-            is_editable=line.is_editable,
-            comes_from=line_comes_from,
-            constraint=line.constraint,
-        )
-    else:
-        # get the options that apply to requirements
-        req_options = {}
-        for dest in SUPPORTED_OPTIONS_REQ_DEST:
-            if dest in line.opts.__dict__ and line.opts.__dict__[dest]:
-                req_options[dest] = line.opts.__dict__[dest]
-
-        line_source = f"line {line.lineno} of {line.filename}"
-        return ParsedRequirement(
-            requirement=line.requirement,
-            is_editable=line.is_editable,
-            comes_from=line_comes_from,
-            constraint=line.constraint,
-            options=req_options,
-            line_source=line_source,
-        )
-
-
-def handle_option_line(
-    opts: Values,
-    filename: str,
-    lineno: int,
-    finder: Optional["PackageFinder"] = None,
-    options: Optional[optparse.Values] = None,
-    session: Optional[PipSession] = None,
-) -> None:
-    if opts.hashes:
-        logger.warning(
-            "%s line %s has --hash but no requirement, and will be ignored.",
-            filename,
-            lineno,
-        )
-
-    if options:
-        # percolate options upward
-        if opts.require_hashes:
-            options.require_hashes = opts.require_hashes
-        if opts.features_enabled:
-            options.features_enabled.extend(
-                f for f in opts.features_enabled if f not in options.features_enabled
-            )
-
-    # set finder options
-    if finder:
-        find_links = finder.find_links
-        index_urls = finder.index_urls
-        no_index = finder.search_scope.no_index
-        if opts.no_index is True:
-            no_index = True
-            index_urls = []
-        if opts.index_url and not no_index:
-            index_urls = [opts.index_url]
-        if opts.extra_index_urls and not no_index:
-            index_urls.extend(opts.extra_index_urls)
-        if opts.find_links:
-            # FIXME: it would be nice to keep track of the source
-            # of the find_links: support a find-links local path
-            # relative to a requirements file.
-            value = opts.find_links[0]
-            req_dir = os.path.dirname(os.path.abspath(filename))
-            relative_to_reqs_file = os.path.join(req_dir, value)
-            if os.path.exists(relative_to_reqs_file):
-                value = relative_to_reqs_file
-            find_links.append(value)
-
-        if session:
-            # We need to update the auth urls in session
-            session.update_index_urls(index_urls)
-
-        search_scope = SearchScope(
-            find_links=find_links,
-            index_urls=index_urls,
-            no_index=no_index,
-        )
-        finder.search_scope = search_scope
-
-        if opts.pre:
-            finder.set_allow_all_prereleases()
-
-        if opts.prefer_binary:
-            finder.set_prefer_binary()
-
-    if session:
-        for host in opts.trusted_hosts or []:
-            source = f"line {lineno} of {filename}"
-            session.add_trusted_host(host, source=source)
-
-
-def handle_line(
-    line: ParsedLine,
-    options: Optional[optparse.Values] = None,
-    finder: Optional["PackageFinder"] = None,
-    session: Optional[PipSession] = None,
-) -> Optional[ParsedRequirement]:
-    """Handle a single parsed requirements line; This can result in
-    creating/yielding requirements, or updating the finder.
-
-    :param line: The parsed line to be processed.
-    :param options: CLI options.
-    :param finder: The finder - updated by non-requirement lines.
-    :param session: The session - updated by non-requirement lines.
-
-    Returns a ParsedRequirement object if the line is a requirement line,
-    otherwise returns None.
-
-    For lines that contain requirements, the only options that have an effect
-    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
-    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
-    ignored.
-
-    For lines that do not contain requirements, the only options that have an
-    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
-    be present, but are ignored. These lines may contain multiple options
-    (although our docs imply only one is supported), and all our parsed and
-    affect the finder.
-    """
-
-    if line.is_requirement:
-        parsed_req = handle_requirement_line(line, options)
-        return parsed_req
-    else:
-        handle_option_line(
-            line.opts,
-            line.filename,
-            line.lineno,
-            finder,
-            options,
-            session,
-        )
-        return None
-
-
-class RequirementsFileParser:
-    def __init__(
-        self,
-        session: PipSession,
-        line_parser: LineParser,
-    ) -> None:
-        self._session = session
-        self._line_parser = line_parser
-
-    def parse(
-        self, filename: str, constraint: bool
-    ) -> Generator[ParsedLine, None, None]:
-        """Parse a given file, yielding parsed lines."""
-        yield from self._parse_and_recurse(filename, constraint)
-
-    def _parse_and_recurse(
-        self, filename: str, constraint: bool
-    ) -> Generator[ParsedLine, None, None]:
-        for line in self._parse_file(filename, constraint):
-            if not line.is_requirement and (
-                line.opts.requirements or line.opts.constraints
-            ):
-                # parse a nested requirements file
-                if line.opts.requirements:
-                    req_path = line.opts.requirements[0]
-                    nested_constraint = False
-                else:
-                    req_path = line.opts.constraints[0]
-                    nested_constraint = True
-
-                # original file is over http
-                if SCHEME_RE.search(filename):
-                    # do a url join so relative paths work
-                    req_path = urllib.parse.urljoin(filename, req_path)
-                # original file and nested file are paths
-                elif not SCHEME_RE.search(req_path):
-                    # do a join so relative paths work
-                    req_path = os.path.join(
-                        os.path.dirname(filename),
-                        req_path,
-                    )
-
-                yield from self._parse_and_recurse(req_path, nested_constraint)
-            else:
-                yield line
-
-    def _parse_file(
-        self, filename: str, constraint: bool
-    ) -> Generator[ParsedLine, None, None]:
-        _, content = get_file_content(filename, self._session)
-
-        lines_enum = preprocess(content)
-
-        for line_number, line in lines_enum:
-            try:
-                args_str, opts = self._line_parser(line)
-            except OptionParsingError as e:
-                # add offending line
-                msg = f"Invalid requirement: {line}\n{e.msg}"
-                raise RequirementsFileParseError(msg)
-
-            yield ParsedLine(
-                filename,
-                line_number,
-                args_str,
-                opts,
-                constraint,
-            )
-
-
-def get_line_parser(finder: Optional["PackageFinder"]) -> LineParser:
-    def parse_line(line: str) -> Tuple[str, Values]:
-        # Build new parser for each line since it accumulates appendable
-        # options.
-        parser = build_parser()
-        defaults = parser.get_default_values()
-        defaults.index_url = None
-        if finder:
-            defaults.format_control = finder.format_control
-
-        args_str, options_str = break_args_options(line)
-
-        try:
-            options = shlex.split(options_str)
-        except ValueError as e:
-            raise OptionParsingError(f"Could not split options: {options_str}") from e
-
-        opts, _ = parser.parse_args(options, defaults)
-
-        return args_str, opts
-
-    return parse_line
-
-
-def break_args_options(line: str) -> Tuple[str, str]:
-    """Break up the line into an args and options string. We only want to shlex
-    (and then optparse) the options, not the args. args can contain markers
-    which are corrupted by shlex.
-    """
-    tokens = line.split(" ")
-    args = []
-    options = tokens[:]
-    for token in tokens:
-        if token.startswith("-") or token.startswith("--"):
-            break
-        else:
-            args.append(token)
-            options.pop(0)
-    return " ".join(args), " ".join(options)
-
-
-class OptionParsingError(Exception):
-    def __init__(self, msg: str) -> None:
-        self.msg = msg
-
-
-def build_parser() -> optparse.OptionParser:
-    """
-    Return a parser for parsing requirement lines
-    """
-    parser = optparse.OptionParser(add_help_option=False)
-
-    option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
-    for option_factory in option_factories:
-        option = option_factory()
-        parser.add_option(option)
-
-    # By default optparse sys.exits on parsing errors. We want to wrap
-    # that in our own exception.
-    def parser_exit(self: Any, msg: str) -> "NoReturn":
-        raise OptionParsingError(msg)
-
-    # NOTE: mypy disallows assigning to a method
-    # https://github.com/python/mypy/issues/2427
-    parser.exit = parser_exit # type: ignore
-
-    return parser
-
-
-def join_lines(lines_enum: ReqFileLines) -> ReqFileLines:
-    """Joins a line ending in '\' with the previous line (except when following
-    comments). The joined line takes on the index of the first line.
-    """
-    primary_line_number = None
-    new_line: List[str] = []
-    for line_number, line in lines_enum:
-        if not line.endswith("\\") or COMMENT_RE.match(line):
-            if COMMENT_RE.match(line):
-                # this ensures comments are always matched later
-                line = " " + line
-            if new_line:
-                new_line.append(line)
-                assert primary_line_number is not None
-                yield primary_line_number, "".join(new_line)
-                new_line = []
-            else:
-                yield line_number, line
-        else:
-            if not new_line:
-                primary_line_number = line_number
-            new_line.append(line.strip("\\"))
-
-    # last line contains \
-    if new_line:
-        assert primary_line_number is not None
-        yield primary_line_number, "".join(new_line)
-
-    # TODO: handle space after '\'.
-
-
-def ignore_comments(lines_enum: ReqFileLines) -> ReqFileLines:
-    """
-    Strips comments and filter empty lines.
-    """
-    for line_number, line in lines_enum:
-        line = COMMENT_RE.sub("", line)
-        line = line.strip()
-        if line:
-            yield line_number, line
-
-
-def expand_env_variables(lines_enum: ReqFileLines) -> ReqFileLines:
-    """Replace all environment variables that can be retrieved via `os.getenv`.
-
-    The only allowed format for environment variables defined in the
-    requirement file is `${MY_VARIABLE_1}` to ensure two things:
-
-    1. Strings that contain a `$` aren't accidentally (partially) expanded.
-    2. Ensure consistency across platforms for requirement files.
-
-    These points are the result of a discussion on the `github pull
-    request #3514 <https://github.com/pypa/pip/pull/3514>`_.
-
-    Valid characters in variable names follow the `POSIX standard
-    <http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
-    to uppercase letter, digits and the `_` (underscore).
-    """
-    for line_number, line in lines_enum:
-        for env_var, var_name in ENV_VAR_RE.findall(line):
-            value = os.getenv(var_name)
-            if not value:
-                continue
-
-            line = line.replace(env_var, value)
-
-        yield line_number, line
-
-
-def get_file_content(url: str, session: PipSession) -> Tuple[str, str]:
-    """Gets the content of a file; it may be a filename, file: URL, or
-    http: URL. Returns (location, content). Content is unicode.
-    Respects # -*- coding: declarations on the retrieved files.
-
-    :param url: File path or url.
-    :param session: PipSession instance.
-    """
-    scheme = get_url_scheme(url)
-
-    # Pip has special support for file:// URLs (LocalFSAdapter).
-    if scheme in ["http", "https", "file"]:
-        resp = session.get(url)
-        raise_for_status(resp)
-        return resp.url, resp.text
-
-    # Assume this is a bare path.
-    try:
-        with open(url, "rb") as f:
-            content = auto_decode(f.read())
-    except OSError as exc:
-        raise InstallationError(f"Could not open requirements file: {exc}")
-    return url, content
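
For context, parse_requirements above is the module's public entry point (per __all__). A minimal sketch of how it is driven; this is pip-internal API, so the import paths hold only for a vendored pip like the one shown here, and "requirements.txt" is a placeholder:

    from pip._internal.network.session import PipSession
    from pip._internal.req.req_file import parse_requirements

    # Requirement lines yield ParsedRequirement objects; option-only lines
    # (e.g. --index-url) return None from handle_line and instead update
    # the finder/session, so they never reach this loop.
    for req in parse_requirements("requirements.txt", session=PipSession()):
        print(req.requirement, "(editable)" if req.is_editable else "")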
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py
DELETED
@@ -1,130 +0,0 @@
-"""
-NTLM authenticating pool, contributed by erikcederstran
-
-Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
-"""
-from __future__ import absolute_import
-
-import warnings
-from logging import getLogger
-
-from ntlm import ntlm
-
-from .. import HTTPSConnectionPool
-from ..packages.six.moves.http_client import HTTPSConnection
-
-warnings.warn(
-    "The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed "
-    "in urllib3 v2.0 release, urllib3 is not able to support it properly due "
-    "to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. "
-    "If you are a user of this module please comment in the mentioned issue.",
-    DeprecationWarning,
-)
-
-log = getLogger(__name__)
-
-
-class NTLMConnectionPool(HTTPSConnectionPool):
-    """
-    Implements an NTLM authentication version of an urllib3 connection pool
-    """
-
-    scheme = "https"
-
-    def __init__(self, user, pw, authurl, *args, **kwargs):
-        """
-        authurl is a random URL on the server that is protected by NTLM.
-        user is the Windows user, probably in the DOMAIN\\username format.
-        pw is the password for the user.
-        """
-        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
-        self.authurl = authurl
-        self.rawuser = user
-        user_parts = user.split("\\", 1)
-        self.domain = user_parts[0].upper()
-        self.user = user_parts[1]
-        self.pw = pw
-
-    def _new_conn(self):
-        # Performs the NTLM handshake that secures the connection. The socket
-        # must be kept open while requests are performed.
-        self.num_connections += 1
-        log.debug(
-            "Starting NTLM HTTPS connection no. %d: https://%s%s",
-            self.num_connections,
-            self.host,
-            self.authurl,
-        )
-
-        headers = {"Connection": "Keep-Alive"}
-        req_header = "Authorization"
-        resp_header = "www-authenticate"
-
-        conn = HTTPSConnection(host=self.host, port=self.port)
-
-        # Send negotiation message
-        headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
-            self.rawuser
-        )
-        log.debug("Request headers: %s", headers)
-        conn.request("GET", self.authurl, None, headers)
-        res = conn.getresponse()
-        reshdr = dict(res.headers)
-        log.debug("Response status: %s %s", res.status, res.reason)
-        log.debug("Response headers: %s", reshdr)
-        log.debug("Response data: %s [...]", res.read(100))
-
-        # Remove the reference to the socket, so that it can not be closed by
-        # the response object (we want to keep the socket open)
-        res.fp = None
-
-        # Server should respond with a challenge message
-        auth_header_values = reshdr[resp_header].split(", ")
-        auth_header_value = None
-        for s in auth_header_values:
-            if s[:5] == "NTLM ":
-                auth_header_value = s[5:]
-        if auth_header_value is None:
-            raise Exception(
-                "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
-            )
-
-        # Send authentication message
-        ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
-            auth_header_value
-        )
-        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
-            ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
-        )
-        headers[req_header] = "NTLM %s" % auth_msg
-        log.debug("Request headers: %s", headers)
-        conn.request("GET", self.authurl, None, headers)
-        res = conn.getresponse()
-        log.debug("Response status: %s %s", res.status, res.reason)
-        log.debug("Response headers: %s", dict(res.headers))
-        log.debug("Response data: %s [...]", res.read()[:100])
-        if res.status != 200:
-            if res.status == 401:
-                raise Exception("Server rejected request: wrong username or password")
-            raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
-
-        res.fp = None
-        log.debug("Connection established")
-        return conn
-
-    def urlopen(
-        self,
-        method,
-        url,
-        body=None,
-        headers=None,
-        retries=3,
-        redirect=True,
-        assert_same_host=True,
-    ):
-        if headers is None:
-            headers = {}
-        headers["Connection"] = "Keep-Alive"
-        return super(NTLMConnectionPool, self).urlopen(
-            method, url, body, headers, retries, redirect, assert_same_host
-        )
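
A minimal usage sketch for the deprecated pool above; the host, port, credentials and authurl are placeholders, and the ntlm package must be installed:

    from urllib3.contrib.ntlmpool import NTLMConnectionPool

    # user is "DOMAIN\\username" (split on the first backslash in __init__);
    # authurl is any NTLM-protected URL on the host, used for the handshake.
    pool = NTLMConnectionPool("EXAMPLE\\alice", "s3cret", "/",
                              host="intranet.example.com", port=443)
    resp = pool.urlopen("GET", "/reports")
    print(resp.status)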
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/version.py
DELETED
@@ -1,504 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import collections
-import itertools
-import re
-import warnings
-from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
-
-from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
-
-__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
-
-InfiniteTypes = Union[InfinityType, NegativeInfinityType]
-PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
-SubLocalType = Union[InfiniteTypes, int, str]
-LocalType = Union[
-    NegativeInfinityType,
-    Tuple[
-        Union[
-            SubLocalType,
-            Tuple[SubLocalType, str],
-            Tuple[NegativeInfinityType, SubLocalType],
-        ],
-        ...,
-    ],
-]
-CmpKey = Tuple[
-    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
-]
-LegacyCmpKey = Tuple[int, Tuple[str, ...]]
-VersionComparisonMethod = Callable[
-    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
-]
-
-_Version = collections.namedtuple(
-    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
-)
-
-
-def parse(version: str) -> Union["LegacyVersion", "Version"]:
-    """
-    Parse the given version string and return either a :class:`Version` object
-    or a :class:`LegacyVersion` object depending on if the given version is
-    a valid PEP 440 version or a legacy version.
-    """
-    try:
-        return Version(version)
-    except InvalidVersion:
-        return LegacyVersion(version)
-
-
-class InvalidVersion(ValueError):
-    """
-    An invalid version was found, users should refer to PEP 440.
-    """
-
-
-class _BaseVersion:
-    _key: Union[CmpKey, LegacyCmpKey]
-
-    def __hash__(self) -> int:
-        return hash(self._key)
-
-    # Please keep the duplicated `isinstance` check
-    # in the six comparisons hereunder
-    # unless you find a way to avoid adding overhead function calls.
-    def __lt__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key < other._key
-
-    def __le__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key <= other._key
-
-    def __eq__(self, other: object) -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key == other._key
-
-    def __ge__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key >= other._key
-
-    def __gt__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key > other._key
-
-    def __ne__(self, other: object) -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key != other._key
-
-
-class LegacyVersion(_BaseVersion):
-    def __init__(self, version: str) -> None:
-        self._version = str(version)
-        self._key = _legacy_cmpkey(self._version)
-
-        warnings.warn(
-            "Creating a LegacyVersion has been deprecated and will be "
-            "removed in the next major release",
-            DeprecationWarning,
-        )
-
-    def __str__(self) -> str:
-        return self._version
-
-    def __repr__(self) -> str:
-        return f"<LegacyVersion('{self}')>"
-
-    @property
-    def public(self) -> str:
-        return self._version
-
-    @property
-    def base_version(self) -> str:
-        return self._version
-
-    @property
-    def epoch(self) -> int:
-        return -1
-
-    @property
-    def release(self) -> None:
-        return None
-
-    @property
-    def pre(self) -> None:
-        return None
-
-    @property
-    def post(self) -> None:
-        return None
-
-    @property
-    def dev(self) -> None:
-        return None
-
-    @property
-    def local(self) -> None:
-        return None
-
-    @property
-    def is_prerelease(self) -> bool:
-        return False
-
-    @property
-    def is_postrelease(self) -> bool:
-        return False
-
-    @property
-    def is_devrelease(self) -> bool:
-        return False
-
-
-_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
-
-_legacy_version_replacement_map = {
-    "pre": "c",
-    "preview": "c",
-    "-": "final-",
-    "rc": "c",
-    "dev": "@",
-}
-
-
-def _parse_version_parts(s: str) -> Iterator[str]:
-    for part in _legacy_version_component_re.split(s):
-        part = _legacy_version_replacement_map.get(part, part)
-
-        if not part or part == ".":
-            continue
-
-        if part[:1] in "0123456789":
-            # pad for numeric comparison
-            yield part.zfill(8)
-        else:
-            yield "*" + part
-
-    # ensure that alpha/beta/candidate are before final
-    yield "*final"
-
-
-def _legacy_cmpkey(version: str) -> LegacyCmpKey:
-
-    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
-    # greater than or equal to 0. This will effectively put the LegacyVersion,
-    # which uses the defacto standard originally implemented by setuptools,
-    # as before all PEP 440 versions.
-    epoch = -1
-
-    # This scheme is taken from pkg_resources.parse_version setuptools prior to
-    # it's adoption of the packaging library.
-    parts: List[str] = []
-    for part in _parse_version_parts(version.lower()):
-        if part.startswith("*"):
-            # remove "-" before a prerelease tag
-            if part < "*final":
-                while parts and parts[-1] == "*final-":
-                    parts.pop()
-
-            # remove trailing zeros from each series of numeric parts
-            while parts and parts[-1] == "00000000":
-                parts.pop()
-
-        parts.append(part)
-
-    return epoch, tuple(parts)
-
-
-# Deliberately not anchored to the start and end of the string, to make it
-# easier for 3rd party code to reuse
-VERSION_PATTERN = r"""
-    v?
-    (?:
-        (?:(?P<epoch>[0-9]+)!)?                           # epoch
-        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
-        (?P<pre>                                          # pre-release
-            [-_\.]?
-            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
-            [-_\.]?
-            (?P<pre_n>[0-9]+)?
-        )?
-        (?P<post>                                         # post release
-            (?:-(?P<post_n1>[0-9]+))
-            |
-            (?:
-                [-_\.]?
-                (?P<post_l>post|rev|r)
-                [-_\.]?
-                (?P<post_n2>[0-9]+)?
-            )
-        )?
-        (?P<dev>                                          # dev release
-            [-_\.]?
-            (?P<dev_l>dev)
-            [-_\.]?
-            (?P<dev_n>[0-9]+)?
-        )?
-    )
-    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
-"""
-
-
-class Version(_BaseVersion):
-
-    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    def __init__(self, version: str) -> None:
-
-        # Validate the version and parse it into pieces
-        match = self._regex.search(version)
-        if not match:
-            raise InvalidVersion(f"Invalid version: '{version}'")
-
-        # Store the parsed out pieces of the version
-        self._version = _Version(
-            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
-            release=tuple(int(i) for i in match.group("release").split(".")),
-            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
-            post=_parse_letter_version(
-                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
-            ),
-            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
-            local=_parse_local_version(match.group("local")),
-        )
-
-        # Generate a key which will be used for sorting
-        self._key = _cmpkey(
-            self._version.epoch,
-            self._version.release,
-            self._version.pre,
-            self._version.post,
-            self._version.dev,
-            self._version.local,
-        )
-
-    def __repr__(self) -> str:
-        return f"<Version('{self}')>"
-
-    def __str__(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        # Pre-release
-        if self.pre is not None:
-            parts.append("".join(str(x) for x in self.pre))
-
-        # Post-release
-        if self.post is not None:
-            parts.append(f".post{self.post}")
-
-        # Development release
-        if self.dev is not None:
-            parts.append(f".dev{self.dev}")
-
-        # Local version segment
-        if self.local is not None:
-            parts.append(f"+{self.local}")
-
-        return "".join(parts)
-
-    @property
-    def epoch(self) -> int:
-        _epoch: int = self._version.epoch
-        return _epoch
-
-    @property
-    def release(self) -> Tuple[int, ...]:
-        _release: Tuple[int, ...] = self._version.release
-        return _release
-
-    @property
-    def pre(self) -> Optional[Tuple[str, int]]:
-        _pre: Optional[Tuple[str, int]] = self._version.pre
-        return _pre
-
-    @property
-    def post(self) -> Optional[int]:
-        return self._version.post[1] if self._version.post else None
-
-    @property
-    def dev(self) -> Optional[int]:
-        return self._version.dev[1] if self._version.dev else None
-
-    @property
-    def local(self) -> Optional[str]:
-        if self._version.local:
-            return ".".join(str(x) for x in self._version.local)
-        else:
-            return None
-
-    @property
-    def public(self) -> str:
-        return str(self).split("+", 1)[0]
-
-    @property
-    def base_version(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        return "".join(parts)
-
-    @property
-    def is_prerelease(self) -> bool:
-        return self.dev is not None or self.pre is not None
-
-    @property
-    def is_postrelease(self) -> bool:
-        return self.post is not None
-
-    @property
-    def is_devrelease(self) -> bool:
-        return self.dev is not None
-
-    @property
-    def major(self) -> int:
-        return self.release[0] if len(self.release) >= 1 else 0
-
-    @property
-    def minor(self) -> int:
-        return self.release[1] if len(self.release) >= 2 else 0
-
-    @property
-    def micro(self) -> int:
-        return self.release[2] if len(self.release) >= 3 else 0
-
-
-def _parse_letter_version(
-    letter: str, number: Union[str, bytes, SupportsInt]
-) -> Optional[Tuple[str, int]]:
-
-    if letter:
-        # We consider there to be an implicit 0 in a pre-release if there is
-        # not a numeral associated with it.
-        if number is None:
-            number = 0
-
-        # We normalize any letters to their lower case form
-        letter = letter.lower()
-
-        # We consider some words to be alternate spellings of other words and
-        # in those cases we want to normalize the spellings to our preferred
-        # spelling.
-        if letter == "alpha":
-            letter = "a"
-        elif letter == "beta":
-            letter = "b"
-        elif letter in ["c", "pre", "preview"]:
-            letter = "rc"
-        elif letter in ["rev", "r"]:
-            letter = "post"
-
-        return letter, int(number)
-    if not letter and number:
-        # We assume if we are given a number, but we are not given a letter
-        # then this is using the implicit post release syntax (e.g. 1.0-1)
-        letter = "post"
-
-        return letter, int(number)
-
-    return None
-
-
-_local_version_separators = re.compile(r"[\._-]")
-
-
-def _parse_local_version(local: str) -> Optional[LocalType]:
-    """
-    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
-    """
-    if local is not None:
-        return tuple(
-            part.lower() if not part.isdigit() else int(part)
-            for part in _local_version_separators.split(local)
-        )
-    return None
-
-
-def _cmpkey(
-    epoch: int,
-    release: Tuple[int, ...],
-    pre: Optional[Tuple[str, int]],
-    post: Optional[Tuple[str, int]],
-    dev: Optional[Tuple[str, int]],
-    local: Optional[Tuple[SubLocalType]],
-) -> CmpKey:
-
-    # When we compare a release version, we want to compare it with all of the
-    # trailing zeros removed. So we'll use a reverse the list, drop all the now
-    # leading zeros until we come to something non zero, then take the rest
-    # re-reverse it back into the correct order and make it a tuple and use
-    # that for our sorting key.
-    _release = tuple(
-        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
-    )
-
-    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
-    # We'll do this by abusing the pre segment, but we _only_ want to do this
-    # if there is not a pre or a post segment. If we have one of those then
-    # the normal sorting rules will handle this case correctly.
-    if pre is None and post is None and dev is not None:
-        _pre: PrePostDevType = NegativeInfinity
-    # Versions without a pre-release (except as noted above) should sort after
-    # those with one.
-    elif pre is None:
-        _pre = Infinity
-    else:
-        _pre = pre
-
-    # Versions without a post segment should sort before those with one.
-    if post is None:
-        _post: PrePostDevType = NegativeInfinity
-
-    else:
-        _post = post
-
-    # Versions without a development segment should sort after those with one.
-    if dev is None:
-        _dev: PrePostDevType = Infinity
-
-    else:
-        _dev = dev
-
-    if local is None:
-        # Versions without a local segment should sort before those with one.
-        _local: LocalType = NegativeInfinity
-    else:
-        # Versions with a local segment need that segment parsed to implement
-        # the sorting rules in PEP440.
-        # - Alpha numeric segments sort before numeric segments
-        # - Alpha numeric segments sort lexicographically
-        # - Numeric segments sort numerically
-        # - Shorter versions sort before longer versions when the prefixes
-        #   match exactly
-        _local = tuple(
-            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
-        )
-
-    return epoch, _release, _pre, _post, _dev, _local
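
For context, the split between Version and the deprecated LegacyVersion in parse above is easy to demonstrate (the second call below raises a DeprecationWarning, as coded in LegacyVersion.__init__):

    from packaging.version import LegacyVersion, Version, parse

    v = parse("1.0.post2.dev3+local.7")       # valid PEP 440 -> Version
    assert isinstance(v, Version)
    assert (v.release, v.post, v.dev, v.local) == ((1, 0), 2, 3, "local.7")

    legacy = parse("french toast")            # not PEP 440 -> LegacyVersion
    assert isinstance(legacy, LegacyVersion)

    # Ordering implied by _cmpkey: dev < pre < final < post
    assert parse("1.0.dev0") < parse("1.0a1") < parse("1.0") < parse("1.0.post1")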
spaces/BartPoint/VoiceChange_Beta/infer_pack/attentions.py
DELETED
@@ -1,417 +0,0 @@
import copy
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

from infer_pack import commons
from infer_pack import modules
from infer_pack.modules import LayerNorm


class Encoder(nn.Module):
    def __init__(
        self,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size=1,
        p_dropout=0.0,
        window_size=10,
        **kwargs
    ):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size

        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.attn_layers.append(
                MultiHeadAttention(
                    hidden_channels,
                    hidden_channels,
                    n_heads,
                    p_dropout=p_dropout,
                    window_size=window_size,
                )
            )
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(
                    hidden_channels,
                    hidden_channels,
                    filter_channels,
                    kernel_size,
                    p_dropout=p_dropout,
                )
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class Decoder(nn.Module):
    def __init__(
        self,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size=1,
        p_dropout=0.0,
        proximal_bias=False,
        proximal_init=True,
        **kwargs
    ):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.encdec_attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.self_attn_layers.append(
                MultiHeadAttention(
                    hidden_channels,
                    hidden_channels,
                    n_heads,
                    p_dropout=p_dropout,
                    proximal_bias=proximal_bias,
                    proximal_init=proximal_init,
                )
            )
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.encdec_attn_layers.append(
                MultiHeadAttention(
                    hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
                )
            )
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(
                    hidden_channels,
                    hidden_channels,
                    filter_channels,
                    kernel_size,
                    p_dropout=p_dropout,
                    causal=True,
                )
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, h, h_mask):
        """
        x: decoder input
        h: encoder output
        """
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
            device=x.device, dtype=x.dtype
        )
        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.self_attn_layers[i](x, x, self_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_0[i](x + y)

            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class MultiHeadAttention(nn.Module):
    def __init__(
        self,
        channels,
        out_channels,
        n_heads,
        p_dropout=0.0,
        window_size=None,
        heads_share=True,
        block_length=None,
        proximal_bias=False,
        proximal_init=False,
    ):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels**-0.5
            self.emb_rel_k = nn.Parameter(
                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
                * rel_stddev
            )
            self.emb_rel_v = nn.Parameter(
                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
                * rel_stddev
            )

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert (
                t_s == t_t
            ), "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(
                query / math.sqrt(self.k_channels), key_relative_embeddings
            )
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(
                device=scores.device, dtype=scores.dtype
            )
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert (
                    t_s == t_t
                ), "Local attention is only available for self-attention."
                block_mask = (
                    torch.ones_like(scores)
                    .triu(-self.block_length)
                    .tril(self.block_length)
                )
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(
                self.emb_rel_v, t_s
            )
            output = output + self._matmul_with_relative_values(
                relative_weights, value_relative_embeddings
            )
        output = (
            output.transpose(2, 3).contiguous().view(b, d, t_t)
        )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
            )
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[
            :, slice_start_position:slice_end_position
        ]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

        # Concat extra elements so to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(
            x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
        )

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
            :, :, :length, length - 1 :
        ]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # pad along column
        x = F.pad(
            x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
        )
        x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
        # add 0's in the beginning that will skew the elements after reshape
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
          length: an integer scalar.
        Returns:
          a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        filter_channels,
        kernel_size,
        p_dropout=0.0,
        activation=None,
        causal=False,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        if causal:
            self.padding = self._causal_padding
        else:
            self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = self.kernel_size - 1
        pad_r = 0
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x

    def _same_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = (self.kernel_size - 1) // 2
        pad_r = self.kernel_size // 2
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x
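
For reference, a minimal smoke test of the Encoder above. The shapes follow the code (inputs are [batch, channels, time], masks are [batch, 1, time]); the hyperparameter values here are illustrative rather than taken from any particular RVC config, and `infer_pack` must be importable for the module's own imports to resolve:

    import torch

    # 2 heads evenly divide the 192 hidden channels, as the assert in
    # MultiHeadAttention requires; window_size=10 (the default) enables
    # windowed relative-position self-attention.
    enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=6)
    x = torch.randn(1, 192, 100)    # [batch, hidden_channels, time]
    x_mask = torch.ones(1, 1, 100)  # [batch, 1, time], 1 marks valid frames
    out = enc(x, x_mask)
    print(out.shape)                # torch.Size([1, 192, 100])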
spaces/Benson/text-generation/Examples/1happybirthday.com En Descarga Tamil.md
DELETED
@@ -1,72 +0,0 @@
<br />
<h1>Plants vs Zombies Download 1: How to Play the Classic Tower Defense Game on Your PC</h1>
<h2>Introduction</h2>
<p>Plants vs Zombies is one of the most popular and addictive tower defense games ever created. It was developed by PopCap Games and released in 2009 for Windows and Mac OS X. The game has won several awards and has been praised for its humor, gameplay, and graphics.</p>
<p>In Plants vs Zombies, you have to protect your house from waves of zombies that want to eat your brains. You do this by planting various types of plants that can shoot, explode, or slow the zombies down. The game has 50 levels in Adventure mode, plus other modes such as Survival, Puzzle, and Mini-Games. You can also unlock different plants, zombies, and achievements as you progress.</p>
<h2>1happybirthday.com tamil download</h2><br /><p><b><b>DOWNLOAD</b> ○○○ <a href="https://bltlly.com/2v6JJo">https://bltlly.com/2v6JJo</a></b></p><br /><br />
<p>If you are a fan of Plants vs Zombies, or if you want to try it for the first time, you may be wondering how to play it on your PC. In this article, we will show you two easy ways to download and install Plants vs Zombies on your PC so you can enjoy this classic game on a bigger screen.</p>
<h2>How to Download and Install Plants vs Zombies on PC</h2>
<h3>Option 1: Download from the Google Play Store using the BlueStacks emulator</h3>
<p>One of the easiest ways to play Plants vs Zombies on your PC is to use an Android emulator such as BlueStacks. BlueStacks is software that lets you run Android apps and games on your PC. You can download it for free from [BlueStacks.com]( 2 ).</p>
<h4>Step 1: Download and install BlueStacks on your PC</h4>
<p>Go to [BlueStacks.com]( 2 ) and click the download button. The download will start automatically. Once the download has finished, run the installation file and follow the instructions to install BlueStacks on your PC.</p>
<h4>Step 2: Launch BlueStacks and sign in with your Google account</h4>

<h4>Step 3: Search for Plants vs Zombies in the Google Play Store</h4>
<p>Once you have signed in, you will see the BlueStacks home screen. In the top right corner, you will see a search icon. Click it and type "Plants vs Zombies" into the search bar. You will see a list of results. Click the one that says "Plants vs. Zombies™" by ELECTRONIC ARTS.</p>
<h4>Step 4: Install Plants vs Zombies and enjoy playing it on your PC</h4>
<p>You will be taken to the Plants vs Zombies app page in the Google Play Store. Click the install button and wait for the installation to finish.</p> <p>After the installation has completed, you will see an open button. Click it and you can play Plants vs Zombies on your PC. You can also find the game icon on the BlueStacks home screen or on your desktop. You can use your mouse and keyboard to control the game, or customize the settings to your preference.</p>
<h3>Option 2: Download from Filehippo.com using an installation file</h3>
<p>Another way to play Plants vs Zombies on your PC is to download it from a website that offers installation files for PC games. One of the websites you can use is [Filehippo.com]. Filehippo.com is a reliable and trustworthy source of free software downloads for Windows, Mac, and Android. You can download Plants vs Zombies from Filehippo.com for free, with no viruses or malware.</p>
<h4>Step 1: Go to Filehippo.com and search for Plants vs Zombies</h4>
<p>Open your web browser and go to [Filehippo.com]. In the top right corner, you will see a search box. Type "Plants vs Zombies" into the search box and press enter. You will see a list of results. Click the one that says "Plants vs. Zombies Game Of The Year Edition 1.2.0.1073 for PC Windows".</p>
<p></p>
<h4>Step 2: Click the download button and save the installation file to your PC</h4>

<h4>Step 3: Run the installation file and follow the instructions to install Plants vs Zombies on your PC</h4>
<p>Once the download is complete, go to the location where you saved the installation file and double-click it. A window will appear asking you to confirm that you want to run the file. Click yes and follow the instructions to install Plants vs Zombies on your PC. You may have to accept the terms and conditions and choose a destination folder for the game.</p>
<h4>Step 4: Launch Plants vs Zombies and have fun playing it on your PC</h4>
<p>Once the installation is complete, you will see a shortcut icon for Plants vs Zombies on your desktop or in your start menu. Click it and you can play Plants vs Zombies on your PC. You can use your mouse and keyboard to control the game, or adjust the settings to your liking.</p>
<h2>Conclusion</h2>
<p>Plants vs Zombies is a classic tower defense game that you can play on your PC using an Android emulator such as BlueStacks or an installation file from Filehippo.com. Both methods are easy and free, and they let you enjoy this fun, addictive game on a bigger screen. Whether you want to relive your childhood memories or discover the game for the first time, Plants vs Zombies is a great choice for anyone who loves strategy, humor, and zombies.</p>
<p>If you are ready to play Plants vs Zombies on your PC, choose one of the options above and follow the steps we provided. You will be able to download and install Plants vs Zombies in no time and start planting your defenses against the undead invaders. Have fun!</p>
<h3>Frequently asked questions</h3>
<ul>
<li><b>Is Plants vs Zombies free?</b></li>
<p>Yes, Plants vs Zombies is free to download and play on your PC using BlueStacks or Filehippo.com. However, there may be some in-app purchases or ads in the game, which you can ignore or buy.</p>
<li><b>Is Plants vs Zombies safe?</b></li>

<li><b>What are the system requirements for Plants vs Zombies?</b></li>
<p>The minimum system requirements for Plants vs Zombies are:</p>
<table>
<tr><td>OS</td><td>Windows XP/Vista/7/8/10</td></tr>
<tr><td>CPU</td><td>1.2 GHz processor</td></tr>
<tr><td>RAM</td><td>512 MB</td></tr>
<tr><td>HDD</td><td>65 MB of free space</td></tr>
<tr><td>Graphics</td><td>DirectX 8 or later</td></tr>
<tr><td>Sound</td><td>DirectX-compatible sound card</td></tr>
</table>
<p>The recommended system requirements for Plants vs Zombies are:</p>
<table>
<tr><td>OS</td><td>Windows XP/Vista/7/8/10</td></tr>
<tr><td>CPU</td><td>1.5 GHz processor</td></tr>
<tr><td>RAM</td><td>1 GB</td></tr>
<tr><td>HDD</td><td>65 MB of free space</td></tr>
<tr><td>Graphics</td><td>DirectX 9 or later</td></tr>
<tr><td>Sound</td><td>DirectX-compatible sound card</td></tr>
</table>
<li><b>How many plants and zombies are there in Plants vs Zombies?</b></li>
<p>There are 49 different plants and 26 different zombies in Plants vs Zombies. Each plant and zombie has its own unique abilities and characteristics. You can unlock more plants and zombies as you play the game and complete levels.</p>
<li><b>What are the other modes in Plants vs Zombies?</b></li>
<p>Besides Adventure mode, which has 50 levels, there are other modes in Plants vs Zombies that you can play for extra fun and challenge. These modes are:</p>
<ul>
<li>Survival mode: You have to survive endless waves of zombies with limited resources.</li>
<li>Puzzle mode: You have to solve various puzzles involving plants and zombies.</li>
<li>Mini-Games mode: You have to play various mini-games with different rules and objectives.</li>
<li>Zen Garden mode: You have to grow and look after your own plants in a relaxing garden.</li>
<li>Crazy Dave's Shop: You can buy various items and upgrades from Crazy Dave, the eccentric neighbor who helps you throughout the game.</li>
</ul>

<p>Yes, there is a sequel to Plants vs Zombies called Plants vs. Zombies 2: It's About Time. It was released in 2013 for iOS and Android devices. The sequel features new plants, zombies, worlds, levels, and modes. It also has a time-travel theme that lets you visit different historical periods and fight zombies there.</p> 64aa2da5cf<br />
<br />
<br />
spaces/Benson/text-generation/Examples/Descarga Quickbooks 2017 Premier.md
DELETED
@@ -1,96 +0,0 @@

<h1>Tamil Movie Download 2017: How to Watch the Year's Best Tamil Movies Online</h1>
<p>Tamil movies are a form of Indian cinema originating in the state of Tamil Nadu in southern India. They are known for their rich culture, history, and diversity, as well as their distinctive style, music, and action. Tamil movies have a huge fan base not only in India but also in other countries such as Sri Lanka, Malaysia, Singapore, and the Middle East.</p>
<h2>quickbooks 2017 premier download</h2><br /><p><b><b>DOWNLOAD</b> === <a href="https://bltlly.com/2v6Jg3">https://bltlly.com/2v6Jg3</a></b></p><br /><br />
<p>2017 was a great year for Tamil cinema, producing some of the most acclaimed and commercially successful movies in recent memory. Some of these movies include <i>Vikram Vedha</i>, a crime thriller that explores the moral dilemma between a cop and a gangster; <i>Baahubali 2: The Conclusion</i>, an epic fantasy that broke box-office records across India; <i>Mersal</i>, a political drama that sparked controversy and debate; and <i>Vivegam</i>, an action-packed spy thriller that showcased Ajith Kumar's star power.</p>
<h2>How to Download Tamil Movies Legally and Safely</h2>
<p>If you want to watch these amazing Tamil movies online, you may be tempted to look for free or cheap downloads from various websites. However, this is not a good idea, as it can expose you to many risks and problems. Here are some of the reasons why you should avoid illegal or pirated sites and opt for legal, safe streaming services instead.</p>
<h3>The benefits of using a streaming service</h3>
<p>A streaming service is a platform that lets you watch movies and shows online without having to download them. You can access them anytime, anywhere, as long as you have an internet connection and a compatible device. Some of the benefits of using a streaming service are:</p>
<p></p>
<ul>
<li>You can watch high-quality video without buffering or interruptions.</li>
<li>You can choose from a wide range of genres, languages, and categories.</li>
<li>You can enjoy exclusive content that is not available anywhere else.</li>

<li>You can avoid the legal problems and penalties that can arise from downloading or sharing copyrighted content.</li>
</ul>
<h4>A list of some of the best streaming services for Tamil movies</h4>
<p>There are many streaming services that offer Tamil movies online, but some are better than others. Here are some of the best ones to try:</p>
<table>
<tr><th>Streaming service</th><th>Features</th><th>Price</th></tr>
<tr><td>Hotstar</td><td>- Offers over 1000 Tamil movies, including new releases and classics<br>- Also has Hindi, English, Malayalam, Telugu, Kannada, Bengali, and Marathi movies<br>- Has live sports, news, TV shows, and originals<br>- Has subtitle and dubbing options<br>- Has an offline viewing feature</td><td>- Free for limited content<br>- Rs. 299 per month or Rs. 1499 per year for premium content</td></tr>
<tr><td>Amazon Prime Video</td><td>- Offers over 500 Tamil movies, including new releases and exclusives<br>- Also has Hindi, English, Malayalam, Telugu, Kannada, Bengali, Marathi, and other regional movies<br>- Has live sports, news, TV shows, and originals<br>- Has subtitle and dubbing options<br>- Has an offline viewing feature</td><td>- Free for a 30-day trial<br>- Rs. 129 per month or Rs. 999 per year for unlimited content</td></tr>
<tr><td>Netflix</td><td>- Offers over 300 Tamil movies, including new releases and originals<br>- Also has Hindi, English, Malayalam, Telugu, Kannada, Bengali, Marathi, and other regional and international movies<br>- Has TV shows, documentaries, and specials<br>- Has subtitle and dubbing options<br>- Has an offline viewing feature</td><td>- Rs. 199 per month for the mobile plan (one device)<br>- Rs. 499 per month for the basic plan (one device)<br>- Rs. 649 per month for the standard plan (two devices)<br>- Rs. 799 per month for the premium plan (four devices)</td></tr>

</table>
<h3>The risks of using illegal or pirated sites</h3>
<p>While streaming services are the best way to watch Tamil movies online, some people may still turn to illegal or pirated sites that offer free or cheap downloads. However, this is a very risky and irresponsible practice that can have serious consequences. Here are some of the risks and problems of using these sites:</p>
<ul>
<li>You can download viruses, malware, or spyware that can damage your device or steal your personal information.</li>
<li>You can face legal action or fines for violating the intellectual property rights of filmmakers and distributors.</li>
<li>You can compromise the quality and security of your internet connection by exposing it to hackers or cyberattacks.</li>
<li>You can miss out on the original, authentic experience of watching Tamil movies as they are meant to be seen.</li>
<li>You can contribute to the loss of revenue and jobs for the Tamil film industry and its workers.</li>
</ul>
<h4>A list of some of the common dangers and drawbacks of using such sites</h4>
<p>There are many illegal or pirated sites that claim to offer Tamil movie downloads, but most of them are unreliable, unsafe, or unethical. Here are some of the common dangers and drawbacks of using such sites:</p>
<table>
<tr><th>Illegal or pirated site</th><th>Dangers and drawbacks</th></tr>
<tr><td>Tamilrockers</td><td>- One of the most notorious sites for leaking new Tamil movies online<br>- Frequently changes its domain name to evade the authorities<br>- Contains pop-up ads, redirects, and malware<br>- Faces legal action and bans from various governments and ISPs<br>- Does not respect the hard work and creativity of filmmakers and actors</td></tr>

<tr><td>Isaimini</td><td>- A site that provides Tamil movie downloads in various formats and sizes<br>- Has an outdated design and interface with broken links<br>- Contains spam ads, redirects, and malware<br>- Faces legal action and bans from various governments and ISPs<br>- Harms the reputation and image of the Tamil film industry</td></tr>
</table> <h2>How to Enjoy Tamil Movies with Subtitles and Dubbing</h2>
<p>Another aspect of watching Tamil movies online is the choice between subtitles and dubbing. Subtitles are the text that appears at the bottom of the screen, translating the dialogue into another language. Dubbing is the process of replacing the actors' original voices with another language. Both subtitles and dubbing have their pros and cons, depending on your preference and convenience. Here are some of the advantages and disadvantages of watching Tamil movies with subtitles and dubbing.</p>
<h3>The advantages of watching Tamil movies with subtitles</h3>
<p>Subtitles are a great way to enjoy Tamil movies if you are not familiar with the language or do not want to learn it. Some of the advantages of watching Tamil movies with subtitles are:</p>
<ul>
<li>You can understand the dialogue and the story better.</li>
<li>You can appreciate the actors' nuances, emotions, and expressions.</li>
<li>You can learn new Tamil words, phrases, and expressions.</li>
<li>You can avoid missing important details or jokes that might be lost in translation.</li>
<li>You can watch the movie in its original form and quality.</li>
</ul>
<h4>A list of some of the best sites for finding subtitles for Tamil movies</h4>
<p>There are many sites that offer subtitles for Tamil movies online, but some are better than others. Here are some of the best ones to try:</p>
<table>
<tr><th>Subtitle site</th><th>Features</th></tr>

<tr><td>Opensubtitles</td><td>- Another popular and reliable site for finding subtitles for movies and shows<br>- Has a large collection of subtitles in various languages, including Tamil<br>- Has a simple, fast interface and search feature<br>- Lets users upload, rate, and comment on subtitles<br>- Has a blog for news and updates about subtitles</td></tr>
<tr><td>YIFY Subtitles</td><td>- A site that specializes in subtitles for YIFY movies, which are high-quality, small-size releases<br>- Has a decent selection of subtitles in various languages, including Tamil<br>- Has a sleek, modern interface and search feature<br>- Lets users upload, rate, and comment on subtitles<br>- Has an FAQ section for help and support</td></tr>
</table> <h3>The disadvantages of watching Tamil movies with dubbing</h3>
<p>Dubbing is a common practice used to make movies accessible to a wider audience that does not speak the original language. However, dubbing can also have drawbacks that affect your enjoyment and appreciation of Tamil movies. Some of the disadvantages of watching Tamil movies with dubbing are:</p>
<ul>
<li>You can lose the actors' original voices and tone.</li>
<li>You can miss the cultural and linguistic nuances and references that are specific to Tamil.</li>
<li>You can be distracted or annoyed by the mismatch between the lip movements and the voice.</li>
<li>You can run into poor-quality or inaccurate dubbing that ruins the mood and meaning of the movie.</li>
<li>You can disrespect the artistic vision and intent of the filmmakers and actors.</li>
</ul>
<h4>A list of some of the reasons why dubbing can ruin the original experience</h4>
<p>There are many examples of how dubbing can ruin the original experience of watching Tamil movies. Here are some of them:</p>
<table>
<tr><th>Tamil movie</th><th>Reason why dubbing can ruin it</th></tr>

<tr><td><i>Baahubali 2: The Conclusion</i></td><td>- The movie is a visual spectacle that showcases the grandeur and beauty of Tamil culture and history<br>- Dubbing can dilute the authenticity and richness of the language and music that are integral to the movie<br>- Dubbing can also reduce the intensity and emotion of the action sequences and the climax</td></tr>
<tr><td><i>Mersal</i></td><td>- The movie is a political drama that addresses some of the social and economic problems in India<br>- Dubbing can lose the relevance and resonance of some of the dialogues and songs that are aimed at the Tamil audience<br>- Dubbing can also change or censor some of the controversial or sensitive aspects of the movie</td></tr>
</table>
<h2>Conclusion</h2>
<p>Tamil movies are a great source of entertainment, education, and inspiration for millions of people around the world. They offer a variety of genres, themes, and styles to suit different tastes and preferences. However, if you want to watch Tamil movies online, you should be careful about how you download them. You should avoid illegal or pirated sites that can harm your device, your security, and your conscience. You should opt for legal, safe streaming services that can provide you with high-quality video, a wide range of options, and exclusive content. You should also consider watching Tamil movies with subtitles rather than dubbing, as subtitles can deepen your understanding and appreciation of the language, culture, and art of Tamil cinema.</p>

<p>What are you waiting for? Sign up for Hotstar today and start watching your favorite Tamil movies online. You won't regret it!</p>
<h3>Frequently asked questions</h3>
<ol>
<li>What are some of the best Tamil movies of 2017?</li>
<p>Some of the best Tamil movies of 2017 are <i>Vikram Vedha</i>, <i>Baahubali 2: The Conclusion</i>, <i>Mersal</i>, <i>Vivegam</i>, <i>Aruvi</i>, <i>Theeran Adhigaaram Ondru</i>, <i>Thupparivaalan</i>, <i>Velaikkaran</i>, <i>Meesaya Murukku</i>, and <i>Vikram Vedha</i>.</p> <p>2. How can I download Tamil movies legally and safely?</p>
<p>You can download Tamil movies legally and safely by using a streaming service that offers Tamil movies online, such as Hotstar, Amazon Prime Video, Netflix, or ZEE5. These streaming services have the license and permission to distribute Tamil movies online, and they also provide high-quality video, a wide range of options, and exclusive content. You can also avoid the risks and problems of using illegal or pirated sites that can harm your device, your security, and your conscience.</p>
<li>How can I watch Tamil movies with subtitles or dubbing?</li>
<p>You can watch Tamil movies with subtitles or dubbing by choosing the option that suits your preference and convenience. Most streaming services offer subtitle or dubbing options for Tamil movies, depending on availability and demand. You can also find subtitles for Tamil movies on sites that specialize in providing subtitles for movies and shows, such as Subscene, Opensubtitles, or YIFY Subtitles. However, you should be careful about the quality and accuracy of the subtitles or dubbing, as they can affect your enjoyment and appreciation of the movie.</p>
<li>What are the advantages and disadvantages of watching Tamil movies with subtitles or dubbing?</li>

<li>Why should I avoid illegal or pirated sites for downloading Tamil movies?</li>
<p>You should avoid illegal or pirated sites for downloading Tamil movies because they can expose you to many risks and problems. Some of the risks and problems of using these sites are that you can download viruses, malware, or spyware that can damage your device or steal your personal information; face legal action or fines for violating the intellectual property rights of filmmakers and distributors; compromise the quality and security of your internet connection by exposing it to hackers or cyberattacks; miss out on the original, authentic experience of watching Tamil movies as they are meant to be seen; and contribute to the loss of revenue and jobs for the Tamil film industry and its workers.</p>
<li>What is Hotstar and why should I use it to watch Tamil movies online?</li>
<p>Hotstar is one of the best streaming services for watching Tamil movies online, as it offers over 1000 Tamil movies, including new releases and classics. You can also watch Hindi, English, Malayalam, Telugu, Kannada, Bengali, Marathi, and other regional movies on Hotstar. You can enjoy live sports, news, TV shows, and originals on Hotstar as well. You can watch Hotstar with subtitle or dubbing options, depending on your preference. You can also download videos for offline viewing on Hotstar. You can get Hotstar for free for limited content, or for Rs. 299 per month or Rs. 1499 per year for premium content.</p> 64aa2da5cf<br />
<br />
<br />
spaces/BernardoOlisan/vqganclip/CLIP/clip/model.py
DELETED
@@ -1,432 +0,0 @@
from collections import OrderedDict
from typing import Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)

        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()

        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride

        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))

    def forward(self, x: torch.Tensor):
        identity = x

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)
        return out


class AttentionPool2d(nn.Module):
    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        x, _ = F.multi_head_attention_forward(
            query=x, key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )

        return x[0]


class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution

        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)

        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)

    def _make_layer(self, planes, blocks, stride=1):
        layers = [Bottleneck(self._inplanes, planes, stride)]

        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        def stem(x):
            for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
                x = self.relu(bn(conv(x)))
            x = self.avgpool(x)
            return x

        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)

        return x


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor):
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


class QuickGELU(nn.Module):
    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)


class ResidualAttentionBlock(nn.Module):
    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


class Transformer(nn.Module):
    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])

    def forward(self, x: torch.Tensor):
        return self.resblocks(x)


class VisionTransformer(nn.Module):
    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)

        self.transformer = Transformer(width, layers, heads)

        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        x = self.ln_post(x[:, 0, :])

        if self.proj is not None:
            x = x @ self.proj

        return x


class CLIP(nn.Module):
    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        super().__init__()

        self.context_length = context_length

        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim
            )

        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask()
        )

        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)

        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

        self.initialize_parameters()

    def initialize_parameters(self):
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)

            for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)

        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        return self.visual(image.type(self.dtype))

    def encode_text(self, text):
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection

        return x

    def forward(self, image, text):
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)

        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logit_scale * text_features @ image_features.t()

        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text


def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16"""

    def _convert_weights_to_fp16(l):
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.half()
            if l.bias is not None:
                l.bias.data = l.bias.data.half()

        if isinstance(l, nn.MultiheadAttention):
            for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
                tensor = getattr(l, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        for name in ["text_projection", "proj"]:
            if hasattr(l, name):
                attr = getattr(l, name)
                if attr is not None:
                    attr.data = attr.data.half()

    model.apply(_convert_weights_to_fp16)


def build_model(state_dict: dict):
    vit = "visual.proj" in state_dict

    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        image_resolution = output_width * 32

    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
|
415 |
-
vocab_size = state_dict["token_embedding.weight"].shape[0]
|
416 |
-
transformer_width = state_dict["ln_final.weight"].shape[0]
|
417 |
-
transformer_heads = transformer_width // 64
|
418 |
-
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
|
419 |
-
|
420 |
-
model = CLIP(
|
421 |
-
embed_dim,
|
422 |
-
image_resolution, vision_layers, vision_width, vision_patch_size,
|
423 |
-
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
|
424 |
-
)
|
425 |
-
|
426 |
-
for key in ["input_resolution", "context_length", "vocab_size"]:
|
427 |
-
if key in state_dict:
|
428 |
-
del state_dict[key]
|
429 |
-
|
430 |
-
convert_weights(model)
|
431 |
-
model.load_state_dict(state_dict)
|
432 |
-
return model.eval()
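For reference, the `build_model` entry point in the deleted file infers every CLIP hyperparameter from tensor shapes in the checkpoint, so a smoke test needs only a state dict. A minimal sketch, assuming a locally downloaded checkpoint (the path is hypothetical; OpenAI's released weights ship as TorchScript archives, hence the `torch.jit.load` step), and casting back to fp32 because `convert_weights` leaves the model in fp16:

    import torch

    # Hypothetical local path to an OpenAI CLIP checkpoint (TorchScript archive)
    state_dict = torch.jit.load("ViT-B-32.pt", map_location="cpu").state_dict()
    model = build_model(state_dict).float()  # back to fp32 for a CPU run

    res = model.visual.input_resolution
    image = torch.randn(1, 3, res, res)
    text = torch.randint(0, model.vocab_size, (1, model.context_length))
    logits_per_image, logits_per_text = model(image, text)
    print(logits_per_image.shape)  # torch.Size([1, 1])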
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/codec.py
DELETED
@@ -1,112 +0,0 @@
-from .core import encode, decode, alabel, ulabel, IDNAError
-import codecs
-import re
-from typing import Tuple, Optional
-
-_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
-
-class Codec(codecs.Codec):
-
-    def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]:
-        if errors != 'strict':
-            raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
-
-        if not data:
-            return b"", 0
-
-        return encode(data), len(data)
-
-    def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]:
-        if errors != 'strict':
-            raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
-
-        if not data:
-            return '', 0
-
-        return decode(data), len(data)
-
-class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
-    def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[str, int]:  # type: ignore
-        if errors != 'strict':
-            raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
-
-        if not data:
-            return "", 0
-
-        labels = _unicode_dots_re.split(data)
-        trailing_dot = ''
-        if labels:
-            if not labels[-1]:
-                trailing_dot = '.'
-                del labels[-1]
-            elif not final:
-                # Keep potentially unfinished label until the next call
-                del labels[-1]
-                if labels:
-                    trailing_dot = '.'
-
-        result = []
-        size = 0
-        for label in labels:
-            result.append(alabel(label))
-            if size:
-                size += 1
-            size += len(label)
-
-        # Join with U+002E
-        result_str = '.'.join(result) + trailing_dot  # type: ignore
-        size += len(trailing_dot)
-        return result_str, size
-
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
-    def _buffer_decode(self, data: str, errors: str, final: bool) -> Tuple[str, int]:  # type: ignore
-        if errors != 'strict':
-            raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
-
-        if not data:
-            return ('', 0)
-
-        labels = _unicode_dots_re.split(data)
-        trailing_dot = ''
-        if labels:
-            if not labels[-1]:
-                trailing_dot = '.'
-                del labels[-1]
-            elif not final:
-                # Keep potentially unfinished label until the next call
-                del labels[-1]
-                if labels:
-                    trailing_dot = '.'
-
-        result = []
-        size = 0
-        for label in labels:
-            result.append(ulabel(label))
-            if size:
-                size += 1
-            size += len(label)
-
-        result_str = '.'.join(result) + trailing_dot
-        size += len(trailing_dot)
-        return (result_str, size)
-
-
-class StreamWriter(Codec, codecs.StreamWriter):
-    pass
-
-
-class StreamReader(Codec, codecs.StreamReader):
-    pass
-
-
-def getregentry() -> codecs.CodecInfo:
-    # Compatibility as a search_function for codecs.register()
-    return codecs.CodecInfo(
-        name='idna',
-        encode=Codec().encode,  # type: ignore
-        decode=Codec().decode,  # type: ignore
-        incrementalencoder=IncrementalEncoder,
-        incrementaldecoder=IncrementalDecoder,
-        streamwriter=StreamWriter,
-        streamreader=StreamReader,
-    )
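As a usage note for the codec above: `getregentry` is designed to be returned from a `codecs.register` search function. A minimal sketch, assuming the vendored import path; the lookup name `idna2008` is a deliberate choice here, since the standard library already owns the name `idna` (IDNA 2003) and earlier-registered search functions take precedence (later releases of the standalone idna package register `idna2008` themselves):

    import codecs
    from pip._vendor.idna import codec  # standalone package: `import idna.codec`

    def _search(name):
        return codec.getregentry() if name == 'idna2008' else None

    codecs.register(_search)

    print('bücher.example'.encode('idna2008'))          # b'xn--bcher-kva.example'
    print(b'xn--bcher-kva.example'.decode('idna2008'))  # bücher.example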
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/install_scripts.py
DELETED
@@ -1,70 +0,0 @@
-from distutils import log
-import distutils.command.install_scripts as orig
-from distutils.errors import DistutilsModuleError
-import os
-import sys
-
-from pkg_resources import Distribution, PathMetadata
-from .._path import ensure_directory
-
-
-class install_scripts(orig.install_scripts):
-    """Do normal script install, plus any egg_info wrapper scripts"""
-
-    def initialize_options(self):
-        orig.install_scripts.initialize_options(self)
-        self.no_ep = False
-
-    def run(self):
-        import setuptools.command.easy_install as ei
-
-        self.run_command("egg_info")
-        if self.distribution.scripts:
-            orig.install_scripts.run(self)  # run first to set up self.outfiles
-        else:
-            self.outfiles = []
-        if self.no_ep:
-            # don't install entry point scripts into .egg file!
-            return
-
-        ei_cmd = self.get_finalized_command("egg_info")
-        dist = Distribution(
-            ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
-            ei_cmd.egg_name, ei_cmd.egg_version,
-        )
-        bs_cmd = self.get_finalized_command('build_scripts')
-        exec_param = getattr(bs_cmd, 'executable', None)
-        try:
-            bw_cmd = self.get_finalized_command("bdist_wininst")
-            is_wininst = getattr(bw_cmd, '_is_running', False)
-        except (ImportError, DistutilsModuleError):
-            is_wininst = False
-        writer = ei.ScriptWriter
-        if is_wininst:
-            exec_param = "python.exe"
-            writer = ei.WindowsScriptWriter
-        if exec_param == sys.executable:
-            # In case the path to the Python executable contains a space, wrap
-            # it so it's not split up.
-            exec_param = [exec_param]
-        # resolve the writer to the environment
-        writer = writer.best()
-        cmd = writer.command_spec_class.best().from_param(exec_param)
-        for args in writer.get_args(dist, cmd.as_header()):
-            self.write_script(*args)
-
-    def write_script(self, script_name, contents, mode="t", *ignored):
-        """Write an executable file to the scripts directory"""
-        from setuptools.command.easy_install import chmod, current_umask
-
-        log.info("Installing %s script to %s", script_name, self.install_dir)
-        target = os.path.join(self.install_dir, script_name)
-        self.outfiles.append(target)
-
-        mask = current_umask()
-        if not self.dry_run:
-            ensure_directory(target)
-            f = open(target, "w" + mode)
-            f.write(contents)
-            f.close()
-            chmod(target, 0o777 - mask)
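Setuptools wires this command in automatically, but it can also be registered explicitly. A minimal sketch with a hypothetical project layout (the package name and script path are placeholders):

    from setuptools import setup
    from setuptools.command.install_scripts import install_scripts

    setup(
        name="demo-project",        # hypothetical
        version="0.0.1",
        scripts=["bin/demo-tool"],  # plain scripts handled by the command above
        cmdclass={"install_scripts": install_scripts},
    )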
spaces/BilalSardar/Text-To-image-AllModels/app.py
DELETED
@@ -1,44 +0,0 @@
-from diffusers import StableDiffusionPipeline
-import torch
-
-modelieo=['nitrosocke/Arcane-Diffusion',
-          'dreamlike-art/dreamlike-diffusion-1.0',
-          'nitrosocke/archer-diffusion',
-          'Linaqruf/anything-v3.0',
-          'nitrosocke/mo-di-diffusion',
-          'nitrosocke/classic-anim-diffusion',
-          'dallinmackay/Van-Gogh-diffusion',
-          'wavymulder/wavyfusion',
-          'wavymulder/Analog-Diffusion',
-          'nitrosocke/redshift-diffusion',
-          'prompthero/midjourney-v4-diffusion',
-          'hakurei/waifu-diffusion',
-          'DGSpitzer/Cyberpunk-Anime-Diffusion',
-          'nitrosocke/elden-ring-diffusion',
-          'naclbit/trinart_stable_diffusion_v2',
-          'nitrosocke/spider-verse-diffusion',
-          'Fictiverse/Stable_Diffusion_BalloonArt_Model',
-          'dallinmackay/Tron-Legacy-diffusion',
-          'lambdalabs/sd-pokemon-diffusers',
-          'AstraliteHeart/pony-diffusion',
-          'nousr/robo-diffusion']
-
-
-def TextToImage(Prompt,model):
-    model_id = model
-    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-    pipe = pipe.to("cpu")
-
-    prompt = Prompt
-    image = pipe(prompt).images[0]
-
-    return image
-
-
-import gradio as gr
-interface = gr.Interface(fn=TextToImage,
-                         inputs=["text", gr.Dropdown(modelieo)],
-                         outputs="image",
-                         title='Text to Image')
-
-interface.launch()
spaces/Boadiwaa/Recipes/openai/api_resources/answer.py
DELETED
@@ -1,12 +0,0 @@
-from openai.openai_object import OpenAIObject
-
-
-class Answer(OpenAIObject):
-    @classmethod
-    def get_url(self):
-        return "/answers"
-
-    @classmethod
-    def create(cls, **params):
-        instance = cls()
-        return instance.request("post", cls.get_url(), params)
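The class above is a thin wrapper that forwards its keyword arguments to POST /answers, so usage is a single call. A sketch following the shape of OpenAI's long-retired Answers API; the API key and every parameter value are placeholders for illustration only:

    import openai
    from openai.api_resources.answer import Answer

    openai.api_key = "sk-..."  # placeholder

    response = Answer.create(
        search_model="ada",
        model="curie",
        question="Which puppy is happy?",
        documents=["Puppy A is happy.", "Puppy B is sad."],
        examples_context="In 2017, U.S. life expectancy was 78.6 years.",
        examples=[["What is human life expectancy in the United States?", "78 years."]],
        max_tokens=5,
    )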
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/structures.py
DELETED
@@ -1,578 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import base64
-import numpy as np
-from io import BytesIO
-import torch
-from PIL import Image
-from torch.nn import functional as F
-
-
-class DensePoseTransformData(object):
-
-    # Horizontal symmetry label transforms used for horizontal flip
-    MASK_LABEL_SYMMETRIES = [0, 1, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 14]
-    # fmt: off
-    POINT_LABEL_SYMMETRIES = [ 0, 1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17, 20, 19, 22, 21, 24, 23]  # noqa
-    # fmt: on
-
-    def __init__(self, uv_symmetries):
-        self.mask_label_symmetries = DensePoseTransformData.MASK_LABEL_SYMMETRIES
-        self.point_label_symmetries = DensePoseTransformData.POINT_LABEL_SYMMETRIES
-        self.uv_symmetries = uv_symmetries
-
-    @staticmethod
-    def load(fpath):
-        import scipy.io
-
-        uv_symmetry_map = scipy.io.loadmat(fpath)
-        uv_symmetry_map_torch = {}
-        for key in ["U_transforms", "V_transforms"]:
-            uv_symmetry_map_torch[key] = []
-            map_src = uv_symmetry_map[key]
-            map_dst = uv_symmetry_map_torch[key]
-            for i in range(map_src.shape[1]):
-                map_dst.append(torch.from_numpy(map_src[0, i]).to(dtype=torch.float))
-            uv_symmetry_map_torch[key] = torch.stack(map_dst, dim=0).to(
-                device=torch.cuda.current_device()
-            )
-        transform_data = DensePoseTransformData(uv_symmetry_map_torch)
-        return transform_data
-
-
-class DensePoseDataRelative(object):
-    """
-    Dense pose relative annotations that can be applied to any bounding box:
-        x - normalized X coordinates [0, 255] of annotated points
-        y - normalized Y coordinates [0, 255] of annotated points
-        i - body part labels 0,...,24 for annotated points
-        u - body part U coordinates [0, 1] for annotated points
-        v - body part V coordinates [0, 1] for annotated points
-        segm - 256x256 segmentation mask with values 0,...,14
-    To obtain absolute x and y data wrt some bounding box one needs to first
-    divide the data by 256, multiply by the respective bounding box size
-    and add bounding box offset:
-        x_img = x0 + x_norm * w / 256.0
-        y_img = y0 + y_norm * h / 256.0
-    Segmentation masks are typically sampled to get image-based masks.
-    """
-
-    # Key for normalized X coordinates in annotation dict
-    X_KEY = "dp_x"
-    # Key for normalized Y coordinates in annotation dict
-    Y_KEY = "dp_y"
-    # Key for U part coordinates in annotation dict
-    U_KEY = "dp_U"
-    # Key for V part coordinates in annotation dict
-    V_KEY = "dp_V"
-    # Key for I point labels in annotation dict
-    I_KEY = "dp_I"
-    # Key for segmentation mask in annotation dict
-    S_KEY = "dp_masks"
-    # Number of body parts in segmentation masks
-    N_BODY_PARTS = 14
-    # Number of parts in point labels
-    N_PART_LABELS = 24
-    MASK_SIZE = 256
-
-    def __init__(self, annotation, cleanup=False):
-        is_valid, reason_not_valid = DensePoseDataRelative.validate_annotation(annotation)
-        assert is_valid, "Invalid DensePose annotations: {}".format(reason_not_valid)
-        self.x = torch.as_tensor(annotation[DensePoseDataRelative.X_KEY])
-        self.y = torch.as_tensor(annotation[DensePoseDataRelative.Y_KEY])
-        self.i = torch.as_tensor(annotation[DensePoseDataRelative.I_KEY])
-        self.u = torch.as_tensor(annotation[DensePoseDataRelative.U_KEY])
-        self.v = torch.as_tensor(annotation[DensePoseDataRelative.V_KEY])
-        self.segm = DensePoseDataRelative.extract_segmentation_mask(annotation)
-        self.device = torch.device("cpu")
-        if cleanup:
-            DensePoseDataRelative.cleanup_annotation(annotation)
-
-    def to(self, device):
-        if self.device == device:
-            return self
-        new_data = DensePoseDataRelative.__new__(DensePoseDataRelative)
-        new_data.x = self.x
-        new_data.x = self.x.to(device)
-        new_data.y = self.y.to(device)
-        new_data.i = self.i.to(device)
-        new_data.u = self.u.to(device)
-        new_data.v = self.v.to(device)
-        new_data.segm = self.segm.to(device)
-        new_data.device = device
-        return new_data
-
-    @staticmethod
-    def extract_segmentation_mask(annotation):
-        import pycocotools.mask as mask_utils
-
-        poly_specs = annotation[DensePoseDataRelative.S_KEY]
-        segm = torch.zeros((DensePoseDataRelative.MASK_SIZE,) * 2, dtype=torch.float32)
-        for i in range(DensePoseDataRelative.N_BODY_PARTS):
-            poly_i = poly_specs[i]
-            if poly_i:
-                mask_i = mask_utils.decode(poly_i)
-                segm[mask_i > 0] = i + 1
-        return segm
-
-    @staticmethod
-    def validate_annotation(annotation):
-        for key in [
-            DensePoseDataRelative.X_KEY,
-            DensePoseDataRelative.Y_KEY,
-            DensePoseDataRelative.I_KEY,
-            DensePoseDataRelative.U_KEY,
-            DensePoseDataRelative.V_KEY,
-            DensePoseDataRelative.S_KEY,
-        ]:
-            if key not in annotation:
-                return False, "no {key} data in the annotation".format(key=key)
-        return True, None
-
-    @staticmethod
-    def cleanup_annotation(annotation):
-        for key in [
-            DensePoseDataRelative.X_KEY,
-            DensePoseDataRelative.Y_KEY,
-            DensePoseDataRelative.I_KEY,
-            DensePoseDataRelative.U_KEY,
-            DensePoseDataRelative.V_KEY,
-            DensePoseDataRelative.S_KEY,
-        ]:
-            if key in annotation:
-                del annotation[key]
-
-    def apply_transform(self, transforms, densepose_transform_data):
-        self._transform_pts(transforms, densepose_transform_data)
-        self._transform_segm(transforms, densepose_transform_data)
-
-    def _transform_pts(self, transforms, dp_transform_data):
-        import detectron2.data.transforms as T
-
-        # NOTE: This assumes that HorizFlipTransform is the only one that does flip
-        do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
-        if do_hflip:
-            self.x = self.segm.size(1) - self.x
-            self._flip_iuv_semantics(dp_transform_data)
-
-    def _flip_iuv_semantics(self, dp_transform_data: DensePoseTransformData) -> None:
-        i_old = self.i.clone()
-        uv_symmetries = dp_transform_data.uv_symmetries
-        pt_label_symmetries = dp_transform_data.point_label_symmetries
-        for i in range(self.N_PART_LABELS):
-            if i + 1 in i_old:
-                annot_indices_i = i_old == i + 1
-                if pt_label_symmetries[i + 1] != i + 1:
-                    self.i[annot_indices_i] = pt_label_symmetries[i + 1]
-                u_loc = (self.u[annot_indices_i] * 255).long()
-                v_loc = (self.v[annot_indices_i] * 255).long()
-                self.u[annot_indices_i] = uv_symmetries["U_transforms"][i][v_loc, u_loc].to(
-                    device=self.u.device
-                )
-                self.v[annot_indices_i] = uv_symmetries["V_transforms"][i][v_loc, u_loc].to(
-                    device=self.v.device
-                )
-
-    def _transform_segm(self, transforms, dp_transform_data):
-        import detectron2.data.transforms as T
-
-        # NOTE: This assumes that HorizFlipTransform is the only one that does flip
-        do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
-        if do_hflip:
-            self.segm = torch.flip(self.segm, [1])
-            self._flip_segm_semantics(dp_transform_data)
-
-    def _flip_segm_semantics(self, dp_transform_data):
-        old_segm = self.segm.clone()
-        mask_label_symmetries = dp_transform_data.mask_label_symmetries
-        for i in range(self.N_BODY_PARTS):
-            if mask_label_symmetries[i + 1] != i + 1:
-                self.segm[old_segm == i + 1] = mask_label_symmetries[i + 1]
-
-
-def normalized_coords_transform(x0, y0, w, h):
-    """
-    Coordinates transform that maps top left corner to (-1, -1) and bottom
-    right corner to (1, 1). Used for torch.grid_sample to initialize the
-    grid
-    """
-
-    def f(p):
-        return (2 * (p[0] - x0) / w - 1, 2 * (p[1] - y0) / h - 1)
-
-    return f
-
-
-class DensePoseOutput(object):
-    def __init__(self, S, I, U, V, confidences):
-        """
-        Args:
-            S (`torch.Tensor`): coarse segmentation tensor of size (N, A, H, W)
-            I (`torch.Tensor`): fine segmentation tensor of size (N, C, H, W)
-            U (`torch.Tensor`): U coordinates for each fine segmentation label of size (N, C, H, W)
-            V (`torch.Tensor`): V coordinates for each fine segmentation label of size (N, C, H, W)
-            confidences (dict of str -> `torch.Tensor`) estimated confidence model parameters
-        """
-        self.S = S
-        self.I = I  # noqa: E741
-        self.U = U
-        self.V = V
-        self.confidences = confidences
-        self._check_output_dims(S, I, U, V)
-
-    def _check_output_dims(self, S, I, U, V):
-        assert (
-            len(S.size()) == 4
-        ), "Segmentation output should have 4 " "dimensions (NCHW), but has size {}".format(
-            S.size()
-        )
-        assert (
-            len(I.size()) == 4
-        ), "Segmentation output should have 4 " "dimensions (NCHW), but has size {}".format(
-            S.size()
-        )
-        assert (
-            len(U.size()) == 4
-        ), "Segmentation output should have 4 " "dimensions (NCHW), but has size {}".format(
-            S.size()
-        )
-        assert (
-            len(V.size()) == 4
-        ), "Segmentation output should have 4 " "dimensions (NCHW), but has size {}".format(
-            S.size()
-        )
-        assert len(S) == len(I), (
-            "Number of output segmentation planes {} "
-            "should be equal to the number of output part index "
-            "planes {}".format(len(S), len(I))
-        )
-        assert S.size()[2:] == I.size()[2:], (
-            "Output segmentation plane size {} "
-            "should be equal to the output part index "
-            "plane size {}".format(S.size()[2:], I.size()[2:])
-        )
-        assert I.size() == U.size(), (
-            "Part index output shape {} "
-            "should be the same as U coordinates output shape {}".format(I.size(), U.size())
-        )
-        assert I.size() == V.size(), (
-            "Part index output shape {} "
-            "should be the same as V coordinates output shape {}".format(I.size(), V.size())
-        )
-
-    def resize(self, image_size_hw):
-        # do nothing - outputs are invariant to resize
-        pass
-
-    def _crop(self, S, I, U, V, bbox_old_xywh, bbox_new_xywh):
-        """
-        Resample S, I, U, V from bbox_old to the cropped bbox_new
-        """
-        x0old, y0old, wold, hold = bbox_old_xywh
-        x0new, y0new, wnew, hnew = bbox_new_xywh
-        tr_coords = normalized_coords_transform(x0old, y0old, wold, hold)
-        topleft = (x0new, y0new)
-        bottomright = (x0new + wnew, y0new + hnew)
-        topleft_norm = tr_coords(topleft)
-        bottomright_norm = tr_coords(bottomright)
-        hsize = S.size(1)
-        wsize = S.size(2)
-        grid = torch.meshgrid(
-            torch.arange(
-                topleft_norm[1],
-                bottomright_norm[1],
-                (bottomright_norm[1] - topleft_norm[1]) / hsize,
-            )[:hsize],
-            torch.arange(
-                topleft_norm[0],
-                bottomright_norm[0],
-                (bottomright_norm[0] - topleft_norm[0]) / wsize,
-            )[:wsize],
-        )
-        grid = torch.stack(grid, dim=2).to(S.device)
-        assert (
-            grid.size(0) == hsize
-        ), "Resampled grid expected " "height={}, actual height={}".format(hsize, grid.size(0))
-        assert grid.size(1) == wsize, "Resampled grid expected " "width={}, actual width={}".format(
-            wsize, grid.size(1)
-        )
-        S_new = F.grid_sample(
-            S.unsqueeze(0),
-            torch.unsqueeze(grid, 0),
-            mode="bilinear",
-            padding_mode="border",
-            align_corners=True,
-        ).squeeze(0)
-        I_new = F.grid_sample(
-            I.unsqueeze(0),
-            torch.unsqueeze(grid, 0),
-            mode="bilinear",
-            padding_mode="border",
-            align_corners=True,
-        ).squeeze(0)
-        U_new = F.grid_sample(
-            U.unsqueeze(0),
-            torch.unsqueeze(grid, 0),
-            mode="bilinear",
-            padding_mode="border",
-            align_corners=True,
-        ).squeeze(0)
-        V_new = F.grid_sample(
-            V.unsqueeze(0),
-            torch.unsqueeze(grid, 0),
-            mode="bilinear",
-            padding_mode="border",
-            align_corners=True,
-        ).squeeze(0)
-        return S_new, I_new, U_new, V_new
-
-    def crop(self, indices_cropped, bboxes_old, bboxes_new):
-        """
-        Crop outputs for selected bounding boxes to the new bounding boxes.
-        """
-        # VK: cropping is ignored for now
-        # for i, ic in enumerate(indices_cropped):
-        #     self.S[ic], self.I[ic], self.U[ic], self.V[ic] = \
-        #         self._crop(self.S[ic], self.I[ic], self.U[ic], self.V[ic],
-        #                    bboxes_old[i], bboxes_new[i])
-        pass
-
-    def hflip(self, transform_data: DensePoseTransformData) -> None:
-        """
-        Change S, I, U and V to take into account a Horizontal flip.
-        """
-        if self.I.shape[0] > 0:
-            for el in "SIUV":
-                self.__dict__[el] = torch.flip(self.__dict__[el], [3])
-            self._flip_iuv_semantics_tensor(transform_data)
-            self._flip_segm_semantics_tensor(transform_data)
-
-    def _flip_iuv_semantics_tensor(self, dp_transform_data: DensePoseTransformData) -> None:
-        point_label_symmetries = dp_transform_data.point_label_symmetries
-        uv_symmetries = dp_transform_data.uv_symmetries
-
-        N, C, H, W = self.U.shape
-        u_loc = (self.U[:, 1:, :, :].clamp(0, 1) * 255).long()
-        v_loc = (self.V[:, 1:, :, :].clamp(0, 1) * 255).long()
-        Iindex = torch.arange(C - 1, device=self.U.device)[None, :, None, None].expand(
-            N, C - 1, H, W
-        )
-        self.U[:, 1:, :, :] = uv_symmetries["U_transforms"][Iindex, v_loc, u_loc].to(
-            device=self.U.device
-        )
-        self.V[:, 1:, :, :] = uv_symmetries["V_transforms"][Iindex, v_loc, u_loc].to(
-            device=self.V.device
-        )
-
-        for el in "IUV":
-            self.__dict__[el] = self.__dict__[el][:, point_label_symmetries, :, :]
-
-    def _flip_segm_semantics_tensor(self, dp_transform_data):
-        if self.S.shape[1] == DensePoseDataRelative.N_BODY_PARTS + 1:
-            self.S = self.S[:, dp_transform_data.mask_label_symmetries, :, :]
-
-    def to_result(self, boxes_xywh):
-        """
-        Convert DensePose outputs to results format. Results are more compact,
-        but cannot be resampled any more
-        """
-        result = DensePoseResult(boxes_xywh, self.S, self.I, self.U, self.V)
-        return result
-
-    def __getitem__(self, item):
-        if isinstance(item, int):
-            S_selected = self.S[item].unsqueeze(0)
-            I_selected = self.I[item].unsqueeze(0)
-            U_selected = self.U[item].unsqueeze(0)
-            V_selected = self.V[item].unsqueeze(0)
-            conf_selected = {}
-            for key in self.confidences:
-                conf_selected[key] = self.confidences[key][item].unsqueeze(0)
-        else:
-            S_selected = self.S[item]
-            I_selected = self.I[item]
-            U_selected = self.U[item]
-            V_selected = self.V[item]
-            conf_selected = {}
-            for key in self.confidences:
-                conf_selected[key] = self.confidences[key][item]
-        return DensePoseOutput(S_selected, I_selected, U_selected, V_selected, conf_selected)
-
-    def __str__(self):
-        s = "DensePoseOutput S {}, I {}, U {}, V {}".format(
-            list(self.S.size()), list(self.I.size()), list(self.U.size()), list(self.V.size())
-        )
-        s_conf = "confidences: [{}]".format(
-            ", ".join([f"{key} {list(self.confidences[key].size())}" for key in self.confidences])
-        )
-        return ", ".join([s, s_conf])
-
-    def __len__(self):
-        return self.S.size(0)
-
-
-class DensePoseResult(object):
-    def __init__(self, boxes_xywh, S, I, U, V):
-        self.results = []
-        self.boxes_xywh = boxes_xywh.cpu().tolist()
-        assert len(boxes_xywh.size()) == 2
-        assert boxes_xywh.size(1) == 4
-        for i, box_xywh in enumerate(boxes_xywh):
-            result_i = self._output_to_result(box_xywh, S[[i]], I[[i]], U[[i]], V[[i]])
-            result_numpy_i = result_i.cpu().numpy()
-            result_encoded_i = DensePoseResult.encode_png_data(result_numpy_i)
-            result_encoded_with_shape_i = (result_numpy_i.shape, result_encoded_i)
-            self.results.append(result_encoded_with_shape_i)
-
-    def __str__(self):
-        s = "DensePoseResult: N={} [{}]".format(
-            len(self.results), ", ".join([str(list(r[0])) for r in self.results])
-        )
-        return s
-
-    def _output_to_result(self, box_xywh, S, I, U, V):
-        x, y, w, h = box_xywh
-        w = max(int(w), 1)
-        h = max(int(h), 1)
-        result = torch.zeros([3, h, w], dtype=torch.uint8, device=U.device)
-        assert (
-            len(S.size()) == 4
-        ), "AnnIndex tensor size should have {} " "dimensions but has {}".format(4, len(S.size()))
-        s_bbox = F.interpolate(S, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)
-        assert (
-            len(I.size()) == 4
-        ), "IndexUV tensor size should have {} " "dimensions but has {}".format(4, len(S.size()))
-        i_bbox = (
-            F.interpolate(I, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)
-            * (s_bbox > 0).long()
-        ).squeeze(0)
-        assert len(U.size()) == 4, "U tensor size should have {} " "dimensions but has {}".format(
-            4, len(U.size())
-        )
-        u_bbox = F.interpolate(U, (h, w), mode="bilinear", align_corners=False)
-        assert len(V.size()) == 4, "V tensor size should have {} " "dimensions but has {}".format(
-            4, len(V.size())
-        )
-        v_bbox = F.interpolate(V, (h, w), mode="bilinear", align_corners=False)
-        result[0] = i_bbox
-        for part_id in range(1, u_bbox.size(1)):
-            result[1][i_bbox == part_id] = (
-                (u_bbox[0, part_id][i_bbox == part_id] * 255).clamp(0, 255).to(torch.uint8)
-            )
-            result[2][i_bbox == part_id] = (
-                (v_bbox[0, part_id][i_bbox == part_id] * 255).clamp(0, 255).to(torch.uint8)
-            )
-        assert (
-            result.size(1) == h
-        ), "Results height {} should be equal" "to bounding box height {}".format(result.size(1), h)
-        assert (
-            result.size(2) == w
-        ), "Results width {} should be equal" "to bounding box width {}".format(result.size(2), w)
-        return result
-
-    @staticmethod
-    def encode_png_data(arr):
-        """
-        Encode array data as a PNG image using the highest compression rate
-        @param arr [in] Data stored in an array of size (3, M, N) of type uint8
-        @return Base64-encoded string containing PNG-compressed data
-        """
-        assert len(arr.shape) == 3, "Expected a 3D array as an input," " got a {0}D array".format(
-            len(arr.shape)
-        )
-        assert arr.shape[0] == 3, "Expected first array dimension of size 3," " got {0}".format(
-            arr.shape[0]
-        )
-        assert arr.dtype == np.uint8, "Expected an array of type np.uint8, " " got {0}".format(
-            arr.dtype
-        )
-        data = np.moveaxis(arr, 0, -1)
-        im = Image.fromarray(data)
-        fstream = BytesIO()
-        im.save(fstream, format="png", optimize=True)
-        s = base64.encodebytes(fstream.getvalue()).decode()
-        return s
-
-    @staticmethod
-    def decode_png_data(shape, s):
-        """
-        Decode array data from a string that contains PNG-compressed data
-        @param Base64-encoded string containing PNG-compressed data
-        @return Data stored in an array of size (3, M, N) of type uint8
-        """
-        fstream = BytesIO(base64.decodebytes(s.encode()))
-        im = Image.open(fstream)
-        data = np.moveaxis(np.array(im.getdata(), dtype=np.uint8), -1, 0)
-        return data.reshape(shape)
-
-    def __len__(self):
-        return len(self.results)
-
-    def __getitem__(self, item):
-        result_encoded = self.results[item]
-        bbox_xywh = self.boxes_xywh[item]
-        return result_encoded, bbox_xywh
-
-
-class DensePoseList(object):
-
-    _TORCH_DEVICE_CPU = torch.device("cpu")
-
-    def __init__(self, densepose_datas, boxes_xyxy_abs, image_size_hw, device=_TORCH_DEVICE_CPU):
-        assert len(densepose_datas) == len(boxes_xyxy_abs), (
-            "Attempt to initialize DensePoseList with {} DensePose datas "
-            "and {} boxes".format(len(densepose_datas), len(boxes_xyxy_abs))
-        )
-        self.densepose_datas = []
-        for densepose_data in densepose_datas:
-            assert isinstance(densepose_data, DensePoseDataRelative) or densepose_data is None, (
-                "Attempt to initialize DensePoseList with DensePose datas "
-                "of type {}, expected DensePoseDataRelative".format(type(densepose_data))
-            )
-            densepose_data_ondevice = (
-                densepose_data.to(device) if densepose_data is not None else None
-            )
-            self.densepose_datas.append(densepose_data_ondevice)
-        self.boxes_xyxy_abs = boxes_xyxy_abs.to(device)
-        self.image_size_hw = image_size_hw
-        self.device = device
-
-    def to(self, device):
-        if self.device == device:
-            return self
-        return DensePoseList(self.densepose_datas, self.boxes_xyxy_abs, self.image_size_hw, device)
-
-    def __iter__(self):
-        return iter(self.densepose_datas)
-
-    def __len__(self):
-        return len(self.densepose_datas)
-
-    def __repr__(self):
-        s = self.__class__.__name__ + "("
-        s += "num_instances={}, ".format(len(self.densepose_datas))
-        s += "image_width={}, ".format(self.image_size_hw[1])
-        s += "image_height={})".format(self.image_size_hw[0])
-        return s
-
-    def __getitem__(self, item):
-        if isinstance(item, int):
-            densepose_data_rel = self.densepose_datas[item]
-            return densepose_data_rel
-        elif isinstance(item, slice):
-            densepose_datas_rel = self.densepose_datas[item]
-            boxes_xyxy_abs = self.boxes_xyxy_abs[item]
-            return DensePoseList(
-                densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device
-            )
-        elif isinstance(item, torch.Tensor) and (item.dtype == torch.bool):
-            densepose_datas_rel = [self.densepose_datas[i] for i, x in enumerate(item) if x > 0]
-            boxes_xyxy_abs = self.boxes_xyxy_abs[item]
-            return DensePoseList(
-                densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device
-            )
-        else:
-            densepose_datas_rel = [self.densepose_datas[i] for i in item]
-            boxes_xyxy_abs = self.boxes_xyxy_abs[item]
-            return DensePoseList(
-                densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device
-            )
spaces/CVPR/LIVE/thrust/thrust/mr/detail/config.h
DELETED
@@ -1,36 +0,0 @@
-/*
- *  Copyright 2018 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <cstddef>
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/alignment.h>
-#include <thrust/detail/config/cpp_compatibility.h>
-
-#define THRUST_MR_DEFAULT_ALIGNMENT THRUST_ALIGNOF(::thrust::detail::max_align_t)
-
-#if THRUST_CPP_DIALECT >= 2017
-#  if __has_include(<memory_resource>)
-#    define THRUST_MR_STD_MR_HEADER <memory_resource>
-#    define THRUST_MR_STD_MR_NS std::pmr
-#  elif __has_include(<experimental/memory_resource>)
-#    define THRUST_MR_STD_MR_HEADER <experimental/memory_resource>
-#    define THRUST_MR_STD_MR_NS std::experimental::pmr
-#  endif
-#endif
-
spaces/CVPR/ml-talking-face/translator/v3.py
DELETED
@@ -1,58 +0,0 @@
-from google.cloud import translate
-import yaml
-
-
-class GoogleAuthTranslation:
-    def __init__(self, project_id, yaml_path='lang.yaml'):
-        self.translator = translate.TranslationServiceClient()
-        self.location = "global"
-        self.parent = f"projects/{project_id}/locations/{self.location}"
-
-        with open(yaml_path) as f:
-            self.supporting_languages = yaml.load(f, Loader=yaml.FullLoader)
-
-    def _detect(self, query):
-        response = self.translator.detect_language(
-            request={
-                "parent": self.parent,
-                "content": query,
-                "mime_type": "text/plain",  # mime types: text/plain, text/html
-            }
-        )
-
-        for language in response.languages:
-            # First language is the most confident one
-            return language.language_code
-
-    def _get_dest_from_lang(self, lang):
-        try:
-            return self.supporting_languages[lang]['google_dest']
-
-        except KeyError as e:
-            raise e
-
-    def _get_lang_from_dest(self, dest):
-        for key in self.supporting_languages:
-            if self.supporting_languages[key]['google_dest'] == dest:
-                return key
-
-        raise RuntimeError(f"Detected langauge is not supported in our multilingual TTS. |\n Code: {dest} | See https://cloud.google.com/translate/docs/languages")
-
-    def translate(self, query, lang):
-
-        dest = self._get_dest_from_lang(lang)
-
-        response = self.translator.translate_text(
-            request={
-                "parent": self.parent,
-                "contents": [query],
-                "mime_type": "text/plain",  # mime types: text/plain, text/html
-                "target_language_code": dest,
-            }
-        )
-
-        return " ".join([translation.translated_text for translation in response.translations])
-
-    def detect(self, query):
-        dest = self._detect(query)
-        return self._get_lang_from_dest(dest)
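A minimal usage sketch for the wrapper above. It assumes GOOGLE_APPLICATION_CREDENTIALS points at valid Google Cloud credentials, a real project id, and a lang.yaml whose entries map language keys to {'google_dest': <BCP-47 code>}; all values below are placeholders:

    translation = GoogleAuthTranslation(project_id="my-gcp-project")  # hypothetical id

    lang = translation.detect("안녕하세요")                # e.g. 'ko', if mapped in lang.yaml
    text = translation.translate("안녕하세요", lang="en")  # 'en' must be a key in lang.yaml
    print(lang, text)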