Commit
·
ee0ff33
1
Parent(s):
9149177
Update parquet files (step 68 of 476)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/2021 Crackdown Band.md +0 -23
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/3d Object Converter V5.30 Serial Fix.md +0 -129
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cs 1.6 Bot Paketi Download Gezginler Enhance Your Counter Strike 1.6 Experience with Bots.md +0 -109
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Sims 3 Supernatural No Cd Crack Not ) A Guide For Sims 3 Supernatural Fans.md +0 -134
- spaces/1gistliPinn/ChatGPT4/Examples/Download Full Movie Yaariyan In 720p.md +0 -32
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CSR Racing Mod APK Experience the Thrill of Drag Racing with Unlimited Money and Gold.md +0 -101
- spaces/1phancelerku/anime-remove-background/Build Shoot and Survive in 1v1.LOL - The Fastest Battle Royale Experience.md +0 -114
- spaces/1phancelerku/anime-remove-background/Download True Story in 480p Quality - The Best Site for Movies.md +0 -128
- spaces/7hao/bingo/src/lib/bots/bing/sr.ts +0 -106
- spaces/A666sxr/Genshin_TTS/models.py +0 -730
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/data/extract_mel_spectrogram.py +0 -151
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/transform.py +0 -30
- spaces/ALSv/Chat-with-Llama-2-70b/README.md +0 -13
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/custom_model.py +0 -22
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/H2o.py +0 -109
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Vercel.py +0 -377
- spaces/Adapter/T2I-Adapter/ldm/lr_scheduler.py +0 -98
- spaces/AdithyaSNair/PCOS_Prediction/README.md +0 -12
- spaces/AgentVerse/agentVerse/agentverse/memory/base.py +0 -23
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/dialog-quest/DialogQuest.js +0 -54
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fileselectorbutton/FileSelectorButton.js +0 -45
- spaces/Aloento/9Nine-VITS/modules.py +0 -387
- spaces/Amrrs/DragGan-Inversion/dnnlib/__init__.py +0 -9
- spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py +0 -205
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py +0 -2
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Spell-book.md +0 -107
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/update_macos.sh +0 -26
- spaces/AntNikYab/NaturalLanguageProcessing/pages/polyclinics.py +0 -115
- spaces/AnthonyTruchetPoC/persistent-docker/on_startup.sh +0 -5
- spaces/Arcypojeb/NeuralServer/app.py +0 -304
- spaces/Arikkod/FoodVisionMini/README.md +0 -13
- spaces/AtomdffAI/wechatgpt4atom/docker/entrypoint.sh +0 -81
- spaces/Brasd99/JustClothify/README.md +0 -12
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/doc/GETTING_STARTED.md +0 -58
- spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/has_nested_type.h +0 -32
- spaces/CVPR/regionclip-demo/detectron2/engine/hooks.py +0 -466
- spaces/CVPR/regionclip-demo/detectron2/layers/blocks.py +0 -111
- spaces/CVPR/v-doc_abstractive_mac/config.py +0 -491
- spaces/Carlosito16/HXM-summarization/README.md +0 -12
- spaces/ChandraMohanNayal/AutoGPT/autogpt/prompt.py +0 -204
- spaces/Cyril666/my_abi/modules/model_language.py +0 -67
- spaces/DCXGAO/DeepDanbooru_string/README.md +0 -39
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/__init__.py +0 -46
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/__init__.py +0 -1
- spaces/Daniton/superjourney/README.md +0 -12
- spaces/Danky/dreamlike-art-dreamlike-diffusion-1.0/app.py +0 -3
- spaces/Datasculptor/DescriptionGPT/datasets/README.md +0 -207
- spaces/Detomo/AnimeGAN/README.md +0 -13
- spaces/DiffusionArtco/Diffusion200Max/app.py +0 -84
- spaces/Dinoking/Guccio-AI-Designer/netdissect/broden.py +0 -271
spaces/1acneusushi/gradio-2dmoleculeeditor/data/2021 Crackdown Band.md
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Crackdown Band: The Italian Hardcore Metal Sensation</h1>
|
3 |
-
<p>Crackdown is a band from Ravenna, Italy, that plays a blend of hardcore and metal music. The band was formed in 1995 and released their first album, 74, in 1997. The album featured a raw and aggressive sound that earned them a loyal fan base in the underground scene.</p>
|
4 |
-
<h2>crackdown band</h2><br /><p><b><b>DOWNLOAD</b> ✶✶✶ <a href="https://byltly.com/2uKzTu">https://byltly.com/2uKzTu</a></b></p><br /><br />
|
5 |
-
<p>In 1998, Crackdown signed with Diehard Music Worldwide and released their second album, Rise Up. The album showcased a more mature and refined sound, with influences from bands like Biohazard, Machine Head, and Pantera. The album also featured a guest appearance by Evan Seinfeld from Biohazard on the song "Never".</p>
|
6 |
-
<p>Crackdown toured extensively in Europe and Japan to promote their album, sharing the stage with bands like Pro-Pain, Agnostic Front, Madball, and Snapcase. The band also appeared on several compilations and tribute albums, such as A Tribute to Sick of It All and A Tribute to Slayer.</p>
|
7 |
-
<p>Crackdown is currently working on new material and plans to release their third album soon. The band consists of Andrea (vocals), Dima (guitar), Pippo (bass), and Lorenzo (drums). They are known for their energetic and powerful live performances, as well as their social and political lyrics.</p>
|
8 |
-
<p>If you are a fan of hardcore metal music, you should check out Crackdown. They are one of the most promising bands in the Italian scene and have a lot to offer to the genre. You can follow them on Facebook or watch some of their videos on YouTube.</p>
|
9 |
-
|
10 |
-
<h2>Crackdown's Influences and Style</h2>
|
11 |
-
<p>Crackdown's music is influenced by various bands and genres, such as hardcore, metal, punk, rap, and industrial. The band cites Biohazard, Machine Head, Pantera, Sick of It All, Slayer, Sepultura, Cypress Hill, and Ministry as some of their main inspirations.</p>
|
12 |
-
<p></p>
|
13 |
-
<p>Crackdown's style is characterized by heavy riffs, groovy rhythms, fast tempos, and aggressive vocals. The band also incorporates elements of rap and electronic music in some of their songs, creating a diverse and original sound. The band's lyrics deal with topics such as social injustice, corruption, violence, racism, and personal struggles.</p>
|
14 |
-
|
15 |
-
<h2>Crackdown's Discography and Reviews</h2>
|
16 |
-
<p>Crackdown has released two albums so far: 74 (1997) and Rise Up (1998). Both albums received positive reviews from critics and fans alike. Here are some of the highlights from their discography:</p>
|
17 |
-
<ul>
|
18 |
-
<li>74: The debut album by Crackdown, released by Freak Out! Records in 1997. The album features 13 tracks of raw and brutal hardcore metal, with songs like "Headquarter", "Release the Pain", and "In Your Face". The album was praised for its intensity and honesty.</li>
|
19 |
-
<li>Rise Up: The second album by Crackdown, released by Diehard Music Worldwide in 1998. The album features 12 tracks of mature and refined hardcore metal, with songs like "Never", "Ten", "Keep It in the Family", and "Pride". The album was acclaimed for its production and diversity.</li>
|
20 |
-
</ul>
|
21 |
-
<p>You can find Crackdown's albums on Discogs or on streaming platforms like Spotify and Apple Music.</p> ddb901b051<br />
|
22 |
-
<br />
|
23 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/3d Object Converter V5.30 Serial Fix.md
DELETED
@@ -1,129 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>3D Object Converter V5.30 Serial</h1>
|
3 |
-
<p>If you are looking for a powerful and versatile tool that can help you view, convert, and manipulate 3D models in different formats, you might want to check out <strong>3D Object Converter V5.30 Serial</strong>. This is a shareware tool that allows you to interactively view and translate 3D polygon models between more than 700 file formats. In this article, we will show you what 3D Object Converter can do for you, how to download and install it, how to use it, and some tips and tricks for getting the most out of it.</p>
|
4 |
-
<h2>3d Object Converter V5.30 Serial</h2><br /><p><b><b>Download File</b> >>> <a href="https://byltly.com/2uKyDY">https://byltly.com/2uKyDY</a></b></p><br /><br />
|
5 |
-
<h2>What is 3D Object Converter?</h2>
|
6 |
-
<p><strong>3D Object Converter</strong> is a software application that was developed by Zoltán Karpati, a Hungarian programmer who has been working on it since 1999. The software is designed to help users who work with 3D models in various fields, such as engineering, architecture, animation, gaming, education, and hobby. The software can handle almost any 3D file format that exists, including popular ones like STL, OBJ, 3DS, FBX, DXF, LWO, MDL, MD2, MD3, Z3AM, and many more. The software can also import and export data from various CAD/CAM/CAE systems, such as AutoCAD, CATIA, SolidWorks, Pro/Engineer, Inventor, Maya, Blender, SketchUp, etc.</p>
|
7 |
-
<p>The main features and benefits of using <strong>3D Object Converter</strong> are:</p>
|
8 |
-
<ul>
|
9 |
-
<li>It allows you to view 3D models in different modes, such as wireframe, shaded, textured, solid, or transparent.</li>
|
10 |
-
<li>It allows you to convert 3D models between different formats with high accuracy and quality.</li>
|
11 |
-
<li>It allows you to edit 3D models by performing operations like scaling, rotating, mirroring, translating, transforming, merging, splitting, welding, smoothing, etc.</li>
|
12 |
-
<li>It allows you to analyze 3D models by measuring distances, angles, areas, volumes, bounding boxes, center of gravity, etc.</li>
|
13 |
-
<li>It allows you to repair 3D models by fixing errors like holes, gaps, degenerate faces, inverted normals, duplicate vertices, etc.</li>
|
14 |
-
<li>It allows you to optimize 3D models by reducing polygon count, simplifying geometry, removing hidden faces, etc.</li>
|
15 |
-
<li>It allows you to save and export 3D models in different formats with various options like compression level, quality level, texture mapping, material properties, etc.</li>
|
16 |
-
</ul>
|
17 |
-
<h2>How to download and install 3D Object Converter V5.30 Serial</h2>
|
18 |
-
<p>To download and install <strong>3D Object Converter V5.30 Serial</strong>, you need to follow these steps:</p>
|
19 |
-
<ol>
|
20 |
-
<li>Go to the official website of <strong>3D Object Converter</strong> at <a href="https://web.archive.org/web/20211126174007/http://web.axelero.hu/karpo/">http://web.axelero.hu/karpo/</a>.</li>
|
21 |
-
<li>Click on the <em>Download</em> link on the left side menu.</li>
|
22 |
-
<li>Select the <em>Download Now!</em> button under the <em>v5.30 (2014-10-06)</em> section.</li>
|
23 |
-
<li>Save the file <em>objconv530.zip</em> (4.79 MB) on your computer.</li>
|
24 |
-
<li>Unzip the file using a program like WinZip or WinRAR.</li>
|
25 |
-
<li>You will find two files inside: <em>objconv.exe</em> (the main executable file) and <em>file_id.diz</em> (the file containing the serial number).</li>
|
26 |
-
<li>To install <strong>3D Object Converter</strong>, you just need to copy <em>objconv.exe</em> to any folder on your computer. You can also create a shortcut on your desktop or start menu for easy access.</li>
|
27 |
-
<li>To register <strong>3D Object Converter</strong>, you need to run <em>objconv.exe</em>, go to the <em>About/Register...</em> menu item, enter your name, and copy-paste the serial number from <em>file_id.diz</em>.</li>
|
28 |
-
<li>You will see a message confirming that your registration was successful.</li>
|
29 |
-
<li>You can now use <strong>3D Object Converter</strong> without any limitations for as long as you want.</li>
|
30 |
-
</ol>
|
31 |
-
<h2>How to use 3D Object Converter V5.30 Serial</h2>
|
32 |
-
<p>To use <strong>3D Object Converter V5.30 Serial</strong>, you need to follow these steps:</p>
|
33 |
-
<ol>
|
34 |
-
<li>Run <em>objconv.exe</em>. You will see a window with a toolbar, a menu bar, a status bar, and a blank workspace.</li>
|
35 |
-
<li>To open a 3D model file, go to the <em>File/Open...</em> menu item, or click on the <img src="https://web.archive.org/web/20211126174007im_/http://web.axelero.hu/karpo/images/open.gif" alt="Open" width="16" height="16"> button on the toolbar, or press Ctrl+O on your keyboard.</li>
|
36 |
-
<li>A dialog box will appear where you can browse your computer for the file you want to open. You can also type the file name or drag-and-drop it from another window. You can filter the files by their extensions using the drop-down list at the bottom.</li>
|
37 |
-
<li>Select the file you want to open and click on the <em>Open</em> button. The file will be loaded into <strong>3D Object Converter</strong>. You will see some information about the file on the status bar, such as its name, size, format, vertex count, face count, etc.</li>
|
38 |
-
<li>You can view the 3D model in different modes by clicking on the buttons on the toolbar or using the keyboard shortcuts. The available modes are: <ul>
|
39 |
-
<li><img src="https://web.archive.org/web/20211126174007im_/http://web.axelero.hu/karpo/images/wire.gif" alt="Wireframe" width="16" height="16"> Wireframe mode (W): shows only the edges of the faces.</li>
|
40 |
-
<li><img src="https://web.archive.org/web/20211126174007im_/http://web.axelero.hu/karpo/images/shade.gif" alt="Shaded" width="16" height="16"> Shaded mode (S): shows the faces with flat shading.</li>
|
41 |
-
<li><img src="https://web.archive.org/web/20211126174007im_/http://web.axelero.hu/karpo/images/tex.gif" alt="Textured" width="16" height="16"> Textured mode (T): shows the faces with texture mapping if available.</li>
|
42 |
-
<li><img src="https://web.archive.org/web/20211126174007im_/http://web.axelero.hu/karpo/images/solid.gif" alt="Solid" width="16" height="16"> Solid mode (L): shows the faces with smooth shading.</li>
|
43 |
-
<li><img src="https://web.archive.org/web/20211126174007im_/http://web.axelero.hu/karpo/images/trans.gif" alt="Transparent" width="16" height="16"> Transparent mode (R): shows the faces with transparency if available.</li>
|
44 |
-
</ul></li>
|
45 |
-
<li>You can rotate, pan, zoom, and fit the 3D model in the workspace by using your mouse or keyboard. The available commands are: <ul>
|
46 |
-
<li>To rotate: hold down the left mouse button and move your mouse horizontally or vertically. You can also use the arrow keys on your keyboard.</li>
|
47 |
-
Here is the rest of the article with HTML formatting: hold down the right mouse button and move your mouse horizontally or vertically. You can also use the Shift+arrow keys on your keyboard.</li>
|
48 |
-
<li>To zoom: use the mouse wheel or the + and - keys on your keyboard.</li>
|
49 |
-
<li>To fit: double-click on the left mouse button or press F on your keyboard.</li>
|
50 |
-
</ul></li>
|
51 |
-
<li>To convert a 3D model file, go to the <em>File/Save As...</em> menu item, or click on the <img src="https://web.archive.org/web/20211126174007im_/http://web.axelero.hu/karpo/images/save.gif" alt="Save" width="16" height="16"> button on the toolbar, or press Ctrl+S on your keyboard.</li>
|
52 |
-
<li>A dialog box will appear where you can choose the output file format from a drop-down list at the bottom. You can also specify the output file name and location.</li>
|
53 |
-
<li>Depending on the output file format you choose, you may see another dialog box where you can adjust some options for the conversion, such as compression level, quality level, texture mapping, material properties, etc.</li>
|
54 |
-
<li>Click on the <em>Save</em> button to start the conversion process. The file will be saved in the output format you selected. You will see some information about the conversion on the status bar, such as time elapsed, file size, vertex count, face count, etc.</li>
|
55 |
-
</ol>
|
56 |
-
<h2>Tips and tricks for using 3D Object Converter V5.30 Serial</h2>
|
57 |
-
<p>To get the most out of <strong>3D Object Converter V5.30 Serial</strong>, here are some tips and tricks you can use:</p>
|
58 |
-
<ul>
|
59 |
-
<li>To customize the settings and preferences of <strong>3D Object Converter</strong>, go to the <em>Options/Settings...</em> menu item. You will see a dialog box where you can change various options for the interface, the file formats, the 3D viewer, the 3D editor, the 3D analyzer, the 3D repairer, the 3D optimizer, and the 3D converter.</li>
|
60 |
-
<li>To use the batch mode and convert multiple 3D files at once, go to the <em>File/Batch Mode...</em> menu item. You will see a dialog box where you can add files or folders to a list of input files, choose an output file format and location, and set some options for the batch conversion. You can also use command line parameters to run batch conversions without opening <strong>3D Object Converter</strong>.</li>
|
61 |
-
<li>To troubleshoot common issues and errors with <strong>3D Object Converter</strong>, go to the <em>Help/Troubleshooting...</em> menu item. You will see a dialog box where you can find some solutions for problems like corrupted files, unsupported formats, missing textures, invalid normals, etc.</li>
|
62 |
-
</ul>
|
63 |
-
<h2>Conclusion</h2>
|
64 |
-
<p>In conclusion, <strong>3D Object Converter V5.30 Serial</strong> is a powerful and versatile tool that can help you view, convert, and manipulate 3D models in different formats. It supports almost any 3D file format that exists, and it offers a lot of features and options for editing, analyzing, repairing, and optimizing 3D models. It is easy to use and fast to convert, and it can handle large and complex 3D models with ease. If you are looking for a reliable 3D file converter, you should definitely give <strong>3D Object Converter V5.30 Serial</strong> a try.</p>
|
65 |
-
<p>How to convert 3d models with 3d Object Converter V5.30<br />
|
66 |
-
3d Object Converter V5.30 crack download<br />
|
67 |
-
Best alternatives to 3d Object Converter V5.30<br />
|
68 |
-
3d Object Converter V5.30 license key generator<br />
|
69 |
-
3d Object Converter V5.30 tutorial and tips<br />
|
70 |
-
3d Object Converter V5.30 review and comparison<br />
|
71 |
-
3d Object Converter V5.30 supported formats and features<br />
|
72 |
-
3d Object Converter V5.30 free trial and discount<br />
|
73 |
-
3d Object Converter V5.30 system requirements and compatibility<br />
|
74 |
-
3d Object Converter V5.30 troubleshooting and error fixing<br />
|
75 |
-
Where to buy 3d Object Converter V5.30 online<br />
|
76 |
-
How to update 3d Object Converter V5.30 to the latest version<br />
|
77 |
-
How to uninstall 3d Object Converter V5.30 completely<br />
|
78 |
-
How to backup and restore 3d models with 3d Object Converter V5.30<br />
|
79 |
-
How to optimize 3d models with 3d Object Converter V5.30<br />
|
80 |
-
How to edit and modify 3d models with 3d Object Converter V5.30<br />
|
81 |
-
How to export and import 3d models with 3d Object Converter V5.30<br />
|
82 |
-
How to batch convert 3d models with 3d Object Converter V5.30<br />
|
83 |
-
How to view and inspect 3d models with 3d Object Converter V5.30<br />
|
84 |
-
How to measure and calculate 3d models with 3d Object Converter V5.30<br />
|
85 |
-
How to apply textures and materials to 3d models with 3d Object Converter V5.30<br />
|
86 |
-
How to animate and render 3d models with 3d Object Converter V5.30<br />
|
87 |
-
How to compress and decompress 3d models with 3d Object Converter V5.30<br />
|
88 |
-
How to merge and split 3d models with 3d Object Converter V5.30<br />
|
89 |
-
How to rotate and scale 3d models with 3d Object Converter V5.30<br />
|
90 |
-
How to align and snap 3d models with 3d Object Converter V5.30<br />
|
91 |
-
How to mirror and flip 3d models with 3d Object Converter V5.30<br />
|
92 |
-
How to extrude and inset 3d models with 3d Object Converter V5.30<br />
|
93 |
-
How to smooth and sharpen 3d models with 3d Object Converter V5.30<br />
|
94 |
-
How to hollow and solidify 3d models with 3d Object Converter V5.30<br />
|
95 |
-
How to cut and slice 3d models with 3d Object Converter V5.30<br />
|
96 |
-
How to bend and twist 3d models with 3d Object Converter V5.30<br />
|
97 |
-
How to sculpt and paint 3d models with 3d Object Converter V5.30<br />
|
98 |
-
How to add and remove vertices, edges, faces, polygons, etc., from/to a model using the software.<br />
|
99 |
-
How to create custom shapes and primitives using the software.<br />
|
100 |
-
How to use different tools and modes in the software interface.<br />
|
101 |
-
How to customize the software settings and preferences.<br />
|
102 |
-
How to use keyboard shortcuts and commands in the software.<br />
|
103 |
-
How to use plugins and extensions for the software.<br />
|
104 |
-
How to integrate the software with other applications and software.<br />
|
105 |
-
How to share and collaborate on projects using the software.<br />
|
106 |
-
How to print or scan a model using the software.<br />
|
107 |
-
What are the advantages and disadvantages of using the software.<br />
|
108 |
-
What are the common problems and solutions when using the software.<br />
|
109 |
-
What are the best practices and tips for using the software.<br />
|
110 |
-
What are some examples of projects made using the software.<br />
|
111 |
-
What are some resources and tutorials for learning the software.</p>
|
112 |
-
<p>If you have any questions or feedback about this article, please feel free to leave a comment below. We would love to hear from you!</p>
|
113 |
-
<h2>Frequently Asked Questions</h2>
|
114 |
-
<p>Here are some frequently asked questions about <strong>3D Object Converter V5.30 Serial</strong>:</p>
|
115 |
-
<ol>
|
116 |
-
<li><strong>How much does 3D Object Converter cost?</strong><br>
|
117 |
-
<strong>Answer:</strong> 3D Object Converter is shareware and you must register the program to continue using it after the 30-day trial period. The registration fee is $50 USD for a single user license, which includes free updates for all future versions of 3D Object Converter.</li>
|
118 |
-
<li><strong>What are the system requirements for 3D Object Converter?</strong><br>
|
119 |
-
<strong>Answer:</strong> 3D Object Converter works on Windows XP, Vista, 7, 8, 8.1, and 10 (32-bit and 64-bit). It requires at least a Pentium III processor, 256 MB of RAM, and 10 MB of free disk space. It also requires DirectX 9.0c or higher and OpenGL 1.1 or higher for rendering 3D models.</li>
|
120 |
-
<li><strong>Can I use 3D Object Converter on Mac or Linux?</strong><br>
|
121 |
-
<strong>Answer:</strong> No, unfortunately, 3D Object Converter is only available for Windows at this time. However, you may be able to run it on Mac or Linux using a virtual machine or an emulator like Wine.</li>
|
122 |
-
<li><strong>Can I use 3D Object Converter for commercial purposes?</strong><br>
|
123 |
-
<strong>Answer:</strong> Yes, you can use 3D Object Converter for commercial purposes as long as you have a valid license. You can also distribute your converted files without any restrictions.</li>
|
124 |
-
<li><strong>Where can I find more information and support for 3D Object Converter?</strong><br>
|
125 |
-
<strong>Answer:</strong> You can find more information and support for 3D Object Converter on its official website at <a href="https://web.archive.org/web/20211126174007/http://web.axelero.hu/karpo/">http://web.axelero.hu/karpo/</a>. You can also contact the developer by email at [email protected].</li>
|
126 |
-
</ol>
|
127 |
-
</p> 0a6ba089eb<br />
|
128 |
-
<br />
|
129 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cs 1.6 Bot Paketi Download Gezginler Enhance Your Counter Strike 1.6 Experience with Bots.md
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cs 1.6 Bot Paketi Download Gezginler: How to Play Counter Strike 1.6 with Bots</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>Counter Strike 1.6 is one of the most popular and legendary first-person shooter games ever created. It has millions of fans around the world who enjoy playing it online or offline with their friends or strangers. However, sometimes you may not have access to the internet, or you may want to practice your skills and have fun without worrying about other players. In that case, you may want to play Counter Strike 1.6 with bots.</p>
|
5 |
-
<p>Bots are computer-controlled players that can simulate human behavior and actions in the game. They can join your team or the enemy team, and they can follow commands, shoot, move, plant bombs, defuse bombs, and more. Playing with bots can be a great way to improve your aim, reflexes, strategy, and tactics in Counter Strike 1.6.</p>
|
6 |
-
<h2>Cs 1.6 Bot Paketi Download Gezginler</h2><br /><p><b><b>Download File</b> ::: <a href="https://byltly.com/2uKvk3">https://byltly.com/2uKvk3</a></b></p><br /><br />
|
7 |
-
<p>But how can you play Counter Strike 1.6 with bots? You need a special program called Cs 1.6 Bot Paketi, which is a bot package that adds bots to your game and lets you customize them according to your preferences. In this article, we will show you how to download, install, and use Cs 1.6 Bot Paketi in a few easy steps.</p>
|
8 |
-
<h2>How to download and install Cs 1.6 Bot Paketi?</h2>
|
9 |
-
<h3>Step 1: Download Cs 1.6 Bot Paketi from a reliable source</h3>
|
10 |
-
<p>The first thing you need to do is to download Cs 1.6 Bot Paketi from a trustworthy website that offers safe and virus-free downloads. One such website is <a href="http://bilgibak.net/counter-strike-1-6-bot-paketi-indir-download-gezginler-hotfile/">Bilgibak</a>, which provides a direct link to download Cs 1.6 Bot Paketi for free.</p>
|
11 |
-
<p>Alternatively, you can also use <a href="https://www.fullturkceindir.com/counter-strike-1-6-bot-paketi-indir.html">Full Türkçe İndir</a>, which also offers a free download link for Cs 1.6 Bot Paketi along with detailed instructions on how to install it.</p>
|
12 |
-
<p>Once you have downloaded Cs 1.6 Bot Paketi, you will get a zip file that contains the setup file and some other files.</p>
|
13 |
-
<h3>Step 2: Run the setup file and choose your language</h3>
|
14 |
-
<p>The next step is to run the setup file that you have downloaded from the zip file. You can do this by double-clicking on it or right-clicking on it and choosing "Run as administrator".</p>
|
15 |
-
<p>Cs 1.6 Bot Paketi Indir Gezginler<br />
|
16 |
-
Cs 1.6 Bot Paketi Kurulumu Gezginler<br />
|
17 |
-
Cs 1.6 Bot Paketi Nasıl Yüklenir Gezginler<br />
|
18 |
-
Cs 1.6 Bot Paketi Yükleme Gezginler<br />
|
19 |
-
Cs 1.6 Bot Paketi Full Download Gezginler<br />
|
20 |
-
Cs 1.6 Bot Paketi Ücretsiz Download Gezginler<br />
|
21 |
-
Cs 1.6 Bot Paketi Son Sürüm Download Gezginler<br />
|
22 |
-
Cs 1.6 Bot Paketi Türkçe Download Gezginler<br />
|
23 |
-
Cs 1.6 Bot Paketi En İyi Download Gezginler<br />
|
24 |
-
Cs 1.6 Bot Paketi Hızlı Download Gezginler<br />
|
25 |
-
Cs 1.6 Bot Paketi Güncel Download Gezginler<br />
|
26 |
-
Cs 1.6 Bot Paketi Sorunsuz Download Gezginler<br />
|
27 |
-
Cs 1.6 Bot Paketi Kolay Download Gezginler<br />
|
28 |
-
Cs 1.6 Bot Paketi Tek Link Download Gezginler<br />
|
29 |
-
Cs 1.6 Bot Paketi Alternatif Download Gezginler<br />
|
30 |
-
Cs 1.6 Bot Paketi Oyun İndir Club Gezginler<br />
|
31 |
-
Cs 1.6 Bot Paketi Tam İndir Gezginler<br />
|
32 |
-
Cs 1.6 Bot Paketi Oyun İndir Vip Gezginler<br />
|
33 |
-
Cs 1.6 Bot Paketi Oyun İndirme Sitesi Gezginler<br />
|
34 |
-
Cs 1.6 Bot Paketi Oyun İndirme Programı Gezginler<br />
|
35 |
-
Cs 1.6 Bot Paketi Oyun İndirme Linki Gezginler<br />
|
36 |
-
Cs 1.6 Bot Paketi Oyun İndirme Yöntemi Gezginler<br />
|
37 |
-
Cs 1.6 Bot Paketi Oyun İndirme Hilesi Gezginler<br />
|
38 |
-
Cs 1.6 Bot Paketi Oyun İndirme Rehberi Gezginler<br />
|
39 |
-
Cs 1.6 Bot Paketi Oyun İndirme Tavsiyesi Gezginler<br />
|
40 |
-
Cs 1.6 Bot Paketi Nasıl İndirilir Gezginler<br />
|
41 |
-
Cs 1.6 Bot Paketi Nasıl Kurulur Gezginler<br />
|
42 |
-
Cs 1.6 Bot Paketi Nasıl Çalıştırılır Gezginler<br />
|
43 |
-
Cs 1.6 Bot Paketi Nasıl Güncellenir Gezginler<br />
|
44 |
-
Cs 1.6 Bot Paketi Nasıl Silinir Gezginler<br />
|
45 |
-
Cs 1.6 Bot Paketi Nasıl Kullanılır Gezginler<br />
|
46 |
-
Cs 1.6 Bot Paketi Nasıl Ayarlanır Gezginler<br />
|
47 |
-
Cs 1.6 Bot Paketi Nasıl Etkinleştirilir Gezginler<br />
|
48 |
-
Cs 1.6 Bot Paketi Nasıl Devre Dışı Bırakılır Gezginler<br />
|
49 |
-
Cs 1.6 Bot Paketi Nasıl Eklenebilir Gezginler<br />
|
50 |
-
Cs 1.6 Bot Paketi Nasıl Kaldırılır Gezginler<br />
|
51 |
-
Cs 1.6 Bot Paketi Nasıl Değiştirilir Gezginler<br />
|
52 |
-
Cs 1.6 Bot Paketi Nasıl Düzenlenir Gezginler<br />
|
53 |
-
Cs 1.6 Bot Paketi Nasıl Özelleştirilir Gezginler<br />
|
54 |
-
Cs 1.6 Bot Paketi Nasıl Optimize Edilir Gezginler<br />
|
55 |
-
Cs 1.6 Bot Paketi Nereden İndirilir Gezginler<br />
|
56 |
-
Cs 1.6 Bot Paketi Nerede Kurulur Gezginler<br />
|
57 |
-
Cs 1.6 Bot Paketi Nerede Bulunur Gezginler<br />
|
58 |
-
Cs 1.6 Bot Paketi Nerede Saklanır Gezginler<br />
|
59 |
-
Cs 1.6 Bot Paketi Nerede Çalıştırılır Gezginler<br />
|
60 |
-
Cs 1.6 Bot Paketi Neden İndirilir Gezginler<br />
|
61 |
-
Cs 1.6 Bot Paketi Neden Kurulur Gezginler<br />
|
62 |
-
Cs 1.6 Bot Paketi Neden Kullanılır Gezginler<br />
|
63 |
-
Cs 1.6 Bot Paketi Ne İşe Yarar Gezginler<br />
|
64 |
-
Cs 1.6 Bot Paketi Ne Zaman Çıktı Gezginler</p>
|
65 |
-
<p>A window will pop up asking you to choose your language for the installation process. You can choose from Turkish, English, French, German, or Spanish.</p>
|
66 |
-
<h3>Step 3: Select the destination folder for Counter Strike 1.6</h3>
|
67 |
-
<p>After choosing your language, you will see another window that asks you to select the destination folder for Counter Strike 1.6.</p>
|
68 |
-
<p>If you have already installed Counter Strike 1.6 on your computer, you should select the same folder where it is located.</p>
|
69 |
-
<p>If you have not installed Counter Strike 1.6 yet, you should select a folder where you want to install it.</p>
|
70 |
-
<p>You can browse for the folder by clicking on the "Browse" button or typing in the path manually.</p>
|
71 |
-
<h3>Step 4: Wait for the installation to finish and launch Counter Strike 1.6</h3>
|
72 |
-
<p>The final step is to wait for the installation of Cs 1.6 Bot Paketi to finish.</p>
|
73 |
-
<p>This may take a few minutes depending on your computer speed and internet connection.</p>
|
74 |
-
<p>When the installation is done, you will see a message that says "Installation Complete". You can click on "Finish" to close the window.</p>
|
75 |
-
<p>Now you can launch Counter Strike 1.6 by clicking on its icon on your desktop or start menu.</p>
|
76 |
-
<h2>How to use Cs 1.6 Bot Paketi?</h2>
|
77 |
-
<h3>Step 1: Create a new game or join an existing one</h3>
|
78 |
-
<p>To use Cs 1.6 Bot Paketi, you need to create a new game or join an existing one.</p>
|
79 |
-
<p>To create a new game, go to "New Game" from the main menu of Counter Strike 1.6.</p>
|
80 |
-
<p>You will see a list of maps that you can choose from.</p>
|
81 |
-
<p>Select the map that you want to play on and click on "Start".</p>
|
82 |
-
<p>To join an existing game, go to "Find Servers" from the main menu of Counter Strike 1.6.</p>
|
83 |
-
<p>You will see a list of servers that are available online.</p>
|
84 |
-
<p>Select the server that you want to join and click on "Connect".</p>
|
85 |
-
<h3>Step 2: Press "H" to open the bot menu</h3>
|
86 |
-
<p>Once you are in a game, press "H" on your keyboard to open the bot menu.</p>
|
87 |
-
<p>This is where you can add bots to your game and customize them according to your preferences.</p>
|
88 |
-
<h3>Step 3: Add bots to your team or the enemy team</h3>
|
89 |
-
<p>In the bot menu, you will see several options that allow you to add bots to your team or the enemy team.</p>
|
90 |
-
<p>You can add as many bots as you want by clicking on the "+" button next to each option.</p>
|
91 |
-
<p>You can also remove bots by clicking on the "-" button next to each option.</p>
|
92 |
-
<p>The options are:</p>
|
93 |
-
- Add CT bot: This adds a bot to your team if you are playing as counter-terrorists (CT) or to the enemy team if you are playing as terrorists (T). - Add T bot: This adds a bot to your team if you are playing as terrorists (T) or to the enemy team if you are playing as counter-terrorists (CT). - Add random bot: This adds a bot randomly either to your team or the enemy team. - Fill server with bots: This fills up all the empty slots in both teams with bots. - Kick all bots: This removes all bots from both teams. <h3>Step 4: Customize the bot settings and difficulty level</h3>
|
94 |
-
<p>In addition to adding bots, you can also customize their settings and difficulty level in the bot menu.</p>
|
95 |
-
- Bot quota: This sets how many bots will be added automatically when a new round starts. - Bot difficulty: This sets how smart and skilled the bots will be. - Bot prefix: This sets what name prefix will be used for all bots. - Bot chat: This sets whether bots will talk during gameplay. - Bot weapon mode: This sets what weapons bots will use. - Bot knife only mode: This sets whether bots will only use knives. - Bot defuse bomb mode: This sets whether bots will try to defuse bombs. - Bot don't shoot mode: This sets whether bots will not shoot at all. <h3>Step 5: Enjoy playing Counter Strike 1.6 with bots</h3>
|
96 |
-
<p>Now that you have added and customized bots to your game, you can enjoy playing Counter Strike 1.6 with them.</p>
|
97 |
-
<p>You can use the bot menu to change the bot settings anytime during the game.</p>
|
98 |
-
<p>You can also use some keyboard shortcuts to control the bots more easily.</p>
|
99 |
-
<p>Here are some of the most useful ones:</p>
|
100 |
-
- Y: Chat with all players - U: Chat with your team only - C: Voice communication with your team only - V: Voice communication with all players - E: Use items or interact with objects - R: Reload your weapon - F: Flashlight on/off - Q: Switch between your last two weapons - 1: Select your primary weapon - 2: Select your secondary weapon - 3: Select your knife - 4: Select your grenade - 5: Select your bomb (if you have one) - Tab: Show the scoreboard <h2>Conclusion</h2>
|
101 |
-
<p>In this article, we have shown you how to download, install, and use Cs 1.6 Bot Paketi to play Counter Strike 1.6 with bots.</p>
|
102 |
-
<p>Playing with bots can be a great way to improve your skills and have fun in Counter Strike 1.6.</p>
|
103 |
-
<p>You can add as many bots as you want, customize their settings and difficulty level, and control them with keyboard shortcuts.</p>
|
104 |
-
<p>If you are looking for a reliable source to download Cs 1.6 Bot Paketi, you can use <a href="http://bilgibak.net/counter-strike-1-6-bot-paketi-indir-download-gezginler-hotfile/">Bilgibak</a> or <a href="https://www.fullturkceindir.com/counter-strike-1-6-bot-paketi-indir.html">Full Türkçe İndir</a>, which offer safe and virus-free downloads.</p>
|
105 |
-
<p>If you want to learn more tips and tricks for Counter Strike 1.6, you can check out <a href="https://ccm.net/gaming/games/1919-tips-and-tricks-for-counter-strike-1-6/">CCM</a> or <a href="https://steamcommunity.com/sharedfiles/filedetails/?id=186125396">Steam Community</a>, which offer useful guides and tutorials.</p>
|
106 |
-
<p>We hope you enjoyed this article and found it helpful. Now go ahead and play Counter Strike 1.6 with bots!</p>
|
107 |
-
FAQs: Q: What is Counter Strike 1.6? A: Counter Strike 1.6 is a competitive, online, multiplayer, first-person shooter game that requires teamwork, communication, and skill. Q: What is Cs 1.6 Bot Paketi? A: Cs 1.6 Bot Paketi is a bot package that adds bots to your game and lets you customize them according to your preferences. Q: How do I download and install Cs 1.6 Bot Paketi? A: You can download Cs 1.6 Bot Paketi from a reliable website like Bilgibak or Full Türkçe İndir, then run the setup file and select the destination folder for Counter Strike 1.6. Q: How do I use Cs 1.6 Bot Paketi? A: You can use Cs 1.6 Bot Paketi by pressing "H" to open the bot menu, then adding bots to your team or the enemy team, and customizing their settings and difficulty level. Q: What are some keyboard shortcuts to control the bots? A: Some of the keyboard shortcuts to control the bots are Y, U, C, V, E, R, F, Q, 1, 2, 3, 4, 5, and Tab. </p> 0a6ba089eb<br />
|
108 |
-
<br />
|
109 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Sims 3 Supernatural No Cd Crack Not ) A Guide For Sims 3 Supernatural Fans.md
DELETED
@@ -1,134 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>HD Online Player (Sims 3 Supernatural No Cd Crack Not ): How to Play The Sims 3 Supernatural Without a CD</h1>
|
3 |
-
<p>If you are a fan of The Sims 3 and its supernatural expansion pack, you might have encountered the problem of needing a CD to play the game. This can be inconvenient, especially if you have lost or damaged your CD, or if you want to play the game on multiple devices. Fortunately, there are ways to play The Sims 3 Supernatural without a CD, such as using a no-CD patch or crack, or using an online game streaming service. In this article, we will explain what these options are, how they work, and what are their pros and cons.</p>
|
4 |
-
<h2>HD Online Player (Sims 3 Supernatural No Cd Crack Not )</h2><br /><p><b><b>Download Zip</b> ››››› <a href="https://byltly.com/2uKztf">https://byltly.com/2uKztf</a></b></p><br /><br />
|
5 |
-
<h2>Introduction</h2>
|
6 |
-
<h3>What is The Sims 3 Supernatural?</h3>
|
7 |
-
<p>The Sims 3 Supernatural is the seventh expansion pack for The Sims 3, a popular life simulation game developed by Maxis and published by Electronic Arts. The expansion pack was released in September 2012 and introduces several new life states, such as witches, werewolves, vampires, fairies, zombies, and ghosts. It also adds new features, such as lunar cycle, alchemy, magic spells, enchanted objects, and a new world called Moonlight Falls. The expansion pack allows players to create and control supernatural beings, explore their abilities and interactions, and experience mysterious events by the light of the full moon.</p>
|
8 |
-
<h3>Why do you need a CD to play The Sims 3 Supernatural?</h3>
|
9 |
-
<p>To play The Sims 3 Supernatural, you need to have the base game of The Sims 3 installed on your computer. You also need to have a physical copy of the expansion pack's CD or DVD inserted in your computer's drive while playing. This is because the game uses a copy protection system called SecuROM that checks for the presence of the original disc every time you launch the game. SecuROM is designed to prevent piracy and unauthorized copying of the game.</p>
|
10 |
-
<h3>What are the drawbacks of using a CD to play The Sims 3 Supernatural?</h3>
|
11 |
-
<p>While using a CD to play The Sims 3 Supernatural may seem like a simple and secure way to enjoy the game, it also has some drawbacks. Some of these drawbacks are:</p>
|
12 |
-
<ul>
|
13 |
-
<li>You need to have access to your CD every time you want to play the game. This can be inconvenient if you have misplaced or damaged your CD, or if you want to play the game on different devices.</li>
|
14 |
-
<li>You need to have a working CD or DVD drive on your computer. This can be problematic if your drive is broken or incompatible with the game disc.</li>
|
15 |
-
<li>You may experience performance issues or errors due to disc reading problems. This can affect your gameplay quality and enjoyment.</li>
|
16 |
-
<li>You may expose your computer to potential security risks from SecuROM. Some users have reported that SecuROM can cause system instability, compatibility issues with other software, registry errors, privacy breaches, and malware infections.</li>
|
17 |
-
</ul>
|
18 |
-
<h2>How to play The Sims 3 Supernatural without a CD</h2>
|
19 |
-
<h3>Option 1: Download and install a no-CD patch or crack</h3>
|
20 |
-
<h4>What is a no-CD patch or crack?</h4>
|
21 |
-
<p>A no-CD patch or crack is a modified version of the game's executable file that bypasses or removes the SecuROM check. This means that you can play the game without inserting the original disc in your drive. A no-CD patch or crack can be downloaded from various websites that offer them for free or for a fee. However, you should be careful when downloading and installing such files, as they may contain viruses or malware that can harm your computer.</p>
|
22 |
-
<h4>How to find and download a no-CD patch or crack for The Sims 3 Supernatural</h4>
|
23 |
-
<p>To find and download a no-CD patch or crack for The Sims 3 Supernatural, you can follow these steps:</p>
|
24 |
-
<ol>
|
25 |
-
<li>Search for "The Sims 3 Supernatural no cd" on Google or any other search engine.</li>
|
26 |
-
<li>Choose a website that offers a no-CD patch or crack for The Sims 3 Supernatural. Make sure that the website is trustworthy and reputable by checking its reviews and ratings.</li>
|
27 |
-
<li>Download the file that matches your game version and language.</li>
|
28 |
-
<li>Scan the file with an antivirus software before opening it.</li>
|
29 |
-
</ol>
|
30 |
-
<h4>How to install and use a no-CD patch or crack for The Sims 3 Supernatural</h4>
|
31 |
-
<p>To install and use a no-CD patch or crack for The Sims 3 Supernatural, you can follow these steps:</p>
|
32 |
-
<ol>
|
33 |
-
<li>Backup your original game executable file (TS3W.exe) in case something goes wrong.</li>
|
34 |
-
<li>Copy and paste the downloaded file into your game installation folder (usually C:\Program Files\Electronic Arts\The Sims 3\Game\Bin).</li>
|
35 |
-
<li>Replace the original file with the downloaded file when prompted.</li>
|
36 |
-
<li>Launch the game as usual without inserting the disc in your drive.</li>
|
37 |
-
</ol>
|
38 |
-
<h4>Pros and cons of using a no-CD patch or crack for The Sims 3 Supernatural</h4>
|
39 |
-
<p>Using a no-CD patch or crack for The Sims 3 Supernatural has some pros and cons. Some of them are:</p>
|
40 |
-
<table>
|
41 |
-
<tr><th>Pros</th><th>Cons</th></tr>
|
42 |
-
<tr><td>You don't need to have access to your CD every time you want to play the game.</td><td>You may violate the terms of service and end user license agreement of the game by using an unauthorized modification.</td></tr>
|
43 |
-
<tr><td>You don't need to have a working CD or DVD drive on your computer.</td><td>You may encounter compatibility issues with future updates or patches of the game.</td></tr>
|
44 |
-
<tr><td>You may avoid disc-reading errors and performance issues.</td><td>You may risk infecting your computer with viruses or malware hidden in the downloaded file.</td></tr>
|
45 |
-
</table>
|
46 |
-
<h3>Option 2: Use an online game streaming service</h3>
|
47 |
-
<h4>What is an online game streaming service?</h4>
|
48 |
-
<p>An online game streaming service is a platform that lets you play games that are hosted on remote servers over the internet. You don't need to download or install anything on your device, except for a client app or a web browser. You can access a library of games that are available on the service, or link your own game accounts from other platforms. The service handles all the processing and rendering of the game, and streams the video and audio output to your device. You can control the game using your keyboard, mouse, gamepad, or touch screen.</p>
|
49 |
-
<h4>How to find and use an online game streaming service for The Sims 3 Supernatural</h4>
|
50 |
-
<p>To find and use an online game streaming service for The Sims 3 Supernatural, you can follow these steps:</p>
|
51 |
-
<p>How to play Sims 3 Supernatural without CD<br />
|
52 |
-
Sims 3 Supernatural No-DVD/Fixed EXE download<br />
|
53 |
-
Sims 3 Supernatural patch v1.39.3 no CD required<br />
|
54 |
-
Sims 3 Supernatural game fixes and cheats<br />
|
55 |
-
Sims 3 Supernatural PC game trainer and playfix<br />
|
56 |
-
Sims 3 Supernatural crack by Hi2U<br />
|
57 |
-
Sims 3 Supernatural DVD-check bypass<br />
|
58 |
-
Sims 3 Supernatural serial key generator<br />
|
59 |
-
Sims 3 Supernatural free full version download<br />
|
60 |
-
Sims 3 Supernatural online multiplayer crack<br />
|
61 |
-
Sims 3 Supernatural custom content and mods<br />
|
62 |
-
Sims 3 Supernatural expansion pack features and review<br />
|
63 |
-
Sims 3 Supernatural gameplay tips and tricks<br />
|
64 |
-
Sims 3 Supernatural system requirements and compatibility<br />
|
65 |
-
Sims 3 Supernatural backup and installation guide<br />
|
66 |
-
Sims 3 Supernatural error and bug fixes<br />
|
67 |
-
Sims 3 Supernatural update v1.63.5 FASDOX<br />
|
68 |
-
Sims 3 Supernatural movie stuff pack DeZoMoR4iN<br />
|
69 |
-
Sims 3 Supernatural zombies and werewolves guide<br />
|
70 |
-
Sims 3 Supernatural fairy and witch abilities<br />
|
71 |
-
Sims 3 Supernatural moon phases and lunar cycle effects<br />
|
72 |
-
Sims 3 Supernatural alchemy and elixirs recipes<br />
|
73 |
-
Sims 3 Supernatural fortune teller and mystic career paths<br />
|
74 |
-
Sims 3 Supernatural hidden springs world download<br />
|
75 |
-
Sims 3 Supernatural brambles shrub placement fix<br />
|
76 |
-
Sims 3 Supernatural best mods and custom content sites<br />
|
77 |
-
Sims 3 Supernatural cheats and codes for PC<br />
|
78 |
-
Sims 3 Supernatural no CD crack for Mac<br />
|
79 |
-
Sims 3 Supernatural HD online player tutorial<br />
|
80 |
-
Sims 3 Supernatural how to install custom worlds<br />
|
81 |
-
Sims 3 Supernatural how to create a vampire sim<br />
|
82 |
-
Sims 3 Supernatural how to cure a zombie sim<br />
|
83 |
-
Sims 3 Supernatural how to become a ghost sim<br />
|
84 |
-
Sims 3 Supernatural how to make a genie sim<br />
|
85 |
-
Sims 3 Supernatural how to unlock all outfits and hairstyles<br />
|
86 |
-
Sims 3 Supernatural how to get a unicorn sim<br />
|
87 |
-
Sims 3 Supernatural how to build a haunted house<br />
|
88 |
-
Sims 3 Supernatural how to use the magic mirror<br />
|
89 |
-
Sims 3 Supernatural how to cast spells and curses<br />
|
90 |
-
Sims 3 Supernatural how to brew potions and poisons<br />
|
91 |
-
Sims 3 Supernatural how to grow plantsims and death flowers<br />
|
92 |
-
Sims 3 Supernatural how to summon a meteor shower or an eclipse<br />
|
93 |
-
Sims 3 Supernatural how to find hidden objects and collectibles<br />
|
94 |
-
Sims 3 Supernatural how to get a dragon egg or a dragon valley world<br />
|
95 |
-
Sims 3 Supernatural how to change the weather and seasons<br />
|
96 |
-
Sims 3 Supernatural how to travel to the future or the past<br />
|
97 |
-
Sims 3 Supernatural how to have an alien baby or a hybrid sim<br />
|
98 |
-
Sims 3 Supernatural how to adopt a pet or a stray animal<br />
|
99 |
-
Sims 3 Supernatural how to start a fire or a flood</p>
|
100 |
-
<ol>
|
101 |
-
<li>Search for "online game streaming service" on Google or any other search engine.</li>
|
102 |
-
<li>Choose a service that offers The Sims 3 Supernatural in its library, or allows you to link your own game account from another platform. Some of the popular services are GeForce NOW, PlayStation Now, Stadia, and Amazon Luna.</li>
|
103 |
-
<li>Sign up for an account on the service and choose a subscription plan that suits your needs and budget. Some services offer free trials or tiers that let you play for a limited time or with certain restrictions.</li>
|
104 |
-
<li>Download the client app for the service on your device, or open the web browser that supports the service.</li>
|
105 |
-
<li>Log in to the service and browse for The Sims 3 Supernatural in its library, or link your own game account from another platform.</li>
|
106 |
-
<li>Launch the game and enjoy playing it without a CD.</li>
|
107 |
-
</ol>
|
108 |
-
<h4>Pros and cons of using an online game streaming service for The Sims 3 Supernatural</h4>
|
109 |
-
<p>Using an online game streaming service for The Sims 3 Supernatural has some pros and cons. Some of them are:</p>
|
110 |
-
<table>
|
111 |
-
<tr><th>Pros</th><th>Cons</th></tr>
|
112 |
-
<tr><td>You don't need to have a CD or a powerful PC to play the game.</td><td>You need to have a fast and stable internet connection to play the game smoothly.</td></tr>
|
113 |
-
<tr><td>You can play the game on any device that supports the service, such as a laptop, tablet, smartphone, or TV.</td><td>You may experience input lag or latency depending on your network speed and quality.</td></tr>
|
114 |
-
<tr><td>You can access a large library of games that are available on the service, or link your own game accounts from other platforms.</td><td>You may have to pay a monthly fee or buy individual games to use the service.</td></tr>
|
115 |
-
<tr><td>You don't have to worry about updates or patches of the game, as they are handled by the service.</td><td>You may lose access to the game if the service shuts down or removes it from its library.</td></tr>
|
116 |
-
</table>
|
117 |
-
<h2>Conclusion</h2>
|
118 |
-
<p>In conclusion, playing The Sims 3 Supernatural without a CD is possible with two options: using a no-CD patch or crack, or using an online game streaming service. Both options have their advantages and disadvantages, so you should choose the one that works best for you. No matter which option you choose, you can enjoy creating and controlling supernatural beings in a world full of magic, mystery, and mischief with The Sims 3 Supernatural!</p>
|
119 |
-
<h2>FAQs</h2>
|
120 |
-
<ul>
|
121 |
-
<li><b>Is it legal to use a no-CD patch or crack for The Sims 3 Supernatural?</b></li>
|
122 |
-
<p>It depends on your local laws and regulations regarding software piracy and modification. Generally speaking, using a no-CD patch or crack for personal use may be considered fair use if you own a legitimate copy of the game. However, distributing or downloading such files may be illegal and punishable by law. Therefore, we do not endorse or recommend using a no-CD patch or crack for The Sims 3 Supernatural.</p>
|
123 |
-
<li><b>Is it safe to use a no-CD patch or crack for The Sims 3 Supernatural?</b></li>
|
124 |
-
<p>Not necessarily. Some no-CD patches or cracks may contain viruses or malware that can harm your computer. Therefore, you should be careful when downloading and installing such files, and scan them with an antivirus software before opening them. You should also backup your original game executable file in case something goes wrong.</p>
|
125 |
-
<li><b>What are some of the best online game streaming services for The Sims 3 Supernatural?</b></li>
|
126 |
-
<p>Some of the best online game streaming services for The Sims 3 Supernatural are GeForce NOW, PlayStation Now, Stadia, and Amazon Luna. These services offer high-quality graphics, performance, and compatibility with various devices. However, they also have different pricing plans, availability regions, and game libraries. Therefore, you should compare them and choose the one that suits your needs and preferences.</p>
|
127 |
-
<li><b>Do I need to buy The Sims 3 Supernatural again to use an online game streaming service?</b></li>
|
128 |
-
<p>It depends on the service you choose. Some services require you to buy individual games to play them on their platform. Others allow you to link your own game accounts from other platforms and play the games you already own. You should check the terms and conditions of each service before signing up.</p>
|
129 |
-
<li><b>Can I play The Sims 3 Supernatural offline with an online game streaming service?</b></li>
|
130 |
-
<p>No. Online game streaming services require an internet connection to work. You cannot play The Sims 3 Supernatural offline with an online game streaming service.</p>
|
131 |
-
</ul>
|
132 |
-
</p> 0a6ba089eb<br />
|
133 |
-
<br />
|
134 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Download Full Movie Yaariyan In 720p.md
DELETED
@@ -1,32 +0,0 @@
|
|
1 |
-
<h2>Download Full Movie Yaariyan In 720p</h2><br /><p><b><b>Download Zip</b> ☆☆☆☆☆ <a href="https://imgfil.com/2uxZLA">https://imgfil.com/2uxZLA</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
The novel's style of writing made it easy to read for young adults, which is what its subsequent film adaptations were aimed at. I have my husband. It has been entirely well worth the wait.
|
4 |
-
|
5 |
-
Is the faq: The process from the pre-creative stage to the finished product was very well thought out. That's awesome that the story was actually put to good use.
|
6 |
-
|
7 |
-
You're never alone.
|
8 |
-
|
9 |
-
I was immediately transported back in time to the previous few decades of my life, and I had a feeling I was watching a movie. The first sense of animation that I got was the fight sequence between Thor and Hulk.
|
10 |
-
|
11 |
-
What is the real story? Initially, I thought this book would be a mere story for young adults, and boy, was I wrong! I didn't want to end it all. I gave myself one year from that time to the movie's release date.
|
12 |
-
|
13 |
-
Get exclusive access to movie trailers, clips, features and film reviews!
|
14 |
-
|
15 |
-
My husband and I were able to make a lot of our own pizzas and pasta meals in a matter of minutes with just a microwave and a few ingredients. The best part was that we only spent $ per serving.
|
16 |
-
|
17 |
-
The toddler was able to play more than his usual games of pretend and imitation. It is a big problem because I'm a college student and this book reminds me of everything that I am missing out on.
|
18 |
-
|
19 |
-
A fun, quirky book for young adults. Not one to be read in one sitting, but the story is so well put together that it can be read in a few sittings.
|
20 |
-
|
21 |
-
Kibou no Kazoku [私ぼうの財産] by Yamada Kenjirou — | MangaDex
|
22 |
-
|
23 |
-
I was impressed with the movie's depiction of our homes and household goods. Even more amazing is how well the characters' emotions are reflected in their faces.
|
24 |
-
|
25 |
-
Love is all around us, as the saying goes, but why are we so focused on finding the perfect mate? The characters will charm anyone who has ever spent any time in a mall or department store.
|
26 |
-
|
27 |
-
Goku has learned how to do everything from riding a bicycle to driving an automobile. When I was younger, I would have loved to read this book. I'm not very religious, but I enjoy the book so much that I try to read it each year.
|
28 |
-
|
29 |
-
It's certainly one of the few anime movies that are actually family 4fefd39f24<br />
|
30 |
-
<br />
|
31 |
-
<br />
|
32 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CSR Racing Mod APK Experience the Thrill of Drag Racing with Unlimited Money and Gold.md
DELETED
@@ -1,101 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>CSR Racing Mod APK: The Ultimate Drag Racing Experience</h1>
|
3 |
-
<p>If you are a fan of drag racing games, you must have heard of CSR Racing. It is one of the most popular and addictive games in the genre, with millions of downloads and positive reviews. But what if you want to enjoy the game without any limitations or restrictions? That's where CSR Racing Mod APK comes in handy. In this article, we will tell you everything you need to know about this modified version of the game, including its features, benefits, and how to download and install it on your device.</p>
|
4 |
-
<h2>What is CSR Racing?</h2>
|
5 |
-
<p>CSR Racing is a free-to-play drag racing game developed by NaturalMotion Games and published by Zynga. It was released in 2012 for iOS and Android devices, and later for Windows and Mac OS. The game lets you race against the best drivers in the city, using over 200 licensed cars from top brands like Ferrari, Lamborghini, McLaren, Bugatti, and more. You can customize your cars with various upgrades and tuning options, and compete in different modes and events. You can also challenge other players online and join crews to earn rewards and reputation.</p>
|
6 |
-
<h2>csr racing game mod apk</h2><br /><p><b><b>Download Zip</b> === <a href="https://urlin.us/2uSXhl">https://urlin.us/2uSXhl</a></b></p><br /><br />
|
7 |
-
<h3>Features of CSR Racing</h3>
|
8 |
-
<h4>Stunning graphics and realistic physics</h4>
|
9 |
-
<p>One of the main attractions of CSR Racing is its amazing graphics and sound effects. The game uses high-quality 3D models and textures to create realistic environments and car details. The game also features realistic physics and animations, making the races more immersive and thrilling. You can feel the speed, power, and adrenaline as you race through the streets of the city.</p>
|
10 |
-
<h4>Over 200 licensed cars from top brands</h4>
|
11 |
-
<p>Another feature that makes CSR Racing stand out is its huge collection of licensed cars from top brands. You can choose from over 200 cars from manufacturers like Ferrari, Lamborghini, McLaren, Bugatti, Aston Martin, Pagani, Koenigsegg, and more. Each car has its own stats, performance, and appearance. You can also unlock new cars as you progress through the game.</p>
|
12 |
-
<h4>Customizable upgrades and tuning options</h4>
|
13 |
-
<p>If you want to make your car faster and more powerful, you can customize it with various upgrades and tuning options. You can upgrade your engine, turbo, intake, nitrous, tires, gearbox, body, and more. You can also tune your car to optimize its performance for different races. You can adjust the gear ratios, tire pressure, nitrous timing, launch control, and more.</p>
|
14 |
-
<h4>Challenging races and events</h4>
|
15 |
-
<p>CSR Racing offers a variety of races and events to keep you entertained. You can race against different opponents in different modes, such as Regulation Races, Ladder Races, Crew Battles, Daily Battles, Restriction Races, Manufacturer Races, Pro Cars Races, World Tour Races, etc. You can also participate in special events that offer exclusive rewards and prizes.</p>
|
16 |
-
<h4>Online multiplayer and social features</h4>
|
17 |
-
<p>If you want to test your skills against other players around the world, you can join the online multiplayer mode of CSR Racing. You can race against real players in real time or challenge them to beat your best times. You can also join or create a crew with other players to earn more rewards and reputation. You can chat with your crew members, share tips and strategies, and compete in crew championships.</p>
|
18 |
-
<h2>What is CSR Racing Mod APK?</h2>
|
19 |
-
<p>CSR Racing Mod APK is a modified version of the original CSR Racing game that offers some extra features and benefits that are not available in the official version. These features include unlimited money and gold, free shopping and upgrades, unlocked cars and tracks, no ads and root required, and more. These features make the game more fun and easy to play, as you don't have to worry about running out of resources, waiting for timers, or facing any restrictions. You can enjoy the game to the fullest without spending any real money or risking your device's security.</p>
|
20 |
-
<p>csr racing 2 mod apk unlimited money and gold<br />
|
21 |
-
csr racing classic mod apk free download<br />
|
22 |
-
csr racing hack apk download for android<br />
|
23 |
-
csr racing mod apk latest version 2021<br />
|
24 |
-
csr racing 2 mod apk ios no jailbreak<br />
|
25 |
-
csr racing 2 mod apk offline<br />
|
26 |
-
csr racing 2 mod apk revdl<br />
|
27 |
-
csr racing 2 mod apk unlimited keys<br />
|
28 |
-
csr racing 2 mod apk android 1<br />
|
29 |
-
csr racing 2 mod apk obb<br />
|
30 |
-
csr racing 2 mod apk rexdl<br />
|
31 |
-
csr racing 2 mod apk happymod<br />
|
32 |
-
csr racing 2 mod apk unlimited everything<br />
|
33 |
-
csr racing 2 mod apk unlimited cars<br />
|
34 |
-
csr racing 2 mod apk all cars unlocked<br />
|
35 |
-
csr racing 2 mod apk no root<br />
|
36 |
-
csr racing 2 mod apk anti ban<br />
|
37 |
-
csr racing 2 mod apk online<br />
|
38 |
-
csr racing 2 mod apk data<br />
|
39 |
-
csr racing 2 mod apk pure<br />
|
40 |
-
csr racing 2 mod apk vip<br />
|
41 |
-
csr racing 2 mod apk mega<br />
|
42 |
-
csr racing 2 mod apk lenov.ru<br />
|
43 |
-
csr racing 2 mod apk blackmod<br />
|
44 |
-
csr racing 2 mod apk platinmods<br />
|
45 |
-
csr racing classic hack apk unlimited money and gold<br />
|
46 |
-
csr racing classic hack apk download for android<br />
|
47 |
-
csr racing classic hack apk latest version 2021<br />
|
48 |
-
csr racing classic hack apk ios no jailbreak<br />
|
49 |
-
csr racing classic hack apk offline<br />
|
50 |
-
csr racing classic hack apk revdl<br />
|
51 |
-
csr racing classic hack apk android 1<br />
|
52 |
-
csr racing classic hack apk obb<br />
|
53 |
-
csr racing classic hack apk rexdl<br />
|
54 |
-
csr racing classic hack apk happymod<br />
|
55 |
-
csr racing classic hack apk unlimited everything<br />
|
56 |
-
csr racing classic hack apk no root<br />
|
57 |
-
csr racing classic hack apk anti ban<br />
|
58 |
-
csr racing classic hack apk online<br />
|
59 |
-
csr racing classic hack apk data<br />
|
60 |
-
csr racing classic hack apk pure<br />
|
61 |
-
csr racing classic hack apk vip<br />
|
62 |
-
csr racing classic hack apk mega<br />
|
63 |
-
csr racing classic hack apk lenov.ru</p>
|
64 |
-
<h3>Benefits of CSR Racing Mod APK</h3>
|
65 |
-
<h4>Unlimited money and gold</h4>
|
66 |
-
<p>Money and gold are the main currencies in CSR Racing. You need them to buy new cars, upgrade your existing ones, enter races and events, and more. However, earning money and gold in the game can be slow and tedious, especially if you want to buy the most expensive and powerful cars. That's why CSR Racing Mod APK gives you unlimited money and gold, so you can buy anything you want without any limitations. You can also use them to skip timers, refill your fuel, and more.</p>
|
67 |
-
<h4>Free shopping and upgrades</h4>
|
68 |
-
<p>Another benefit of CSR Racing Mod APK is that it allows you to shop and upgrade your cars for free. You don't have to spend any money or gold to buy new cars or upgrade your existing ones. You can simply choose any car you like from the shop and get it for free. You can also upgrade your car's engine, turbo, intake, nitrous, tires, gearbox, body, and more for free. You can make your car as fast and powerful as you want without any cost.</p>
|
69 |
-
<h4>Unlocked cars and tracks</h4>
|
70 |
-
<p>CSR Racing Mod APK also unlocks all the cars and tracks in the game for you. You don't have to complete any missions or challenges to unlock new cars or tracks. You can access them all from the start of the game. You can choose from over 200 cars from top brands like Ferrari, Lamborghini, McLaren, Bugatti, Aston Martin, Pagani, Koenigsegg, and more. You can also race on different tracks in different locations like London, Rome, New York, Tokyo, etc.</p>
|
71 |
-
<h4>No ads and root required</h4>
|
72 |
-
<p>One of the best things about CSR Racing Mod APK is that it removes all the annoying ads from the game. You don't have to watch any ads to get extra rewards or bonuses. You can enjoy the game without any interruptions or distractions. Moreover, CSR Racing Mod APK does not require root access to work on your device. You don't have to root your device or risk its security to install and play the modded version of the game.</p>
|
73 |
-
<h2>How to download and install CSR Racing Mod APK?</h2>
|
74 |
-
<p>If you are interested in downloading and installing CSR Racing Mod APK on your device, you can follow these simple steps:</p>
|
75 |
-
<h3>Step-by-step guide</h3>
|
76 |
-
<ol>
|
77 |
-
<li>First of all, you need to uninstall the original version of CSR Racing from your device if you have it installed.</li>
|
78 |
-
<li>Then, you need to download the CSR Racing Mod APK file from a trusted source. You can use this link to download it.</li>
|
79 |
-
<li>After downloading the file, you need to enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
|
80 |
-
<li>Next, you need to locate the downloaded file on your device and tap on it to start the installation process.</li>
|
81 |
-
<li>Follow the instructions on the screen and wait for the installation to finish.</li>
|
82 |
-
<li>Finally, you can launch the game from your app drawer or home screen and enjoy it.</li>
|
83 |
-
</ol>
|
84 |
-
<h2>Conclusion</h2>
|
85 |
-
<p>CSR Racing is a great drag racing game that offers stunning graphics, realistic physics, over 200 licensed cars, customizable upgrades and tuning options, challenging races and events, online multiplayer and social features, and more. However, if you want to enjoy the game without any limitations or restrictions, you should try CSR Racing Mod APK. It gives you unlimited money and gold, free shopping and upgrades, unlocked cars and tracks, no ads and root required, and more. It makes the game more fun and easy to play without spending any real money or risking your device's security. You can download and install CSR Racing Mod APK on your device by following our step-by-step guide above.</p>
|
86 |
-
<p>We hope this article was helpful for you. If you have any questions or feedback, please let us know in the comments below. Thank you for reading!</p>
|
87 |
-
<h3>Frequently Asked Questions</h3>
|
88 |
-
<ol>
|
89 |
-
<li><b>Is CSR Racing Mod APK safe to use?</b></li>
|
90 |
-
<p>Yes, CSR Racing Mod APK is safe to use as long as you download it from a trusted source like ours. It does not contain any viruses or malware that could harm your device or data. However, you should always be careful when downloading and installing any modded or hacked apps from unknown sources, as they may contain malicious code or unwanted features.</p>
|
91 |
-
<li><b>What are the minimum requirements to play CSR Racing Mod APK?</b></li>
|
92 |
-
<p>To play CSR Racing Mod APK on your device, you need to have at least Android 4.0.3 or higher, 1 GB of RAM, and 2 GB of free storage space. You also need to have a stable internet connection to access the online features of the game.</p>
|
93 |
-
<li><b>Can I play CSR Racing Mod APK offline?</b></li>
|
94 |
-
<p>Yes, you can play CSR Racing Mod APK offline, but you will not be able to access some of the features that require an internet connection, such as online multiplayer, crew championships, daily battles, etc. You will also not be able to sync your progress with your Facebook account or cloud service.</p>
|
95 |
-
<li><b>Can I update CSR Racing Mod APK?</b></li>
|
96 |
-
<p>No, you cannot update CSR Racing Mod APK from the Google Play Store or any other source, as it is a modified version of the game that is not supported by the official developers. If you want to update the game, you will have to download and install the latest version of CSR Racing Mod APK from our website or any other trusted source.</p>
|
97 |
-
<li><b>Can I use CSR Racing Mod APK with my existing account?</b></li>
|
98 |
-
<p>No, you cannot use CSR Racing Mod APK with your existing account, as it may cause your account to be banned or suspended by the game's anti-cheat system. If you want to use CSR Racing Mod APK, you should create a new account or use a guest account to avoid any risks.</p>
|
99 |
-
</ol></p> 197e85843d<br />
|
100 |
-
<br />
|
101 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Build Shoot and Survive in 1v1.LOL - The Fastest Battle Royale Experience.md
DELETED
@@ -1,114 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>1 vs 1 lol download: How to play the online building simulator and shooting game</h1>
|
3 |
-
<p>If you are looking for a fast-paced, competitive, and fun online game that combines building and shooting skills, you might want to try 1 vs 1 lol. This game is a free-to-play browser-based game that has millions of players around the world. In this article, we will tell you everything you need to know about 1 vs 1 lol, how to download and install it, and how to improve your skills and enjoy it.</p>
|
4 |
-
<h2>What is 1 vs 1 lol?</h2>
|
5 |
-
<h3>A brief introduction to the game and its features</h3>
|
6 |
-
<p>1 vs 1 lol is an online building simulator and shooting game that was developed by Lior Alterman in December 2019. The game is inspired by popular games like Fortnite, but with simpler graphics and mechanics. The game allows you to build platforms, ramps, walls, and doors to create your own structures and defend yourself from other players. You can also use various weapons, such as assault rifles, shotguns, axes, and sniper rifles, to shoot and eliminate your opponents. The game has multiple game modes, such as battle royale, 1v1, 2v2, box fight, zone wars, and more. You can also customize your character's appearance, name, controls, and settings. The game is free to play and does not require any registration or download. You can play it on your web browser or on your mobile devices.</p>
|
7 |
-
<h2>1 vs 1 lol download</h2><br /><p><b><b>DOWNLOAD</b> ……… <a href="https://jinyurl.com/2uNUy5">https://jinyurl.com/2uNUy5</a></b></p><br /><br />
|
8 |
-
<h3>The main game modes and how to play them</h3>
|
9 |
-
<p>The main game mode of 1 vs 1 lol is battle royale, where you have to fight against up to 10 players in a shrinking map. The last player standing wins the match. You can also play in teams of two in duos mode. To play battle royale, you have to select the BR or BR Duos mode from the menu, then click on Play. You will be matched with other players in a few seconds. Once the match starts, you have to find weapons and materials on the map, build your structures, and shoot your enemies. You can also use the map button to see your location and the safe zone. You have to stay inside the safe zone or you will take damage from the storm. The safe zone will shrink over time, forcing you to move closer to your enemies. The last player or team alive wins the match.</p>
|
10 |
-
<p>Another popular game mode is 1v1 or 2v2, where you have to face another player or team in a small arena. You can choose between four weapons: assault rifle, shotgun, axe, or sniper rifle. You can also choose between four building platforms: wood, stone, metal, or glass. To play 1v1 or 2v2, you have to select the mode from the menu, then click on Play. You will be matched with another player or team in a few seconds. Once the match starts, you have to build your structures and shoot your opponent. You can also use the reset button to reset your structures if they are damaged or blocking your view. The first player or team to reach five kills wins the match.</p>
|
11 |
-
<p>There are also other game modes that you can try, such as box fight, where you have to fight inside a box-shaped arena; zone wars, where you have to survive in a randomly generated zone; just build, where you can practice your building skills without any enemies; and party mode, where you can create your own private room and invite your friends to play with you. You can also create your own custom game mode by changing the settings, such as the number of players, the weapons, the materials, the map, and the time limit. To play these modes, you have to select them from the menu, then click on Play or Create. You can also join other players' rooms by clicking on Join.</p>
|
12 |
-
<p>1v1.lol building simulator and shooting game<br />
|
13 |
-
How to download 1v1.lol on PC<br />
|
14 |
-
1v1.lol battle royale mode tips and tricks<br />
|
15 |
-
Best settings for 1v1.lol on mobile<br />
|
16 |
-
1v1.lol custom games with friends<br />
|
17 |
-
1v1.lol steam release date and features<br />
|
18 |
-
1v1.lol zombies mode gameplay and guide<br />
|
19 |
-
How to improve your aim in 1v1.lol<br />
|
20 |
-
1v1.lol free coins and skins hack<br />
|
21 |
-
1v1.lol online multiplayer gun game<br />
|
22 |
-
1v1.lol box fight and build fight modes<br />
|
23 |
-
How to play 2v2 in 1v1.lol<br />
|
24 |
-
1v1.lol assault rifle vs shotgun vs sniper rifle<br />
|
25 |
-
How to use the axe in 1v1.lol<br />
|
26 |
-
1v1.lol daily challenges and rewards<br />
|
27 |
-
How to win every match in 1v1.lol<br />
|
28 |
-
1v1.lol advanced control editor and HUD<br />
|
29 |
-
How to report bugs and glitches in 1v1.lol<br />
|
30 |
-
1v1.lol community hub and discord server<br />
|
31 |
-
How to stream 1v1.lol on Twitch or YouTube<br />
|
32 |
-
1v1.lol reviews and ratings from players<br />
|
33 |
-
How to create a custom character in 1v1.lol<br />
|
34 |
-
How to join a squad in 1v1.lol<br />
|
35 |
-
How to unlock new weapons and items in 1v1.lol<br />
|
36 |
-
How to practice your building skills in 1v1.lol<br />
|
37 |
-
How to change your name and avatar in 1v1.lol<br />
|
38 |
-
How to chat with other players in 1v1.lol<br />
|
39 |
-
How to invite friends to play 1v1.lol with you<br />
|
40 |
-
How to update 1v1.lol on your device<br />
|
41 |
-
How to uninstall 1v</p>
|
42 |
-
<h3>The benefits of playing 1 vs 1 lol</h3>
|
43 |
-
<p>Playing 1 vs 1 lol can be very beneficial for you, especially if you are a fan of building and shooting games. Here are some of the benefits of playing 1 vs 1 lol:</p>
|
44 |
-
<ul>
|
45 |
-
<li>You can improve your reaction time, hand-eye coordination, and spatial awareness by building and shooting in a fast-paced environment.</li>
|
46 |
-
<li>You can develop your creativity and problem-solving skills by creating your own structures and strategies to overcome your opponents.</li>
|
47 |
-
<li>You can enhance your social skills and teamwork by playing with other players and communicating with them through chat or voice.</li>
|
48 |
-
<li>You can have fun and relax by playing a casual and entertaining game that does not require any download or registration.</li>
|
49 |
-
</ul>
|
50 |
-
<h2>How to download and install 1 vs 1 lol?</h2>
|
51 |
-
<h3>The requirements and compatibility of the game</h3>
|
52 |
-
<p>As mentioned before, 1 vs 1 lol is a browser-based game that does not require any download or registration. However, if you want to play it on your mobile devices, you will need to download and install the app. The app is compatible with Android and iOS devices, and it is free to download and play. The app requires at least Android 4.4 or iOS 9.0 to run smoothly. The app also requires an internet connection to play online with other players.</p>
|
53 |
-
<h3>The steps to download and install the game on different platforms</h3>
|
54 |
-
<p>To download and install the game on your mobile devices, you need to follow these steps:</p>
|
55 |
-
<ol>
|
56 |
-
<li>Go to the official website of 1 vs 1 lol at <a href="">https://1v1.lol/</a> or search for "1 vs 1 lol" on Google Play Store or App Store.</li>
|
57 |
-
<li>Click on the download button for your device (Android or iOS) and wait for the app to be downloaded.</li>
|
58 |
-
<li>Open the app and grant the necessary permissions for the app to function properly.</li>
|
59 |
-
<li>Enjoy playing 1 vs 1 lol on your mobile devices!</li>
|
60 |
-
</ol>
|
61 |
-
<p>To play the game on your web browser, you need to follow these steps:</p>
|
62 |
-
<ol>
|
63 |
-
<li>Go to the official website of 1 vs 1 lol at <a href="">https://1v1.lol/</a> or search for "1 vs 1 lol" on any web browser.</li>
|
64 |
-
<li>Click on the play button and wait for the game to load.</li>
|
65 |
-
<li>Enjoy playing 1 vs 1 lol on your web browser!</li>
|
66 |
-
</ol> <h3>The tips and tricks to optimize the game performance and settings</h3>
|
67 |
-
<p>Although 1 vs 1 lol is a simple and lightweight game, it can still lag or crash sometimes due to various factors, such as your device's specifications, your internet connection, or the game's server. To optimize the game performance and settings, you can try these tips and tricks:</p>
|
68 |
-
<ul>
|
69 |
-
<li>Close any unnecessary apps or tabs that are running in the background of your device or browser.</li>
|
70 |
-
<li>Clear your device's cache and memory or your browser's history and cookies.</li>
|
71 |
-
<li>Update your device's software or your browser's version to the latest one.</li>
|
72 |
-
<li>Adjust the game's graphics and sound settings to low or medium, depending on your device's capabilities.</li>
|
73 |
-
<li>Use a wired or stable Wi-Fi connection instead of a mobile data connection.</li>
|
74 |
-
<li>Choose a server that is closest to your location or has the lowest ping.</li>
|
75 |
-
<li>Report any bugs or glitches to the game's developer or support team.</li>
|
76 |
-
</ul>
|
77 |
-
<h2>How to improve your skills and enjoy 1 vs 1 lol?</h2>
|
78 |
-
<h3>The practice modes and how to use them</h3>
|
79 |
-
<p>If you are new to 1 vs 1 lol or want to improve your skills, you can use the practice modes that are available in the game. These modes allow you to practice your building and shooting skills without any enemies or pressure. You can also adjust the settings, such as the gravity, the speed, and the weapons, to suit your preferences. To use the practice modes, you have to select them from the menu, then click on Play. You can choose between two practice modes: just build and aim trainer. Just build mode lets you build unlimited structures with unlimited materials. Aim trainer mode lets you shoot at moving targets with different weapons. You can use these modes to master your building and shooting techniques and become a better player.</p>
|
80 |
-
<h3>The best weapons and building strategies for different situations</h3>
|
81 |
-
<p>One of the most important aspects of 1 vs 1 lol is knowing how to use the best weapons and building strategies for different situations. There are four weapons in the game: assault rifle, shotgun, axe, and sniper rifle. Each weapon has its own advantages and disadvantages, depending on the range, the damage, the accuracy, and the fire rate. Here are some tips on how to use each weapon effectively:</p>
|
82 |
-
<ul>
|
83 |
-
<li>Assault rifle: This weapon is good for medium-range combat, as it has a high fire rate and moderate damage. You can use it to spray bullets at your enemies or destroy their structures. However, it has low accuracy and high recoil, so you have to aim carefully and control your shots.</li>
|
84 |
-
<li>Shotgun: This weapon is good for close-range combat, as it has a high damage and low recoil. You can use it to deal massive damage to your enemies with one shot or finish them off quickly. However, it has low fire rate and low range, so you have to get close to your enemies and reload frequently.</li>
|
85 |
-
<li>Axe: This weapon is good for melee combat, as it has a high damage and no recoil. You can use it to break your enemies' structures or hit them directly. However, it has low range and low speed, so you have to be very close to your enemies and time your swings well.</li>
|
86 |
-
<li>Sniper rifle: This weapon is good for long-range combat, as it has a high damage and high accuracy. You can use it to snipe your enemies from afar or headshot them for instant kills. However, it has low fire rate and high reload time, so you have to aim precisely and avoid missing.</li>
|
87 |
-
</ul>
|
88 |
-
<p>As for building strategies, there are many ways to build your structures in 1 vs 1 lol, depending on your style and situation. Here are some of the most common building strategies that you can use:</p>
|
89 |
-
<ul>
|
90 |
-
<li>Ramp rush: This strategy involves building ramps towards your enemies while jumping and shooting at them. This can help you gain height advantage and surprise them with your aggression. However, this can also expose you to enemy fire and make you vulnerable to fall damage.</li>
|
91 |
-
<li>Tower defense: This strategy involves building a tower with walls, floors, ramps, and doors around yourself while shooting at your enemies from above. This can help you protect yourself from enemy fire and have a better view of the map. However, this can also limit your mobility and make you an easy target for snipers.</li>
|
92 |
-
<li>Box fight: This strategy involves building a box with walls, floors, ceilings, and doors around yourself while shooting at your enemies from inside. This can help you hide from enemy fire and bait them into close-range combat. However, this can also trap you inside and make you vulnerable to explosives.</li>
|
93 |
-
</ul> <h3>The online community and resources for 1 vs 1 lol players</h3>
|
94 |
-
<p>Another way to improve your skills and enjoy 1 vs 1 lol is to join the online community and access the resources for 1 vs 1 lol players. The online community consists of other players who share their experiences, tips, feedback, and suggestions about the game. You can interact with them through chat or voice, or join their clans or tournaments. You can also watch their live streams or videos, or follow their social media accounts. The online community can help you learn from other players, make new friends, and have more fun. To join the online community, you can visit the official website of 1 vs 1 lol at <a href="">https://1v1.lol/</a> or search for "1 vs 1 lol" on platforms like YouTube, Twitch, Discord, Reddit, Twitter, and Facebook.</p>
|
95 |
-
<p>The resources for 1 vs 1 lol players consist of various tools and information that can help you play the game better. These include guides, tutorials, reviews, updates, news, and more. You can use these resources to learn more about the game's features, modes, weapons, building strategies, tips and tricks, and more. You can also use these resources to stay updated on the game's development, changes, events, and more. To access the resources for 1 vs 1 lol players, you can visit the official website of 1 vs 1 lol at <a href="">https://1v1.lol/</a> or search for "1 vs 1 lol" on platforms like Google, YouTube, Wikipedia, and more.</p>
|
96 |
-
<h2>Conclusion</h2>
|
97 |
-
<h3>A summary of the main points and a call to action</h3>
|
98 |
-
<p>In conclusion, 1 vs 1 lol is an online building simulator and shooting game that is free to play and easy to access. You can play it on your web browser or on your mobile devices. You can choose from different game modes, such as battle royale, 1v1, 2v2, box fight, zone wars, and more. You can also customize your character's appearance, name, controls, and settings. You can improve your skills by practicing your building and shooting techniques in different situations. You can also join the online community and access the resources for 1 vs 1 lol players to learn from others and have more fun. If you are interested in playing 1 vs 1 lol, you can download and install it by following the steps in this article. You can also visit the official website of 1 vs 1 lol at <a href="">https://1v1.lol/</a> for more information. What are you waiting for? Start playing 1 vs 1 lol today and enjoy the online building simulator and shooting game!</p>
|
99 |
-
<h2>FAQs</h2>
|
100 |
-
<p>Here are some of the frequently asked questions about 1 vs 1 lol:</p>
|
101 |
-
<ol>
|
102 |
-
<li>Is 1 vs 1 lol safe to play?</li>
|
103 |
-
<p>Yes, 1 vs 1 lol is safe to play as long as you follow the game's rules and terms of service. The game does not contain any viruses or malware that can harm your device or browser. The game also does not collect any personal or sensitive information from you without your consent. However, you should be careful when interacting with other players online, as they may not be who they claim to be. You should also avoid clicking on any suspicious links or ads that may appear on the game's website or app.</p>
|
104 |
-
<li>Is 1 vs 1 lol multiplayer?</li>
|
105 |
-
<p>Yes, 1 vs 1 lol is multiplayer as it allows you to play online with other players around the world. You can play with random players in public matches or with your friends in private matches. You can also chat or voice with other players during the game. However, you can also play offline in practice modes if you want to play solo or without an internet connection.</p>
|
106 |
-
<li>Is 1 vs 1 lol cross-platform?</li>
|
107 |
-
<p>Yes, 1 vs 1 lol is cross-platform as it allows you to play on different devices and platforms. You can play on your web browser or on your mobile devices (Android or iOS). You can also play with other players who are using different devices or platforms than yours.</p>
|
108 |
-
<li>How do I report a bug or a problem in 1 vs 1 lol?</li>
|
109 |
-
<p>If you encounter a bug or a problem in 1 vs 1 lol, you can report it to the game's developer or support team by using the feedback button on the game's website or app. You can also contact them by email at <a href="mailto:[email protected]">[email protected]</a>. You should provide as much detail as possible about the bug or problem, such as the device, platform, mode, situation, and screenshot of the issue. The developer or support team will try to fix the bug or problem as soon as possible.</p>
|
110 |
-
<li>How do I get better at 1 vs 1 lol?</li>
|
111 |
-
<p>If you want to get better at 1 vs 1 lol, you need to practice your building and shooting skills regularly. You can use the practice modes to improve your techniques and learn from your mistakes. You can also watch other players' streams or videos to see how they play and what strategies they use. You can also join the online community and ask for advice or feedback from other players. You can also challenge yourself by playing against stronger opponents or in different modes. The more you play, the more you will improve and enjoy 1 vs 1 lol.</p>
|
112 |
-
</ol></p> 197e85843d<br />
|
113 |
-
<br />
|
114 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download True Story in 480p Quality - The Best Site for Movies.md
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download True Story in 480p Quality</h1>
|
3 |
-
<p>True Story is a new crime thriller miniseries created by Eric Newman for Netflix. It stars Kevin Hart and Wesley Snipes as two brothers who get involved in a deadly situation after a night out in Philadelphia. If you are a fan of drama, suspense, and twists, you might want to watch this show. But what if you want to download True Story in 480p quality for offline viewing? In this article, we will show you how to do that from Netflix and other sources.</p>
|
4 |
-
<h2>What is True Story and Why You Should Watch It</h2>
|
5 |
-
<p>True Story is a limited series that premiered on Netflix on November 24, 2021. It consists of seven episodes, each ranging from 27 to 58 minutes. The series is based on an original idea by Kevin Hart, who also executive produces and stars as Kid, a world-famous comedian. Wesley Snipes plays his older brother Carlton, who has a troubled past and a shady agenda. The series follows the brothers as they try to escape from a dangerous situation that threatens to ruin Kid's career and life.</p>
|
6 |
-
<h2>true story 480p download</h2><br /><p><b><b>DOWNLOAD</b> --->>> <a href="https://jinyurl.com/2uNQg4">https://jinyurl.com/2uNQg4</a></b></p><br /><br />
|
7 |
-
<h3>The Plot and the Cast of True Story</h3>
|
8 |
-
<p>The plot of True Story is inspired by some of the real-life experiences of Kevin Hart, but it is not a biopic or a comedy. It is a dark and gritty story that explores the themes of fame, family, loyalty, and betrayal. The series begins with Kid returning to his hometown of Philadelphia for a comedy tour. He reunites with his brother Carlton, who he has not seen for years. Carlton convinces Kid to join him for a night out at a hotel, where they meet two women. The next morning, Kid wakes up to find one of the women dead in his bed. Carlton tells him that they have been set up by someone who wants to blackmail them. Kid has to decide whether to trust his brother or not, while also dealing with the police, the media, and his own conscience.</p>
|
9 |
-
<p>The cast of True Story includes some well-known actors and some newcomers. Kevin Hart and Wesley Snipes are the main stars, playing Kid and Carlton respectively. They are joined by Tawny Newsome as Billie, Kid's personal assistant; Paul Adelstein as Todd, Kid's manager; Will Catlett as Herschel, Kid's bodyguard; Chris Diamantopoulos as Savvas, a Greek mobster; Billy Zane as Ari, Savvas' brother; Lauren London as Monyca, Kid's ex-wife; Ash Santos as Daphne, one of the women at the hotel; John Ales as Nikos, Savvas' henchman; and Theo Rossi as Gene, a superfan of Kid.</p>
|
10 |
-
<h3>The Reviews and the Ratings of True Story</h3>
|
11 |
-
<p>True Story has received mixed reviews from critics and audiences. Some praised the series for its gripping plot, its unexpected twists, its stellar performances, especially by Hart and Snipes, and its exploration of the dark side of fame. Others criticized the series for its lack of originality, its implausible scenarios, its uneven tone, its excessive violence, and its wasted potential.</p>
|
12 |
-
<p>The series has a rating of 6.9 out of 10 on IMDb, based on over 6,000 user ratings. It has a rating of 57% on Rotten Tomatoes, based on 20 critic reviews. It has a rating of 7 out of 10 on IGN, based on one critic review.</p>
|
13 |
-
<h2>How to Download True Story in 480p Quality from Netflix</h2>
|
14 |
-
<p>If you want to watch True Story offline or save some data usage, you can download it in 480p quality from Netflix. Netflix allows you to download videos offline on your smartphone, tablet, or computer, as long as you have a Netflix subscription and the Netflix app installed. Here are the benefits and the steps of downloading True Story in 480p quality from Netflix.</p>
|
15 |
-
<h3>The Benefits of Downloading Netflix Videos Offline</h3>
|
16 |
-
<p>Downloading Netflix videos offline has several advantages, such as:</p>
|
17 |
-
<ul>
|
18 |
-
<li>You can watch your favorite shows and movies anytime and anywhere, without worrying about internet connection or speed.</li>
|
19 |
-
<li>You can save your mobile data usage, especially if you are on a limited or expensive plan.</li>
|
20 |
-
<li>You can avoid buffering, lagging, or interruptions caused by network issues or traffic.</li>
|
21 |
-
<li>You can choose the video quality that suits your device and preference, from low to high.</li>
|
22 |
-
</ul>
|
23 |
-
<p>Downloading True Story in 480p quality from Netflix is a good option if you want to enjoy the series in decent resolution, but also save some storage space and data usage. 480p is the standard definition (SD) quality, which means that the video has a resolution of 720 x 480 pixels. It is lower than high definition (HD) quality, which has a resolution of 1280 x 720 pixels or higher, but it is still clear and sharp enough for most devices and screens.</p>
|
24 |
-
<p>true story movie 480p download<br />
|
25 |
-
true story web series 480p download<br />
|
26 |
-
true story 2023 480p download<br />
|
27 |
-
true story netflix 480p download<br />
|
28 |
-
true story hindi dubbed 480p download<br />
|
29 |
-
true story based on a true story 480p download<br />
|
30 |
-
true story season 1 480p download<br />
|
31 |
-
true story full episodes 480p download<br />
|
32 |
-
true story free 480p download<br />
|
33 |
-
true story torrent 480p download<br />
|
34 |
-
true story mp4 480p download<br />
|
35 |
-
true story mkv 480p download<br />
|
36 |
-
true story hd 480p download<br />
|
37 |
-
true story english subtitles 480p download<br />
|
38 |
-
true story online watch 480p download<br />
|
39 |
-
true story direct link 480p download<br />
|
40 |
-
true story google drive 480p download<br />
|
41 |
-
true story mega link 480p download<br />
|
42 |
-
true story index of 480p download<br />
|
43 |
-
true story filmyzilla 480p download<br />
|
44 |
-
true story moviesverse 480p download<br />
|
45 |
-
true story moviezverse 480p download<br />
|
46 |
-
true story worldfree4u 480p download<br />
|
47 |
-
true story filmywap 480p download<br />
|
48 |
-
true story khatrimaza 480p download<br />
|
49 |
-
true story bolly4u 480p download<br />
|
50 |
-
true story pagalworld 480p download<br />
|
51 |
-
true story skymovieshd 480p download<br />
|
52 |
-
true story moviesflix 480p download<br />
|
53 |
-
true story moviescounter 480p download<br />
|
54 |
-
true story movierulz 480p download<br />
|
55 |
-
true story tamilrockers 480p download<br />
|
56 |
-
true story isaimini 480p download<br />
|
57 |
-
true story tamilyogi 480p download<br />
|
58 |
-
true story teluguwap 480p download<br />
|
59 |
-
true story jiorockers 480p download<br />
|
60 |
-
true story todaypk 480p download<br />
|
61 |
-
true story yts 480p download<br />
|
62 |
-
true story yify 480p download<br />
|
63 |
-
true story rarbg 480p download<br />
|
64 |
-
true story limetorrents 480p download<br />
|
65 |
-
true story kickass torrents 480p download<br />
|
66 |
-
true story the pirate bay 480p download<br />
|
67 |
-
true story eztv.io 480p download <br />
|
68 |
-
true story xmovies8.tv 480p download <br />
|
69 |
-
true story putlocker9.ru 480p download <br />
|
70 |
-
true story solarmovie.to 480p download <br />
|
71 |
-
true story fmovies.to 480p download</p>
|
72 |
-
<h3>The Steps to Download True Story in 480p Quality from Netflix</h3>
|
73 |
-
<p>To download True Story in 480p quality from Netflix, you need to follow these steps:</p>
|
74 |
-
<ol>
|
75 |
-
<li>Open the Netflix app on your device and sign in with your account.</li>
|
76 |
-
<li>Search for True Story in the search bar and tap on the series title.</li>
|
77 |
-
<li>Select the episode that you want to download and tap on the download icon next to it. You can also tap on the download icon next to the series title to download all the episodes at once.</li>
|
78 |
-
<li>Wait for the download to complete. You can check the progress and manage your downloads in the downloads section of the app.</li>
|
79 |
-
<li>To watch your downloaded videos offline, go to the downloads section of the app and tap on the play icon next to the video title.</li>
|
80 |
-
</ol>
|
81 |
-
<p>Note that you need to have enough storage space on your device to download True Story in 480p quality. Each episode of True Story in 480p quality takes up about 300 MB of storage space. You also need to have a Netflix plan that supports downloading videos offline. The basic plan only allows you to download videos on one device at a time, while the standard and premium plans allow you to download videos on two and four devices at a time, respectively.</p>
|
82 |
-
<h2>How to Download True Story in 480p Quality from Other Sources</h2>
|
83 |
-
<p>If you don't have a Netflix subscription or you want to download True Story in 480p quality from other sources, you might be tempted to look for unofficial websites that offer free downloads or streaming of the series. However, we strongly advise you against doing that, as it comes with many risks and disadvantages. Here are some of them:</p>
|
84 |
-
<h3>The Risks of Downloading from Unofficial Websites</h3>
|
85 |
-
<p>Downloading from unofficial websites is illegal, unethical, and unsafe. You might face some serious consequences, such as:</p>
|
86 |
-
<ul>
|
87 |
-
<li>You might violate the intellectual property rights of the creators and distributors of True Story, and get sued or fined for piracy.</li>
|
88 |
-
<li>You might expose your device and personal information to malware, viruses, spyware, ransomware, or phishing attacks that can harm your system or steal your data.</li>
|
89 |
-
<li>You might get low-quality or corrupted files that don't play properly or damage your device.</li>
|
90 |
-
<li>You might encounter annoying ads, pop-ups, redirects, or surveys that interrupt your viewing experience or trick you into clicking on malicious links or downloading unwanted software.</li>
|
91 |
-
</ul>
|
92 |
-
<h3>The Alternatives to Download True Story in 480p Quality from Other Sources</h3>
|
93 |
-
<p>If you want to download True Story in 480p quality from other sources legally and safely, you have some alternatives, such as:</p>
|
94 |
-
<ul>
|
95 |
-
<li>You can buy or rent True Story in digital format from online platforms like Amazon Prime Video, Google Play Movies & TV, iTunes, or YouTube. You can then download it to your device and watch it offline. However, this option might cost you more than a Netflix subscription.</li>
|
96 |
-
<li>You can wait for True Story to be released on DVD or Blu-ray discs. You can then buy or borrow them and rip them to your device using a DVD ripper software. However, this option might take longer and require more technical skills.</li></ul>
|
97 |
-
<h2>Conclusion</h2>
|
98 |
-
<p>True Story is a thrilling miniseries that you don't want to miss. If you want to download it in 480p quality for offline viewing, you have two main options: Netflix or other sources. Netflix is the easiest and safest option, as it allows you to download the series with a few taps on your device. Other sources might be cheaper or faster, but they also come with many risks and disadvantages. You should always respect the intellectual property rights of the creators and distributors of True Story, and avoid downloading from illegal or unofficial websites. We hope this article has helped you learn how to download True Story in 480p quality from Netflix and other sources.</p>
|
99 |
-
<h2>FAQs</h2>
|
100 |
-
<p>Here are some frequently asked questions about downloading True Story in 480p quality:</p>
|
101 |
-
<table>
|
102 |
-
<tr>
|
103 |
-
<th>Question</th>
|
104 |
-
<th>Answer</th>
|
105 |
-
</tr>
|
106 |
-
<tr>
|
107 |
-
<td>Can I download True Story in higher quality than 480p?</td>
|
108 |
-
<td>Yes, you can download True Story in higher quality than 480p from Netflix or other sources, if your device and internet connection support it. However, higher quality videos will take up more storage space and data usage than lower quality videos.</td>
|
109 |
-
</tr>
|
110 |
-
<tr>
|
111 |
-
<td>How long can I keep the downloaded videos of True Story on my device?</td>
|
112 |
-
<td>You can keep the downloaded videos of True Story on your device as long as you have an active Netflix subscription and the Netflix app installed. However, some videos might expire after a certain period of time or if you go online after a long time offline. You can check the expiration date of each video in the downloads section of the app.</td>
|
113 |
-
</tr>
|
114 |
-
<tr>
|
115 |
-
<td>Can I watch the downloaded videos of True Story on other devices?</td>
|
116 |
-
<td>You can watch the downloaded videos of True Story on other devices that have the Netflix app installed and are signed in with the same account. However, you might be limited by the number of devices that you can download videos on at a time, depending on your Netflix plan.</td>
|
117 |
-
</tr>
|
118 |
-
<tr>
|
119 |
-
<td>Can I share the downloaded videos of True Story with others?</td>
|
120 |
-
<td>No, you cannot share the downloaded videos of True Story with others, as they are encrypted and tied to your account. Sharing them might violate the terms of service of Netflix and the intellectual property rights of the creators and distributors of True Story.</td>
|
121 |
-
</tr>
|
122 |
-
<tr>
|
123 |
-
<td>Can I download True Story from other streaming platforms?</td>
|
124 |
-
<td>No, you cannot download True Story from other streaming platforms, as it is a Netflix original series and exclusive to Netflix. You can only watch it online or offline on Netflix.</td>
|
125 |
-
</tr>
|
126 |
-
</table></p> 197e85843d<br />
|
127 |
-
<br />
|
128 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/7hao/bingo/src/lib/bots/bing/sr.ts
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
// @ts-ignore
|
2 |
-
const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? (
|
3 |
-
// @ts-ignore
|
4 |
-
window.SpeechRecognition ||
|
5 |
-
window.webkitSpeechRecognition ||
|
6 |
-
// @ts-ignore
|
7 |
-
window.mozSpeechRecognition ||
|
8 |
-
// @ts-ignore
|
9 |
-
window.msSpeechRecognition ||
|
10 |
-
// @ts-ignore
|
11 |
-
window.oSpeechRecognition
|
12 |
-
) as typeof webkitSpeechRecognition : undefined
|
13 |
-
|
14 |
-
type subscriber = (msg: string, command?: string) => void
|
15 |
-
|
16 |
-
export class SR {
|
17 |
-
recognition?: SpeechRecognition
|
18 |
-
onchange?: subscriber
|
19 |
-
transcript: boolean = false
|
20 |
-
listening: boolean = false
|
21 |
-
private commandsRe?: RegExp
|
22 |
-
constructor(commands: string[]) {
|
23 |
-
this.recognition = SpeechRecognitionPolyfill ? new SpeechRecognitionPolyfill() : undefined
|
24 |
-
if (!this.recognition) {
|
25 |
-
return
|
26 |
-
}
|
27 |
-
this.configuration('zh-CN')
|
28 |
-
if (commands.length) {
|
29 |
-
this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`)
|
30 |
-
}
|
31 |
-
this.recognition.onresult = this.speechRecognition
|
32 |
-
this.recognition.onerror = (err) => {
|
33 |
-
console.log('err', err.error)
|
34 |
-
this.stop()
|
35 |
-
}
|
36 |
-
this.recognition.onend = () => {
|
37 |
-
if (this.recognition && this.listening) {
|
38 |
-
this.recognition.start()
|
39 |
-
}
|
40 |
-
}
|
41 |
-
}
|
42 |
-
|
43 |
-
speechRecognition = (event: SpeechRecognitionEvent) => {
|
44 |
-
if (!this.listening) return
|
45 |
-
for (var i = event.resultIndex; i < event.results.length; i++) {
|
46 |
-
let result = event.results[i]
|
47 |
-
if (result.isFinal) {
|
48 |
-
var alt = result[0]
|
49 |
-
const text = alt.transcript.trim()
|
50 |
-
if (this.commandsRe && this.commandsRe.test(text)) {
|
51 |
-
return this.onchange?.('', RegExp.$1)
|
52 |
-
}
|
53 |
-
if (!this.transcript) return
|
54 |
-
this.onchange?.(text)
|
55 |
-
}
|
56 |
-
}
|
57 |
-
}
|
58 |
-
|
59 |
-
private configuration = async (lang: string = 'zh-CN') => {
|
60 |
-
return new Promise((resolve) => {
|
61 |
-
if (this.recognition) {
|
62 |
-
this.recognition.continuous = true
|
63 |
-
this.recognition.lang = lang
|
64 |
-
this.recognition.onstart = resolve
|
65 |
-
}
|
66 |
-
})
|
67 |
-
}
|
68 |
-
|
69 |
-
start = async () => {
|
70 |
-
if (this.recognition && !this.listening) {
|
71 |
-
await this.recognition.start()
|
72 |
-
this.transcript = true
|
73 |
-
this.listening = true
|
74 |
-
}
|
75 |
-
}
|
76 |
-
|
77 |
-
stop = () => {
|
78 |
-
if (this.recognition) {
|
79 |
-
this.recognition.stop()
|
80 |
-
this.transcript = false
|
81 |
-
this.listening = false
|
82 |
-
}
|
83 |
-
}
|
84 |
-
|
85 |
-
|
86 |
-
pause = () => {
|
87 |
-
if (this.recognition) {
|
88 |
-
this.transcript = false
|
89 |
-
}
|
90 |
-
}
|
91 |
-
|
92 |
-
resume = () => {
|
93 |
-
if (this.recognition) {
|
94 |
-
this.transcript = true
|
95 |
-
}
|
96 |
-
}
|
97 |
-
|
98 |
-
abort = () => {
|
99 |
-
if (this.recognition && this.transcript) {
|
100 |
-
this.recognition.abort()
|
101 |
-
this.transcript = false
|
102 |
-
this.listening = false
|
103 |
-
}
|
104 |
-
}
|
105 |
-
}
|
106 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/A666sxr/Genshin_TTS/models.py
DELETED
@@ -1,730 +0,0 @@
|
|
1 |
-
import copy
|
2 |
-
import math
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import functional as F
|
6 |
-
|
7 |
-
import commons
|
8 |
-
import modules
|
9 |
-
import attentions
|
10 |
-
import monotonic_align
|
11 |
-
|
12 |
-
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
|
13 |
-
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
14 |
-
from commons import init_weights, get_padding
|
15 |
-
from pqmf import PQMF
|
16 |
-
from stft import TorchSTFT
|
17 |
-
import math
|
18 |
-
|
19 |
-
|
20 |
-
class StochasticDurationPredictor(nn.Module):
    """Flow-based stochastic duration predictor (VITS-style).

    Training mode (``reverse=False``) returns a per-sample negative
    log-likelihood term for the observed durations ``w``; inference mode
    (``reverse=True``) samples log-durations from noise.
    """

    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
        super().__init__()
        filter_channels = in_channels  # it needs to be removed from future version.
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        # Main flow stack over the 2-channel duration representation.
        self.log_flow = modules.Log()
        self.flows = nn.ModuleList()
        self.flows.append(modules.ElementwiseAffine(2))
        for i in range(n_flows):
            self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
            self.flows.append(modules.Flip())

        # Posterior network used only during training to augment w with noise.
        self.post_pre = nn.Conv1d(1, filter_channels, 1)
        self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
        self.post_flows = nn.ModuleList()
        self.post_flows.append(modules.ElementwiseAffine(2))
        for i in range(4):
            self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
            self.post_flows.append(modules.Flip())

        # Condition encoder applied to the (detached) text encoding x.
        self.pre = nn.Conv1d(in_channels, filter_channels, 1)
        self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
        if gin_channels != 0:
            # Optional speaker/global conditioning.
            self.cond = nn.Conv1d(gin_channels, filter_channels, 1)

    def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
        """Training: return NLL + log q per batch element. Inference: return sampled logw.

        x is detached so duration gradients do not flow into the text encoder.
        """
        x = torch.detach(x)
        x = self.pre(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        x = self.convs(x, x_mask)
        x = self.proj(x) * x_mask

        if not reverse:
            flows = self.flows
            assert w is not None

            # Variational posterior: sample (u, z1) conditioned on x and w.
            logdet_tot_q = 0
            h_w = self.post_pre(w)
            h_w = self.post_convs(h_w, x_mask)
            h_w = self.post_proj(h_w) * x_mask
            e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
            z_q = e_q
            for flow in self.post_flows:
                z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
                logdet_tot_q += logdet_q
            z_u, z1 = torch.split(z_q, [1, 1], 1)
            u = torch.sigmoid(z_u) * x_mask
            z0 = (w - u) * x_mask  # dequantized duration
            logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
            logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2]) - logdet_tot_q

            # Main flow: map (z0, z1) to the base Gaussian, accumulating log-determinants.
            logdet_tot = 0
            z0, logdet = self.log_flow(z0, x_mask)
            logdet_tot += logdet
            z = torch.cat([z0, z1], 1)
            for flow in flows:
                z, logdet = flow(z, x_mask, g=x, reverse=reverse)
                logdet_tot = logdet_tot + logdet
            nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2]) - logdet_tot
            return nll + logq  # [b]
        else:
            # Inference: run the flows in reverse from noise to log-durations.
            flows = list(reversed(self.flows))
            flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
            z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
            for flow in flows:
                z = flow(z, x_mask, g=x, reverse=reverse)
            z0, z1 = torch.split(z, [1, 1], 1)
            logw = z0
            return logw
|
99 |
-
|
100 |
-
|
101 |
-
class DurationPredictor(nn.Module):
    """Deterministic duration predictor: two masked conv/ReLU/LayerNorm/dropout
    stages followed by a 1x1 projection to a single duration channel.

    Inputs are detached, so no gradient reaches the upstream text encoder.
    """

    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
        super().__init__()

        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels

        self.drop = nn.Dropout(p_dropout)
        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.norm_1 = modules.LayerNorm(filter_channels)
        self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.norm_2 = modules.LayerNorm(filter_channels)
        self.proj = nn.Conv1d(filter_channels, 1, 1)

        if gin_channels != 0:
            # Optional speaker/global conditioning added to the input.
            self.cond = nn.Conv1d(gin_channels, in_channels, 1)

    def forward(self, x, x_mask, g=None):
        """Return per-frame durations, shape [b, 1, t], zeroed outside x_mask."""
        h = torch.detach(x)
        if g is not None:
            h = h + self.cond(torch.detach(g))
        # Two identical conv blocks: mask -> conv -> relu -> layernorm -> dropout.
        for conv, norm in ((self.conv_1, self.norm_1), (self.conv_2, self.norm_2)):
            h = self.drop(norm(torch.relu(conv(h * x_mask))))
        return self.proj(h * x_mask) * x_mask
|
136 |
-
|
137 |
-
|
138 |
-
class TextEncoder(nn.Module):
    """Text (phoneme) encoder: embedding + transformer encoder + projection to
    the mean/log-variance of the prior distribution.
    """

    def __init__(self,
                 n_vocab,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout):
        super().__init__()
        self.n_vocab = n_vocab
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout

        self.emb = nn.Embedding(n_vocab, hidden_channels)
        # Scaled init to match the sqrt(hidden) scaling applied in forward().
        nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)

        self.encoder = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)
        # Projects to 2*out_channels: concatenated (m, logs).
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths):
        """Encode token ids.

        Returns (hidden states, prior mean m, prior log-std logs, mask),
        all in [b, c, t] layout except the [b, 1, t] mask.
        """
        x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)

        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return x, m, logs, x_mask
|
180 |
-
|
181 |
-
|
182 |
-
class ResidualCouplingBlock(nn.Module):
    """Stack of residual-coupling normalizing flows, each followed by a
    channel flip so alternating halves get transformed.

    forward() with reverse=False maps data toward the prior; reverse=True
    inverts the stack (flows applied in reverse order).
    """

    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 n_flows=4,
                 gin_channels=0):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for _ in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(
                    channels, hidden_channels, kernel_size, dilation_rate,
                    n_layers, gin_channels=gin_channels, mean_only=True))
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if reverse:
            # Invert: run each flow backwards, in reverse order.
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        else:
            # Forward pass; the per-flow log-determinant is discarded here.
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        return x
|
213 |
-
|
214 |
-
|
215 |
-
class PosteriorEncoder(nn.Module):
    """Posterior encoder: 1x1 pre-conv + WaveNet-style stack + projection to
    Gaussian (m, logs), from which a latent z is sampled by reparameterization.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 gin_channels=0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        # WN supports optional global conditioning via gin_channels.
        self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
        # 2*out_channels: concatenated (m, logs).
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        """Return (sampled z, mean m, log-std logs, mask), masked outside x_lengths."""
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        # Reparameterization trick: z = m + eps * exp(logs).
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask
|
245 |
-
|
246 |
-
class iSTFT_Generator(torch.nn.Module):
    """HiFi-GAN-style upsampling generator whose head predicts STFT magnitude
    and phase, converted to a waveform via inverse STFT.
    """

    def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size, gin_channels=0):
        super(iSTFT_Generator, self).__init__()
        # self.h = h
        self.gen_istft_n_fft = gen_istft_n_fft
        self.gen_istft_hop_size = gen_istft_hop_size

        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = weight_norm(Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3))
        # '1' selects ResBlock1; anything else selects ResBlock2.
        resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2

        # Transposed-conv upsampling stages; channels halve at each stage.
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(weight_norm(
                ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
                                k, u, padding=(k-u)//2)))

        # num_kernels parallel resblocks per upsampling stage.
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel//(2**(i+1))
            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(resblock(ch, k, d))

        # Head outputs n_fft+2 channels: (n_fft/2+1) magnitudes + (n_fft/2+1) phases.
        self.post_n_fft = self.gen_istft_n_fft
        self.conv_post = weight_norm(Conv1d(ch, self.post_n_fft + 2, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
        self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))
        self.stft = TorchSTFT(filter_length=self.gen_istft_n_fft, hop_length=self.gen_istft_hop_size, win_length=self.gen_istft_n_fft)

    def forward(self, x, g=None):
        """Return (waveform, None); g is accepted but unused here."""
        x = self.conv_pre(x)
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            # Average the outputs of this stage's parallel resblocks.
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i*self.num_kernels+j](x)
                else:
                    xs += self.resblocks[i*self.num_kernels+j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.reflection_pad(x)
        x = self.conv_post(x)
        # First half of channels -> log-magnitude (exp), second half -> phase in (-pi, pi).
        spec = torch.exp(x[:, :self.post_n_fft // 2 + 1, :])
        phase = math.pi*torch.sin(x[:, self.post_n_fft // 2 + 1:, :])
        out = self.stft.inverse(spec, phase).to(x.device)
        return out, None

    def remove_weight_norm(self):
        # Strip weight_norm for inference; call once before export/deployment.
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)
|
305 |
-
|
306 |
-
|
307 |
-
class Multiband_iSTFT_Generator(torch.nn.Module):
    """Multi-band variant of the iSTFT generator: the head predicts magnitude
    and phase for `subbands` sub-band signals, each inverted with an iSTFT and
    recombined into a full-band waveform with a PQMF synthesis filter bank.
    """

    def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size, subbands, gin_channels=0):
        super(Multiband_iSTFT_Generator, self).__init__()
        # self.h = h
        self.subbands = subbands
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = weight_norm(Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3))
        # '1' selects ResBlock1; anything else selects ResBlock2.
        resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2

        # Transposed-conv upsampling stages; channels halve at each stage.
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(weight_norm(
                ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
                                k, u, padding=(k-u)//2)))

        # num_kernels parallel resblocks per upsampling stage.
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel//(2**(i+1))
            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(resblock(ch, k, d))

        self.post_n_fft = gen_istft_n_fft
        self.ups.apply(init_weights)
        self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))
        self.reshape_pixelshuffle = []

        # Head outputs subbands * (n_fft + 2) channels: per sub-band magnitude+phase.
        self.subband_conv_post = weight_norm(Conv1d(ch, self.subbands*(self.post_n_fft + 2), 7, 1, padding=3))
        self.subband_conv_post.apply(init_weights)

        self.gen_istft_n_fft = gen_istft_n_fft
        self.gen_istft_hop_size = gen_istft_hop_size

    def forward(self, x, g=None):
        """Return (full-band waveform, per-sub-band waveforms); g is accepted but unused."""
        # NOTE(review): the iSTFT and PQMF banks are rebuilt on every forward call;
        # kept as-is to preserve behavior (device handling follows the input).
        stft = TorchSTFT(filter_length=self.gen_istft_n_fft, hop_length=self.gen_istft_hop_size, win_length=self.gen_istft_n_fft).to(x.device)
        pqmf = PQMF(x.device)

        x = self.conv_pre(x)  # [B, ch, length]

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)

            # Average the outputs of this stage's parallel resblocks.
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i*self.num_kernels+j](x)
                else:
                    xs += self.resblocks[i*self.num_kernels+j](x)
            x = xs / self.num_kernels

        x = F.leaky_relu(x)
        x = self.reflection_pad(x)
        x = self.subband_conv_post(x)
        # Split head channels into per-sub-band blocks: [B, subbands, n_fft+2, T].
        x = torch.reshape(x, (x.shape[0], self.subbands, x.shape[1]//self.subbands, x.shape[-1]))

        # First half -> log-magnitude (exp), second half -> phase in (-pi, pi).
        spec = torch.exp(x[:, :, :self.post_n_fft // 2 + 1, :])
        phase = math.pi*torch.sin(x[:, :, self.post_n_fft // 2 + 1:, :])

        # Fold sub-bands into the batch dimension for a single batched iSTFT.
        y_mb_hat = stft.inverse(torch.reshape(spec, (spec.shape[0]*self.subbands, self.gen_istft_n_fft // 2 + 1, spec.shape[-1])), torch.reshape(phase, (phase.shape[0]*self.subbands, self.gen_istft_n_fft // 2 + 1, phase.shape[-1])))
        y_mb_hat = torch.reshape(y_mb_hat, (x.shape[0], self.subbands, 1, y_mb_hat.shape[-1]))
        y_mb_hat = y_mb_hat.squeeze(-2)

        # Recombine sub-bands into the full-band waveform.
        y_g_hat = pqmf.synthesis(y_mb_hat)

        return y_g_hat, y_mb_hat

    def remove_weight_norm(self):
        # Strip weight_norm for inference; call once before export/deployment.
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        # Also strip the pre/post convolutions, matching iSTFT_Generator.
        # Previously these were left weight-normed after this call.
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.subband_conv_post)
|
383 |
-
|
384 |
-
|
385 |
-
class Multistream_iSTFT_Generator(torch.nn.Module):
|
386 |
-
def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size, subbands, gin_channels=0):
|
387 |
-
super(Multistream_iSTFT_Generator, self).__init__()
|
388 |
-
# self.h = h
|
389 |
-
self.subbands = subbands
|
390 |
-
self.num_kernels = len(resblock_kernel_sizes)
|
391 |
-
self.num_upsamples = len(upsample_rates)
|
392 |
-
self.conv_pre = weight_norm(Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3))
|
393 |
-
resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
|
394 |
-
|
395 |
-
self.ups = nn.ModuleList()
|
396 |
-
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
397 |
-
self.ups.append(weight_norm(
|
398 |
-
ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
|
399 |
-
k, u, padding=(k-u)//2)))
|
400 |
-
|
401 |
-
self.resblocks = nn.ModuleList()
|
402 |
-
for i in range(len(self.ups)):
|
403 |
-
ch = upsample_initial_channel//(2**(i+1))
|
404 |
-
for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
|
405 |
-
self.resblocks.append(resblock(ch, k, d))
|
406 |
-
|
407 |
-
self.post_n_fft = gen_istft_n_fft
|
408 |
-
self.ups.apply(init_weights)
|
409 |
-
self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))
|
410 |
-
self.reshape_pixelshuffle = []
|
411 |
-
|
412 |
-
self.subband_conv_post = weight_norm(Conv1d(ch, self.subbands*(self.post_n_fft + 2), 7, 1, padding=3))
|
413 |
-
|
414 |
-
self.subband_conv_post.apply(init_weights)
|
415 |
-
|
416 |
-
self.gen_istft_n_fft = gen_istft_n_fft
|
417 |
-
self.gen_istft_hop_size = gen_istft_hop_size
|
418 |
-
|
419 |
-
updown_filter = torch.zeros((self.subbands, self.subbands, self.subbands)).float()
|
420 |
-
for k in range(self.subbands):
|
421 |
-
updown_filter[k, k, 0] = 1.0
|
422 |
-
self.register_buffer("updown_filter", updown_filter)
|
423 |
-
self.multistream_conv_post = weight_norm(Conv1d(4, 1, kernel_size=63, bias=False, padding=get_padding(63, 1)))
|
424 |
-
self.multistream_conv_post.apply(init_weights)
|
425 |
-
|
426 |
-
|
427 |
-
|
428 |
-
def forward(self, x, g=None):
|
429 |
-
stft = TorchSTFT(filter_length=self.gen_istft_n_fft, hop_length=self.gen_istft_hop_size, win_length=self.gen_istft_n_fft).to(x.device)
|
430 |
-
# pqmf = PQMF(x.device)
|
431 |
-
|
432 |
-
x = self.conv_pre(x)#[B, ch, length]
|
433 |
-
|
434 |
-
for i in range(self.num_upsamples):
|
435 |
-
|
436 |
-
|
437 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
438 |
-
x = self.ups[i](x)
|
439 |
-
|
440 |
-
|
441 |
-
xs = None
|
442 |
-
for j in range(self.num_kernels):
|
443 |
-
if xs is None:
|
444 |
-
xs = self.resblocks[i*self.num_kernels+j](x)
|
445 |
-
else:
|
446 |
-
xs += self.resblocks[i*self.num_kernels+j](x)
|
447 |
-
x = xs / self.num_kernels
|
448 |
-
|
449 |
-
x = F.leaky_relu(x)
|
450 |
-
x = self.reflection_pad(x)
|
451 |
-
x = self.subband_conv_post(x)
|
452 |
-
x = torch.reshape(x, (x.shape[0], self.subbands, x.shape[1]//self.subbands, x.shape[-1]))
|
453 |
-
|
454 |
-
spec = torch.exp(x[:,:,:self.post_n_fft // 2 + 1, :])
|
455 |
-
phase = math.pi*torch.sin(x[:,:, self.post_n_fft // 2 + 1:, :])
|
456 |
-
|
457 |
-
y_mb_hat = stft.inverse(torch.reshape(spec, (spec.shape[0]*self.subbands, self.gen_istft_n_fft // 2 + 1, spec.shape[-1])), torch.reshape(phase, (phase.shape[0]*self.subbands, self.gen_istft_n_fft // 2 + 1, phase.shape[-1])))
|
458 |
-
y_mb_hat = torch.reshape(y_mb_hat, (x.shape[0], self.subbands, 1, y_mb_hat.shape[-1]))
|
459 |
-
y_mb_hat = y_mb_hat.squeeze(-2)
|
460 |
-
|
461 |
-
y_mb_hat = F.conv_transpose1d(y_mb_hat, self.updown_filter.to(x.device) * self.subbands, stride=self.subbands)
|
462 |
-
|
463 |
-
y_g_hat = self.multistream_conv_post(y_mb_hat)
|
464 |
-
|
465 |
-
return y_g_hat, y_mb_hat
|
466 |
-
|
467 |
-
def remove_weight_norm(self):
|
468 |
-
print('Removing weight norm...')
|
469 |
-
for l in self.ups:
|
470 |
-
remove_weight_norm(l)
|
471 |
-
for l in self.resblocks:
|
472 |
-
l.remove_weight_norm()
|
473 |
-
|
474 |
-
|
475 |
-
class DiscriminatorP(torch.nn.Module):
|
476 |
-
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
|
477 |
-
super(DiscriminatorP, self).__init__()
|
478 |
-
self.period = period
|
479 |
-
self.use_spectral_norm = use_spectral_norm
|
480 |
-
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
|
481 |
-
self.convs = nn.ModuleList([
|
482 |
-
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
483 |
-
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
484 |
-
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
485 |
-
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
486 |
-
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
|
487 |
-
])
|
488 |
-
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
|
489 |
-
|
490 |
-
def forward(self, x):
|
491 |
-
fmap = []
|
492 |
-
|
493 |
-
# 1d to 2d
|
494 |
-
b, c, t = x.shape
|
495 |
-
if t % self.period != 0: # pad first
|
496 |
-
n_pad = self.period - (t % self.period)
|
497 |
-
x = F.pad(x, (0, n_pad), "reflect")
|
498 |
-
t = t + n_pad
|
499 |
-
x = x.view(b, c, t // self.period, self.period)
|
500 |
-
|
501 |
-
for l in self.convs:
|
502 |
-
x = l(x)
|
503 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
504 |
-
fmap.append(x)
|
505 |
-
x = self.conv_post(x)
|
506 |
-
fmap.append(x)
|
507 |
-
x = torch.flatten(x, 1, -1)
|
508 |
-
|
509 |
-
return x, fmap
|
510 |
-
|
511 |
-
|
512 |
-
class DiscriminatorS(torch.nn.Module):
|
513 |
-
def __init__(self, use_spectral_norm=False):
|
514 |
-
super(DiscriminatorS, self).__init__()
|
515 |
-
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
|
516 |
-
self.convs = nn.ModuleList([
|
517 |
-
norm_f(Conv1d(1, 16, 15, 1, padding=7)),
|
518 |
-
norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
|
519 |
-
norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
|
520 |
-
norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
|
521 |
-
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
|
522 |
-
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
|
523 |
-
])
|
524 |
-
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
|
525 |
-
|
526 |
-
def forward(self, x):
|
527 |
-
fmap = []
|
528 |
-
|
529 |
-
for l in self.convs:
|
530 |
-
x = l(x)
|
531 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
532 |
-
fmap.append(x)
|
533 |
-
x = self.conv_post(x)
|
534 |
-
fmap.append(x)
|
535 |
-
x = torch.flatten(x, 1, -1)
|
536 |
-
|
537 |
-
return x, fmap
|
538 |
-
|
539 |
-
|
540 |
-
class MultiPeriodDiscriminator(torch.nn.Module):
|
541 |
-
def __init__(self, use_spectral_norm=False):
|
542 |
-
super(MultiPeriodDiscriminator, self).__init__()
|
543 |
-
periods = [2,3,5,7,11]
|
544 |
-
|
545 |
-
discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
|
546 |
-
discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
|
547 |
-
self.discriminators = nn.ModuleList(discs)
|
548 |
-
|
549 |
-
def forward(self, y, y_hat):
|
550 |
-
y_d_rs = []
|
551 |
-
y_d_gs = []
|
552 |
-
fmap_rs = []
|
553 |
-
fmap_gs = []
|
554 |
-
for i, d in enumerate(self.discriminators):
|
555 |
-
y_d_r, fmap_r = d(y)
|
556 |
-
y_d_g, fmap_g = d(y_hat)
|
557 |
-
y_d_rs.append(y_d_r)
|
558 |
-
y_d_gs.append(y_d_g)
|
559 |
-
fmap_rs.append(fmap_r)
|
560 |
-
fmap_gs.append(fmap_g)
|
561 |
-
|
562 |
-
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
563 |
-
|
564 |
-
|
565 |
-
|
566 |
-
class SynthesizerTrn(nn.Module):
|
567 |
-
"""
|
568 |
-
Synthesizer for Training
|
569 |
-
"""
|
570 |
-
|
571 |
-
def __init__(self,
|
572 |
-
n_vocab,
|
573 |
-
spec_channels,
|
574 |
-
segment_size,
|
575 |
-
inter_channels,
|
576 |
-
hidden_channels,
|
577 |
-
filter_channels,
|
578 |
-
n_heads,
|
579 |
-
n_layers,
|
580 |
-
kernel_size,
|
581 |
-
p_dropout,
|
582 |
-
resblock,
|
583 |
-
resblock_kernel_sizes,
|
584 |
-
resblock_dilation_sizes,
|
585 |
-
upsample_rates,
|
586 |
-
upsample_initial_channel,
|
587 |
-
upsample_kernel_sizes,
|
588 |
-
gen_istft_n_fft,
|
589 |
-
gen_istft_hop_size,
|
590 |
-
n_speakers=0,
|
591 |
-
gin_channels=0,
|
592 |
-
use_sdp=False,
|
593 |
-
ms_istft_vits=False,
|
594 |
-
mb_istft_vits = False,
|
595 |
-
subbands = False,
|
596 |
-
istft_vits=False,
|
597 |
-
**kwargs):
|
598 |
-
|
599 |
-
super().__init__()
|
600 |
-
self.n_vocab = n_vocab
|
601 |
-
self.spec_channels = spec_channels
|
602 |
-
self.inter_channels = inter_channels
|
603 |
-
self.hidden_channels = hidden_channels
|
604 |
-
self.filter_channels = filter_channels
|
605 |
-
self.n_heads = n_heads
|
606 |
-
self.n_layers = n_layers
|
607 |
-
self.kernel_size = kernel_size
|
608 |
-
self.p_dropout = p_dropout
|
609 |
-
self.resblock = resblock
|
610 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
611 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
612 |
-
self.upsample_rates = upsample_rates
|
613 |
-
self.upsample_initial_channel = upsample_initial_channel
|
614 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
615 |
-
self.segment_size = segment_size
|
616 |
-
self.n_speakers = n_speakers
|
617 |
-
self.gin_channels = gin_channels
|
618 |
-
self.ms_istft_vits = ms_istft_vits
|
619 |
-
self.mb_istft_vits = mb_istft_vits
|
620 |
-
self.istft_vits = istft_vits
|
621 |
-
|
622 |
-
self.use_sdp = use_sdp
|
623 |
-
|
624 |
-
self.enc_p = TextEncoder(n_vocab,
|
625 |
-
inter_channels,
|
626 |
-
hidden_channels,
|
627 |
-
filter_channels,
|
628 |
-
n_heads,
|
629 |
-
n_layers,
|
630 |
-
kernel_size,
|
631 |
-
p_dropout)
|
632 |
-
if mb_istft_vits == True:
|
633 |
-
print('Mutli-band iSTFT VITS')
|
634 |
-
self.dec = Multiband_iSTFT_Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size, subbands, gin_channels=gin_channels)
|
635 |
-
elif ms_istft_vits == True:
|
636 |
-
print('Mutli-stream iSTFT VITS')
|
637 |
-
self.dec = Multistream_iSTFT_Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size, subbands, gin_channels=gin_channels)
|
638 |
-
elif istft_vits == True:
|
639 |
-
print('iSTFT-VITS')
|
640 |
-
self.dec = iSTFT_Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size, gin_channels=gin_channels)
|
641 |
-
else:
|
642 |
-
print('Decoder Error in json file')
|
643 |
-
|
644 |
-
self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
|
645 |
-
self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
|
646 |
-
|
647 |
-
if use_sdp:
|
648 |
-
self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
|
649 |
-
else:
|
650 |
-
self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
|
651 |
-
|
652 |
-
if n_speakers > 1:
|
653 |
-
self.emb_g = nn.Embedding(n_speakers, gin_channels)
|
654 |
-
|
655 |
-
def forward(self, x, x_lengths, y, y_lengths, sid=None):
|
656 |
-
|
657 |
-
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
|
658 |
-
if self.n_speakers > 0:
|
659 |
-
g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
|
660 |
-
else:
|
661 |
-
g = None
|
662 |
-
|
663 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
664 |
-
z_p = self.flow(z, y_mask, g=g)
|
665 |
-
|
666 |
-
with torch.no_grad():
|
667 |
-
# negative cross-entropy
|
668 |
-
s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
|
669 |
-
neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
|
670 |
-
neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
|
671 |
-
neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
|
672 |
-
neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
|
673 |
-
neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
|
674 |
-
|
675 |
-
attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
|
676 |
-
attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
|
677 |
-
|
678 |
-
w = attn.sum(2)
|
679 |
-
if self.use_sdp:
|
680 |
-
l_length = self.dp(x, x_mask, w, g=g)
|
681 |
-
l_length = l_length / torch.sum(x_mask)
|
682 |
-
else:
|
683 |
-
logw_ = torch.log(w + 1e-6) * x_mask
|
684 |
-
logw = self.dp(x, x_mask, g=g)
|
685 |
-
l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
|
686 |
-
|
687 |
-
# expand prior
|
688 |
-
m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
|
689 |
-
logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
|
690 |
-
|
691 |
-
z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
|
692 |
-
o, o_mb = self.dec(z_slice, g=g)
|
693 |
-
return o, o_mb, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
694 |
-
|
695 |
-
def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
|
696 |
-
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
|
697 |
-
if self.n_speakers > 0:
|
698 |
-
g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
|
699 |
-
else:
|
700 |
-
g = None
|
701 |
-
|
702 |
-
if self.use_sdp:
|
703 |
-
logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
|
704 |
-
else:
|
705 |
-
logw = self.dp(x, x_mask, g=g)
|
706 |
-
w = torch.exp(logw) * x_mask * length_scale
|
707 |
-
w_ceil = torch.ceil(w)
|
708 |
-
y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
|
709 |
-
y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
|
710 |
-
attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
|
711 |
-
attn = commons.generate_path(w_ceil, attn_mask)
|
712 |
-
|
713 |
-
m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
|
714 |
-
logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
|
715 |
-
|
716 |
-
z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
|
717 |
-
z = self.flow(z_p, y_mask, g=g, reverse=True)
|
718 |
-
o, o_mb = self.dec((z * y_mask)[:,:,:max_len], g=g)
|
719 |
-
return o, o_mb, attn, y_mask, (z, z_p, m_p, logs_p)
|
720 |
-
|
721 |
-
def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
|
722 |
-
assert self.n_speakers > 0, "n_speakers have to be larger than 0."
|
723 |
-
g_src = self.emb_g(sid_src).unsqueeze(-1)
|
724 |
-
g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
|
725 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
|
726 |
-
z_p = self.flow(z, y_mask, g=g_src)
|
727 |
-
z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
|
728 |
-
o_hat, o_hat_mb = self.dec(z_hat * y_mask, g=g_tgt)
|
729 |
-
return o_hat, o_hat_mb, y_mask, (z, z_p, z_hat)
|
730 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/data/extract_mel_spectrogram.py
DELETED
@@ -1,151 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import os
|
3 |
-
import os.path as P
|
4 |
-
from copy import deepcopy
|
5 |
-
from functools import partial
|
6 |
-
from glob import glob
|
7 |
-
from multiprocessing import Pool
|
8 |
-
from pathlib import Path
|
9 |
-
|
10 |
-
import librosa
|
11 |
-
import numpy as np
|
12 |
-
import torchvision
|
13 |
-
|
14 |
-
|
15 |
-
class MelSpectrogram(object):
|
16 |
-
def __init__(self, sr, nfft, fmin, fmax, nmels, hoplen, spec_power, inverse=False):
|
17 |
-
self.sr = sr
|
18 |
-
self.nfft = nfft
|
19 |
-
self.fmin = fmin
|
20 |
-
self.fmax = fmax
|
21 |
-
self.nmels = nmels
|
22 |
-
self.hoplen = hoplen
|
23 |
-
self.spec_power = spec_power
|
24 |
-
self.inverse = inverse
|
25 |
-
|
26 |
-
self.mel_basis = librosa.filters.mel(sr=sr, n_fft=nfft, fmin=fmin, fmax=fmax, n_mels=nmels)
|
27 |
-
|
28 |
-
def __call__(self, x):
|
29 |
-
if self.inverse:
|
30 |
-
spec = librosa.feature.inverse.mel_to_stft(
|
31 |
-
x, sr=self.sr, n_fft=self.nfft, fmin=self.fmin, fmax=self.fmax, power=self.spec_power
|
32 |
-
)
|
33 |
-
wav = librosa.griffinlim(spec, hop_length=self.hoplen)
|
34 |
-
return wav
|
35 |
-
else:
|
36 |
-
spec = np.abs(librosa.stft(x, n_fft=self.nfft, hop_length=self.hoplen)) ** self.spec_power
|
37 |
-
mel_spec = np.dot(self.mel_basis, spec)
|
38 |
-
return mel_spec
|
39 |
-
|
40 |
-
class LowerThresh(object):
|
41 |
-
def __init__(self, min_val, inverse=False):
|
42 |
-
self.min_val = min_val
|
43 |
-
self.inverse = inverse
|
44 |
-
|
45 |
-
def __call__(self, x):
|
46 |
-
if self.inverse:
|
47 |
-
return x
|
48 |
-
else:
|
49 |
-
return np.maximum(self.min_val, x)
|
50 |
-
|
51 |
-
class Add(object):
|
52 |
-
def __init__(self, val, inverse=False):
|
53 |
-
self.inverse = inverse
|
54 |
-
self.val = val
|
55 |
-
|
56 |
-
def __call__(self, x):
|
57 |
-
if self.inverse:
|
58 |
-
return x - self.val
|
59 |
-
else:
|
60 |
-
return x + self.val
|
61 |
-
|
62 |
-
class Subtract(Add):
|
63 |
-
def __init__(self, val, inverse=False):
|
64 |
-
self.inverse = inverse
|
65 |
-
self.val = val
|
66 |
-
|
67 |
-
def __call__(self, x):
|
68 |
-
if self.inverse:
|
69 |
-
return x + self.val
|
70 |
-
else:
|
71 |
-
return x - self.val
|
72 |
-
|
73 |
-
class Multiply(object):
|
74 |
-
def __init__(self, val, inverse=False) -> None:
|
75 |
-
self.val = val
|
76 |
-
self.inverse = inverse
|
77 |
-
|
78 |
-
def __call__(self, x):
|
79 |
-
if self.inverse:
|
80 |
-
return x / self.val
|
81 |
-
else:
|
82 |
-
return x * self.val
|
83 |
-
|
84 |
-
class Divide(Multiply):
|
85 |
-
def __init__(self, val, inverse=False):
|
86 |
-
self.inverse = inverse
|
87 |
-
self.val = val
|
88 |
-
|
89 |
-
def __call__(self, x):
|
90 |
-
if self.inverse:
|
91 |
-
return x * self.val
|
92 |
-
else:
|
93 |
-
return x / self.val
|
94 |
-
|
95 |
-
class Log10(object):
|
96 |
-
def __init__(self, inverse=False):
|
97 |
-
self.inverse = inverse
|
98 |
-
|
99 |
-
def __call__(self, x):
|
100 |
-
if self.inverse:
|
101 |
-
return 10 ** x
|
102 |
-
else:
|
103 |
-
return np.log10(x)
|
104 |
-
|
105 |
-
class Clip(object):
|
106 |
-
def __init__(self, min_val, max_val, inverse=False):
|
107 |
-
self.min_val = min_val
|
108 |
-
self.max_val = max_val
|
109 |
-
self.inverse = inverse
|
110 |
-
|
111 |
-
def __call__(self, x):
|
112 |
-
if self.inverse:
|
113 |
-
return x
|
114 |
-
else:
|
115 |
-
return np.clip(x, self.min_val, self.max_val)
|
116 |
-
|
117 |
-
class TrimSpec(object):
|
118 |
-
def __init__(self, max_len, inverse=False):
|
119 |
-
self.max_len = max_len
|
120 |
-
self.inverse = inverse
|
121 |
-
|
122 |
-
def __call__(self, x):
|
123 |
-
if self.inverse:
|
124 |
-
return x
|
125 |
-
else:
|
126 |
-
return x[:, :self.max_len]
|
127 |
-
|
128 |
-
class MaxNorm(object):
|
129 |
-
def __init__(self, inverse=False):
|
130 |
-
self.inverse = inverse
|
131 |
-
self.eps = 1e-10
|
132 |
-
|
133 |
-
def __call__(self, x):
|
134 |
-
if self.inverse:
|
135 |
-
return x
|
136 |
-
else:
|
137 |
-
return x / (x.max() + self.eps)
|
138 |
-
|
139 |
-
|
140 |
-
TRANSFORMS_16000 = torchvision.transforms.Compose([
|
141 |
-
MelSpectrogram(sr=16000, nfft=1024, fmin=125, fmax=7600, nmels=80, hoplen=1024//4, spec_power=1),
|
142 |
-
LowerThresh(1e-5),
|
143 |
-
Log10(),
|
144 |
-
Multiply(20),
|
145 |
-
Subtract(20),
|
146 |
-
Add(100),
|
147 |
-
Divide(100),
|
148 |
-
Clip(0, 1.0)
|
149 |
-
# TrimSpec(860)
|
150 |
-
])
|
151 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/transform.py
DELETED
@@ -1,30 +0,0 @@
|
|
1 |
-
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
|
2 |
-
CenterCrop
|
3 |
-
|
4 |
-
|
5 |
-
def _convert_to_rgb(image):
|
6 |
-
return image.convert('RGB')
|
7 |
-
|
8 |
-
|
9 |
-
def image_transform(
|
10 |
-
image_size: int,
|
11 |
-
is_train: bool,
|
12 |
-
mean=(0.48145466, 0.4578275, 0.40821073),
|
13 |
-
std=(0.26862954, 0.26130258, 0.27577711)
|
14 |
-
):
|
15 |
-
normalize = Normalize(mean=mean, std=std)
|
16 |
-
if is_train:
|
17 |
-
return Compose([
|
18 |
-
RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
|
19 |
-
_convert_to_rgb,
|
20 |
-
ToTensor(),
|
21 |
-
normalize,
|
22 |
-
])
|
23 |
-
else:
|
24 |
-
return Compose([
|
25 |
-
Resize(image_size, interpolation=InterpolationMode.BICUBIC),
|
26 |
-
CenterCrop(image_size),
|
27 |
-
_convert_to_rgb,
|
28 |
-
ToTensor(),
|
29 |
-
normalize,
|
30 |
-
])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ALSv/Chat-with-Llama-2-70b/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Lauche-AI LEU-Chatbot
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: yellow
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.44.3
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/custom_model.py
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
model = dict(
|
2 |
-
type='ImageClassifier', # 主模型类型(对于图像分类任务,使用 `ImageClassifier`)
|
3 |
-
backbone=dict(
|
4 |
-
type='ResNet', # 主干网络类型
|
5 |
-
# 除了 `type` 之外的所有字段都来自 `ResNet` 类的 __init__ 方法
|
6 |
-
# 可查阅 https://mmpretrain.readthedocs.io/zh_CN/latest/api/generated/mmpretrain.models.backbones.ResNet.html
|
7 |
-
depth=50,
|
8 |
-
num_stages=4, # 主干网络状态(stages)的数目,这些状态产生的特征图作为后续的 head 的输入。
|
9 |
-
in_channels=3, # 输入图像的通道数
|
10 |
-
out_indices=(3, ), # 输出的特征图输出索引。
|
11 |
-
frozen_stages=-1, # 冻结主干网的层数
|
12 |
-
style='pytorch'),
|
13 |
-
neck=dict(type='GlobalAveragePooling'), # 颈网络类型
|
14 |
-
head=dict(
|
15 |
-
type='LinearClsHead', # 分类颈网络类型
|
16 |
-
# 除了 `type` 之外的所有字段都来自 `LinearClsHead` 类的 __init__ 方法
|
17 |
-
# 可查阅 https://mmpretrain.readthedocs.io/zh_CN/latest/api/generated/mmpretrain.models.heads.LinearClsHead.html
|
18 |
-
num_classes=7, # 分类类别数
|
19 |
-
in_channels=2048,
|
20 |
-
loss=dict(type='CrossEntropyLoss', loss_weight=1.0), # 损失函数配置信息
|
21 |
-
topk=(1, 3), # 评估指标,Top-k 准确率
|
22 |
-
))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/H2o.py
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import json
|
4 |
-
import uuid
|
5 |
-
|
6 |
-
from aiohttp import ClientSession
|
7 |
-
|
8 |
-
from ..typing import AsyncGenerator
|
9 |
-
from .base_provider import AsyncGeneratorProvider, format_prompt
|
10 |
-
|
11 |
-
|
12 |
-
class H2o(AsyncGeneratorProvider):
|
13 |
-
url = "https://gpt-gm.h2o.ai"
|
14 |
-
working = True
|
15 |
-
model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
|
16 |
-
|
17 |
-
@classmethod
|
18 |
-
async def create_async_generator(
|
19 |
-
cls,
|
20 |
-
model: str,
|
21 |
-
messages: list[dict[str, str]],
|
22 |
-
proxy: str = None,
|
23 |
-
**kwargs
|
24 |
-
) -> AsyncGenerator:
|
25 |
-
model = model if model else cls.model
|
26 |
-
headers = {"Referer": cls.url + "/"}
|
27 |
-
|
28 |
-
async with ClientSession(
|
29 |
-
headers=headers
|
30 |
-
) as session:
|
31 |
-
data = {
|
32 |
-
"ethicsModalAccepted": "true",
|
33 |
-
"shareConversationsWithModelAuthors": "true",
|
34 |
-
"ethicsModalAcceptedAt": "",
|
35 |
-
"activeModel": model,
|
36 |
-
"searchEnabled": "true",
|
37 |
-
}
|
38 |
-
async with session.post(
|
39 |
-
f"{cls.url}/settings",
|
40 |
-
proxy=proxy,
|
41 |
-
data=data
|
42 |
-
) as response:
|
43 |
-
response.raise_for_status()
|
44 |
-
|
45 |
-
async with session.post(
|
46 |
-
f"{cls.url}/conversation",
|
47 |
-
proxy=proxy,
|
48 |
-
json={"model": model},
|
49 |
-
) as response:
|
50 |
-
response.raise_for_status()
|
51 |
-
conversationId = (await response.json())["conversationId"]
|
52 |
-
|
53 |
-
data = {
|
54 |
-
"inputs": format_prompt(messages),
|
55 |
-
"parameters": {
|
56 |
-
"temperature": 0.4,
|
57 |
-
"truncate": 2048,
|
58 |
-
"max_new_tokens": 1024,
|
59 |
-
"do_sample": True,
|
60 |
-
"repetition_penalty": 1.2,
|
61 |
-
"return_full_text": False,
|
62 |
-
**kwargs
|
63 |
-
},
|
64 |
-
"stream": True,
|
65 |
-
"options": {
|
66 |
-
"id": str(uuid.uuid4()),
|
67 |
-
"response_id": str(uuid.uuid4()),
|
68 |
-
"is_retry": False,
|
69 |
-
"use_cache": False,
|
70 |
-
"web_search_id": "",
|
71 |
-
},
|
72 |
-
}
|
73 |
-
async with session.post(
|
74 |
-
f"{cls.url}/conversation/{conversationId}",
|
75 |
-
proxy=proxy,
|
76 |
-
json=data
|
77 |
-
) as response:
|
78 |
-
start = "data:"
|
79 |
-
async for line in response.content:
|
80 |
-
line = line.decode("utf-8")
|
81 |
-
if line and line.startswith(start):
|
82 |
-
line = json.loads(line[len(start):-1])
|
83 |
-
if not line["token"]["special"]:
|
84 |
-
yield line["token"]["text"]
|
85 |
-
|
86 |
-
async with session.delete(
|
87 |
-
f"{cls.url}/conversation/{conversationId}",
|
88 |
-
proxy=proxy,
|
89 |
-
json=data
|
90 |
-
) as response:
|
91 |
-
response.raise_for_status()
|
92 |
-
|
93 |
-
|
94 |
-
@classmethod
|
95 |
-
@property
|
96 |
-
def params(cls):
|
97 |
-
params = [
|
98 |
-
("model", "str"),
|
99 |
-
("messages", "list[dict[str, str]]"),
|
100 |
-
("stream", "bool"),
|
101 |
-
("temperature", "float"),
|
102 |
-
("truncate", "int"),
|
103 |
-
("max_new_tokens", "int"),
|
104 |
-
("do_sample", "bool"),
|
105 |
-
("repetition_penalty", "float"),
|
106 |
-
("return_full_text", "bool"),
|
107 |
-
]
|
108 |
-
param = ", ".join([": ".join(p) for p in params])
|
109 |
-
return f"g4f.provider.{cls.__name__} supports: ({param})"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Vercel.py
DELETED
@@ -1,377 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import json, base64, requests, execjs, random, uuid
|
4 |
-
|
5 |
-
from ..typing import Any, TypedDict, CreateResult
|
6 |
-
from .base_provider import BaseProvider
|
7 |
-
from abc import abstractmethod
|
8 |
-
|
9 |
-
|
10 |
-
class Vercel(BaseProvider):
|
11 |
-
url = 'https://sdk.vercel.ai'
|
12 |
-
working = True
|
13 |
-
supports_gpt_35_turbo = True
|
14 |
-
supports_stream = True
|
15 |
-
|
16 |
-
@staticmethod
|
17 |
-
@abstractmethod
|
18 |
-
def create_completion(
|
19 |
-
model: str,
|
20 |
-
messages: list[dict[str, str]],
|
21 |
-
stream: bool,
|
22 |
-
**kwargs
|
23 |
-
) -> CreateResult:
|
24 |
-
if not model:
|
25 |
-
model = "gpt-3.5-turbo"
|
26 |
-
elif model not in model_info:
|
27 |
-
raise ValueError(f"Model are not supported: {model}")
|
28 |
-
|
29 |
-
headers = {
|
30 |
-
'authority' : 'sdk.vercel.ai',
|
31 |
-
'accept' : '*/*',
|
32 |
-
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
33 |
-
'cache-control' : 'no-cache',
|
34 |
-
'content-type' : 'application/json',
|
35 |
-
'custom-encoding' : get_anti_bot_token(),
|
36 |
-
'origin' : 'https://sdk.vercel.ai',
|
37 |
-
'pragma' : 'no-cache',
|
38 |
-
'referer' : 'https://sdk.vercel.ai/',
|
39 |
-
'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
|
40 |
-
'sec-ch-ua-mobile' : '?0',
|
41 |
-
'sec-ch-ua-platform': '"macOS"',
|
42 |
-
'sec-fetch-dest' : 'empty',
|
43 |
-
'sec-fetch-mode' : 'cors',
|
44 |
-
'sec-fetch-site' : 'same-origin',
|
45 |
-
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
|
46 |
-
random.randint(99, 999),
|
47 |
-
random.randint(99, 999)
|
48 |
-
)
|
49 |
-
}
|
50 |
-
|
51 |
-
json_data = {
|
52 |
-
'model' : model_info[model]['id'],
|
53 |
-
'messages' : messages,
|
54 |
-
'playgroundId': str(uuid.uuid4()),
|
55 |
-
'chatIndex' : 0} | model_info[model]['default_params']
|
56 |
-
|
57 |
-
max_retries = kwargs.get('max_retries', 20)
|
58 |
-
for i in range(max_retries):
|
59 |
-
response = requests.post('https://sdk.vercel.ai/api/generate',
|
60 |
-
headers=headers, json=json_data, stream=True)
|
61 |
-
try:
|
62 |
-
response.raise_for_status()
|
63 |
-
except:
|
64 |
-
continue
|
65 |
-
for token in response.iter_content(chunk_size=None):
|
66 |
-
yield token.decode()
|
67 |
-
break
|
68 |
-
|
69 |
-
|
70 |
-
def get_anti_bot_token() -> str:
|
71 |
-
headers = {
|
72 |
-
'authority' : 'sdk.vercel.ai',
|
73 |
-
'accept' : '*/*',
|
74 |
-
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
75 |
-
'cache-control' : 'no-cache',
|
76 |
-
'pragma' : 'no-cache',
|
77 |
-
'referer' : 'https://sdk.vercel.ai/',
|
78 |
-
'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
|
79 |
-
'sec-ch-ua-mobile' : '?0',
|
80 |
-
'sec-ch-ua-platform': '"macOS"',
|
81 |
-
'sec-fetch-dest' : 'empty',
|
82 |
-
'sec-fetch-mode' : 'cors',
|
83 |
-
'sec-fetch-site' : 'same-origin',
|
84 |
-
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
|
85 |
-
random.randint(99, 999),
|
86 |
-
random.randint(99, 999)
|
87 |
-
)
|
88 |
-
}
|
89 |
-
|
90 |
-
response = requests.get('https://sdk.vercel.ai/openai.jpeg',
|
91 |
-
headers=headers).text
|
92 |
-
|
93 |
-
raw_data = json.loads(base64.b64decode(response,
|
94 |
-
validate=True))
|
95 |
-
|
96 |
-
js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
|
97 |
-
return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
|
98 |
-
|
99 |
-
raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']},
|
100 |
-
separators = (",", ":"))
|
101 |
-
|
102 |
-
return base64.b64encode(raw_token.encode('utf-16le')).decode()
|
103 |
-
|
104 |
-
class ModelInfo(TypedDict):
|
105 |
-
id: str
|
106 |
-
default_params: dict[str, Any]
|
107 |
-
|
108 |
-
model_info: dict[str, ModelInfo] = {
|
109 |
-
'claude-instant-v1': {
|
110 |
-
'id': 'anthropic:claude-instant-v1',
|
111 |
-
'default_params': {
|
112 |
-
'temperature': 1,
|
113 |
-
'maximumLength': 1024,
|
114 |
-
'topP': 1,
|
115 |
-
'topK': 1,
|
116 |
-
'presencePenalty': 1,
|
117 |
-
'frequencyPenalty': 1,
|
118 |
-
'stopSequences': ['\n\nHuman:'],
|
119 |
-
},
|
120 |
-
},
|
121 |
-
'claude-v1': {
|
122 |
-
'id': 'anthropic:claude-v1',
|
123 |
-
'default_params': {
|
124 |
-
'temperature': 1,
|
125 |
-
'maximumLength': 1024,
|
126 |
-
'topP': 1,
|
127 |
-
'topK': 1,
|
128 |
-
'presencePenalty': 1,
|
129 |
-
'frequencyPenalty': 1,
|
130 |
-
'stopSequences': ['\n\nHuman:'],
|
131 |
-
},
|
132 |
-
},
|
133 |
-
'claude-v2': {
|
134 |
-
'id': 'anthropic:claude-v2',
|
135 |
-
'default_params': {
|
136 |
-
'temperature': 1,
|
137 |
-
'maximumLength': 1024,
|
138 |
-
'topP': 1,
|
139 |
-
'topK': 1,
|
140 |
-
'presencePenalty': 1,
|
141 |
-
'frequencyPenalty': 1,
|
142 |
-
'stopSequences': ['\n\nHuman:'],
|
143 |
-
},
|
144 |
-
},
|
145 |
-
'a16z-infra/llama7b-v2-chat': {
|
146 |
-
'id': 'replicate:a16z-infra/llama7b-v2-chat',
|
147 |
-
'default_params': {
|
148 |
-
'temperature': 0.75,
|
149 |
-
'maximumLength': 3000,
|
150 |
-
'topP': 1,
|
151 |
-
'repetitionPenalty': 1,
|
152 |
-
},
|
153 |
-
},
|
154 |
-
'a16z-infra/llama13b-v2-chat': {
|
155 |
-
'id': 'replicate:a16z-infra/llama13b-v2-chat',
|
156 |
-
'default_params': {
|
157 |
-
'temperature': 0.75,
|
158 |
-
'maximumLength': 3000,
|
159 |
-
'topP': 1,
|
160 |
-
'repetitionPenalty': 1,
|
161 |
-
},
|
162 |
-
},
|
163 |
-
'replicate/llama-2-70b-chat': {
|
164 |
-
'id': 'replicate:replicate/llama-2-70b-chat',
|
165 |
-
'default_params': {
|
166 |
-
'temperature': 0.75,
|
167 |
-
'maximumLength': 3000,
|
168 |
-
'topP': 1,
|
169 |
-
'repetitionPenalty': 1,
|
170 |
-
},
|
171 |
-
},
|
172 |
-
'bigscience/bloom': {
|
173 |
-
'id': 'huggingface:bigscience/bloom',
|
174 |
-
'default_params': {
|
175 |
-
'temperature': 0.5,
|
176 |
-
'maximumLength': 1024,
|
177 |
-
'topP': 0.95,
|
178 |
-
'topK': 4,
|
179 |
-
'repetitionPenalty': 1.03,
|
180 |
-
},
|
181 |
-
},
|
182 |
-
'google/flan-t5-xxl': {
|
183 |
-
'id': 'huggingface:google/flan-t5-xxl',
|
184 |
-
'default_params': {
|
185 |
-
'temperature': 0.5,
|
186 |
-
'maximumLength': 1024,
|
187 |
-
'topP': 0.95,
|
188 |
-
'topK': 4,
|
189 |
-
'repetitionPenalty': 1.03,
|
190 |
-
},
|
191 |
-
},
|
192 |
-
'EleutherAI/gpt-neox-20b': {
|
193 |
-
'id': 'huggingface:EleutherAI/gpt-neox-20b',
|
194 |
-
'default_params': {
|
195 |
-
'temperature': 0.5,
|
196 |
-
'maximumLength': 1024,
|
197 |
-
'topP': 0.95,
|
198 |
-
'topK': 4,
|
199 |
-
'repetitionPenalty': 1.03,
|
200 |
-
'stopSequences': [],
|
201 |
-
},
|
202 |
-
},
|
203 |
-
'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
|
204 |
-
'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
|
205 |
-
'default_params': {
|
206 |
-
'maximumLength': 1024,
|
207 |
-
'typicalP': 0.2,
|
208 |
-
'repetitionPenalty': 1,
|
209 |
-
},
|
210 |
-
},
|
211 |
-
'OpenAssistant/oasst-sft-1-pythia-12b': {
|
212 |
-
'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
|
213 |
-
'default_params': {
|
214 |
-
'maximumLength': 1024,
|
215 |
-
'typicalP': 0.2,
|
216 |
-
'repetitionPenalty': 1,
|
217 |
-
},
|
218 |
-
},
|
219 |
-
'bigcode/santacoder': {
|
220 |
-
'id': 'huggingface:bigcode/santacoder',
|
221 |
-
'default_params': {
|
222 |
-
'temperature': 0.5,
|
223 |
-
'maximumLength': 1024,
|
224 |
-
'topP': 0.95,
|
225 |
-
'topK': 4,
|
226 |
-
'repetitionPenalty': 1.03,
|
227 |
-
},
|
228 |
-
},
|
229 |
-
'command-light-nightly': {
|
230 |
-
'id': 'cohere:command-light-nightly',
|
231 |
-
'default_params': {
|
232 |
-
'temperature': 0.9,
|
233 |
-
'maximumLength': 1024,
|
234 |
-
'topP': 1,
|
235 |
-
'topK': 0,
|
236 |
-
'presencePenalty': 0,
|
237 |
-
'frequencyPenalty': 0,
|
238 |
-
'stopSequences': [],
|
239 |
-
},
|
240 |
-
},
|
241 |
-
'command-nightly': {
|
242 |
-
'id': 'cohere:command-nightly',
|
243 |
-
'default_params': {
|
244 |
-
'temperature': 0.9,
|
245 |
-
'maximumLength': 1024,
|
246 |
-
'topP': 1,
|
247 |
-
'topK': 0,
|
248 |
-
'presencePenalty': 0,
|
249 |
-
'frequencyPenalty': 0,
|
250 |
-
'stopSequences': [],
|
251 |
-
},
|
252 |
-
},
|
253 |
-
'gpt-4': {
|
254 |
-
'id': 'openai:gpt-4',
|
255 |
-
'default_params': {
|
256 |
-
'temperature': 0.7,
|
257 |
-
'maximumLength': 8192,
|
258 |
-
'topP': 1,
|
259 |
-
'presencePenalty': 0,
|
260 |
-
'frequencyPenalty': 0,
|
261 |
-
'stopSequences': [],
|
262 |
-
},
|
263 |
-
},
|
264 |
-
'gpt-4-0613': {
|
265 |
-
'id': 'openai:gpt-4-0613',
|
266 |
-
'default_params': {
|
267 |
-
'temperature': 0.7,
|
268 |
-
'maximumLength': 8192,
|
269 |
-
'topP': 1,
|
270 |
-
'presencePenalty': 0,
|
271 |
-
'frequencyPenalty': 0,
|
272 |
-
'stopSequences': [],
|
273 |
-
},
|
274 |
-
},
|
275 |
-
'code-davinci-002': {
|
276 |
-
'id': 'openai:code-davinci-002',
|
277 |
-
'default_params': {
|
278 |
-
'temperature': 0.5,
|
279 |
-
'maximumLength': 1024,
|
280 |
-
'topP': 1,
|
281 |
-
'presencePenalty': 0,
|
282 |
-
'frequencyPenalty': 0,
|
283 |
-
'stopSequences': [],
|
284 |
-
},
|
285 |
-
},
|
286 |
-
'gpt-3.5-turbo': {
|
287 |
-
'id': 'openai:gpt-3.5-turbo',
|
288 |
-
'default_params': {
|
289 |
-
'temperature': 0.7,
|
290 |
-
'maximumLength': 4096,
|
291 |
-
'topP': 1,
|
292 |
-
'topK': 1,
|
293 |
-
'presencePenalty': 1,
|
294 |
-
'frequencyPenalty': 1,
|
295 |
-
'stopSequences': [],
|
296 |
-
},
|
297 |
-
},
|
298 |
-
'gpt-3.5-turbo-16k': {
|
299 |
-
'id': 'openai:gpt-3.5-turbo-16k',
|
300 |
-
'default_params': {
|
301 |
-
'temperature': 0.7,
|
302 |
-
'maximumLength': 16280,
|
303 |
-
'topP': 1,
|
304 |
-
'topK': 1,
|
305 |
-
'presencePenalty': 1,
|
306 |
-
'frequencyPenalty': 1,
|
307 |
-
'stopSequences': [],
|
308 |
-
},
|
309 |
-
},
|
310 |
-
'gpt-3.5-turbo-16k-0613': {
|
311 |
-
'id': 'openai:gpt-3.5-turbo-16k-0613',
|
312 |
-
'default_params': {
|
313 |
-
'temperature': 0.7,
|
314 |
-
'maximumLength': 16280,
|
315 |
-
'topP': 1,
|
316 |
-
'topK': 1,
|
317 |
-
'presencePenalty': 1,
|
318 |
-
'frequencyPenalty': 1,
|
319 |
-
'stopSequences': [],
|
320 |
-
},
|
321 |
-
},
|
322 |
-
'text-ada-001': {
|
323 |
-
'id': 'openai:text-ada-001',
|
324 |
-
'default_params': {
|
325 |
-
'temperature': 0.5,
|
326 |
-
'maximumLength': 1024,
|
327 |
-
'topP': 1,
|
328 |
-
'presencePenalty': 0,
|
329 |
-
'frequencyPenalty': 0,
|
330 |
-
'stopSequences': [],
|
331 |
-
},
|
332 |
-
},
|
333 |
-
'text-babbage-001': {
|
334 |
-
'id': 'openai:text-babbage-001',
|
335 |
-
'default_params': {
|
336 |
-
'temperature': 0.5,
|
337 |
-
'maximumLength': 1024,
|
338 |
-
'topP': 1,
|
339 |
-
'presencePenalty': 0,
|
340 |
-
'frequencyPenalty': 0,
|
341 |
-
'stopSequences': [],
|
342 |
-
},
|
343 |
-
},
|
344 |
-
'text-curie-001': {
|
345 |
-
'id': 'openai:text-curie-001',
|
346 |
-
'default_params': {
|
347 |
-
'temperature': 0.5,
|
348 |
-
'maximumLength': 1024,
|
349 |
-
'topP': 1,
|
350 |
-
'presencePenalty': 0,
|
351 |
-
'frequencyPenalty': 0,
|
352 |
-
'stopSequences': [],
|
353 |
-
},
|
354 |
-
},
|
355 |
-
'text-davinci-002': {
|
356 |
-
'id': 'openai:text-davinci-002',
|
357 |
-
'default_params': {
|
358 |
-
'temperature': 0.5,
|
359 |
-
'maximumLength': 1024,
|
360 |
-
'topP': 1,
|
361 |
-
'presencePenalty': 0,
|
362 |
-
'frequencyPenalty': 0,
|
363 |
-
'stopSequences': [],
|
364 |
-
},
|
365 |
-
},
|
366 |
-
'text-davinci-003': {
|
367 |
-
'id': 'openai:text-davinci-003',
|
368 |
-
'default_params': {
|
369 |
-
'temperature': 0.5,
|
370 |
-
'maximumLength': 4097,
|
371 |
-
'topP': 1,
|
372 |
-
'presencePenalty': 0,
|
373 |
-
'frequencyPenalty': 0,
|
374 |
-
'stopSequences': [],
|
375 |
-
},
|
376 |
-
},
|
377 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Adapter/T2I-Adapter/ldm/lr_scheduler.py
DELETED
@@ -1,98 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
|
3 |
-
|
4 |
-
class LambdaWarmUpCosineScheduler:
|
5 |
-
"""
|
6 |
-
note: use with a base_lr of 1.0
|
7 |
-
"""
|
8 |
-
def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
|
9 |
-
self.lr_warm_up_steps = warm_up_steps
|
10 |
-
self.lr_start = lr_start
|
11 |
-
self.lr_min = lr_min
|
12 |
-
self.lr_max = lr_max
|
13 |
-
self.lr_max_decay_steps = max_decay_steps
|
14 |
-
self.last_lr = 0.
|
15 |
-
self.verbosity_interval = verbosity_interval
|
16 |
-
|
17 |
-
def schedule(self, n, **kwargs):
|
18 |
-
if self.verbosity_interval > 0:
|
19 |
-
if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
|
20 |
-
if n < self.lr_warm_up_steps:
|
21 |
-
lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
|
22 |
-
self.last_lr = lr
|
23 |
-
return lr
|
24 |
-
else:
|
25 |
-
t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
|
26 |
-
t = min(t, 1.0)
|
27 |
-
lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
|
28 |
-
1 + np.cos(t * np.pi))
|
29 |
-
self.last_lr = lr
|
30 |
-
return lr
|
31 |
-
|
32 |
-
def __call__(self, n, **kwargs):
|
33 |
-
return self.schedule(n,**kwargs)
|
34 |
-
|
35 |
-
|
36 |
-
class LambdaWarmUpCosineScheduler2:
|
37 |
-
"""
|
38 |
-
supports repeated iterations, configurable via lists
|
39 |
-
note: use with a base_lr of 1.0.
|
40 |
-
"""
|
41 |
-
def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
|
42 |
-
assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
|
43 |
-
self.lr_warm_up_steps = warm_up_steps
|
44 |
-
self.f_start = f_start
|
45 |
-
self.f_min = f_min
|
46 |
-
self.f_max = f_max
|
47 |
-
self.cycle_lengths = cycle_lengths
|
48 |
-
self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
|
49 |
-
self.last_f = 0.
|
50 |
-
self.verbosity_interval = verbosity_interval
|
51 |
-
|
52 |
-
def find_in_interval(self, n):
|
53 |
-
interval = 0
|
54 |
-
for cl in self.cum_cycles[1:]:
|
55 |
-
if n <= cl:
|
56 |
-
return interval
|
57 |
-
interval += 1
|
58 |
-
|
59 |
-
def schedule(self, n, **kwargs):
|
60 |
-
cycle = self.find_in_interval(n)
|
61 |
-
n = n - self.cum_cycles[cycle]
|
62 |
-
if self.verbosity_interval > 0:
|
63 |
-
if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
|
64 |
-
f"current cycle {cycle}")
|
65 |
-
if n < self.lr_warm_up_steps[cycle]:
|
66 |
-
f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
|
67 |
-
self.last_f = f
|
68 |
-
return f
|
69 |
-
else:
|
70 |
-
t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
|
71 |
-
t = min(t, 1.0)
|
72 |
-
f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (
|
73 |
-
1 + np.cos(t * np.pi))
|
74 |
-
self.last_f = f
|
75 |
-
return f
|
76 |
-
|
77 |
-
def __call__(self, n, **kwargs):
|
78 |
-
return self.schedule(n, **kwargs)
|
79 |
-
|
80 |
-
|
81 |
-
class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
|
82 |
-
|
83 |
-
def schedule(self, n, **kwargs):
|
84 |
-
cycle = self.find_in_interval(n)
|
85 |
-
n = n - self.cum_cycles[cycle]
|
86 |
-
if self.verbosity_interval > 0:
|
87 |
-
if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
|
88 |
-
f"current cycle {cycle}")
|
89 |
-
|
90 |
-
if n < self.lr_warm_up_steps[cycle]:
|
91 |
-
f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
|
92 |
-
self.last_f = f
|
93 |
-
return f
|
94 |
-
else:
|
95 |
-
f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle])
|
96 |
-
self.last_f = f
|
97 |
-
return f
|
98 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AdithyaSNair/PCOS_Prediction/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: PCOS Prediction
|
3 |
-
emoji: 👁
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: pink
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.29.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/agentverse/memory/base.py
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
from abc import abstractmethod
|
2 |
-
from typing import Dict, List
|
3 |
-
|
4 |
-
from pydantic import BaseModel, Field
|
5 |
-
|
6 |
-
from agentverse.message import Message
|
7 |
-
|
8 |
-
|
9 |
-
class BaseMemory(BaseModel):
|
10 |
-
@abstractmethod
|
11 |
-
def add_message(self, messages: List[Message]) -> None:
|
12 |
-
pass
|
13 |
-
|
14 |
-
@abstractmethod
|
15 |
-
def to_string(self) -> str:
|
16 |
-
pass
|
17 |
-
|
18 |
-
@abstractmethod
|
19 |
-
def reset(self) -> None:
|
20 |
-
pass
|
21 |
-
|
22 |
-
def to_messages(self) -> List[dict]:
|
23 |
-
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/dialog-quest/DialogQuest.js
DELETED
@@ -1,54 +0,0 @@
|
|
1 |
-
import QuestionManager from '../../plugins/logic/quest/questions/QuestionManager.js';
|
2 |
-
import QuestMethods from './QuestMethods.js';
|
3 |
-
import DataMethods from './DataMethods.js';
|
4 |
-
|
5 |
-
const EE = Phaser.Events.EventEmitter;
|
6 |
-
const GetValue = Phaser.Utils.Objects.GetValue;
|
7 |
-
|
8 |
-
class DialogQuest extends EE {
|
9 |
-
constructor(config) {
|
10 |
-
super();
|
11 |
-
|
12 |
-
if (config === undefined) {
|
13 |
-
config = {};
|
14 |
-
}
|
15 |
-
if (!config.quest) {
|
16 |
-
config.quest = true;
|
17 |
-
}
|
18 |
-
|
19 |
-
this.dialog = GetValue(config, 'dialog', undefined);
|
20 |
-
this.questionManager = new QuestionManager(config);
|
21 |
-
|
22 |
-
// Attach events
|
23 |
-
this.questionManager
|
24 |
-
.on('quest', function (question) {
|
25 |
-
var choices = this.dialog.getElement('choices');
|
26 |
-
var options = question.options, option;
|
27 |
-
for (var i = 0, cnt = choices.length; i < cnt; i++) {
|
28 |
-
option = options[i];
|
29 |
-
if (option) {
|
30 |
-
this.dialog.showChoice(i);
|
31 |
-
this.emit('update-choice', choices[i], option, this);
|
32 |
-
} else {
|
33 |
-
this.dialog.hideChoice(i);
|
34 |
-
}
|
35 |
-
}
|
36 |
-
this.emit('update-dialog', this.dialog, question, this);
|
37 |
-
}, this);
|
38 |
-
|
39 |
-
this.dialog
|
40 |
-
.on('button.click', function (button, groupName, index) {
|
41 |
-
var eventName = 'click-' + ((groupName === 'choices') ? 'choice' : 'action');
|
42 |
-
this.emit(eventName, button, this.dialog, this);
|
43 |
-
}, this)
|
44 |
-
}
|
45 |
-
}
|
46 |
-
|
47 |
-
Object.assign(
|
48 |
-
DialogQuest.prototype,
|
49 |
-
QuestMethods,
|
50 |
-
DataMethods
|
51 |
-
);
|
52 |
-
|
53 |
-
|
54 |
-
export default DialogQuest;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fileselectorbutton/FileSelectorButton.js
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
import Label from '../label/Label.js';
|
2 |
-
import { FileChooser } from '../filechooser/FileChooser.js';
|
3 |
-
import FileChooserMethods from './FileChooserMethods.js';
|
4 |
-
|
5 |
-
const GetValue = Phaser.Utils.Objects.GetValue;
|
6 |
-
|
7 |
-
class FileSelectorButton extends Label {
|
8 |
-
constructor(scene, config) {
|
9 |
-
super(scene, config);
|
10 |
-
this.type = 'rexFileSelectorButton';
|
11 |
-
|
12 |
-
var fileChooser = new FileChooser(scene);
|
13 |
-
scene.add.existing(fileChooser);
|
14 |
-
this.addBackground(fileChooser);
|
15 |
-
|
16 |
-
this.addChildrenMap('fileChooser', fileChooser);
|
17 |
-
|
18 |
-
this.setAccept(GetValue(config, 'accept', ''));
|
19 |
-
this.setMultiple(GetValue(config, 'multiple', false));
|
20 |
-
|
21 |
-
fileChooser
|
22 |
-
.on('change', function (gameObject) {
|
23 |
-
var files = gameObject.files;
|
24 |
-
if (files.length === 0) {
|
25 |
-
return;
|
26 |
-
}
|
27 |
-
|
28 |
-
files = Array.from(files);
|
29 |
-
this.emit('select', files, this);
|
30 |
-
}, this)
|
31 |
-
|
32 |
-
}
|
33 |
-
|
34 |
-
get files() {
|
35 |
-
return this.childrenMap.fileChooser.files;
|
36 |
-
}
|
37 |
-
|
38 |
-
}
|
39 |
-
|
40 |
-
Object.assign(
|
41 |
-
FileSelectorButton.prototype,
|
42 |
-
FileChooserMethods,
|
43 |
-
)
|
44 |
-
|
45 |
-
export default FileSelectorButton;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aloento/9Nine-VITS/modules.py
DELETED
@@ -1,387 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import Conv1d
|
6 |
-
from torch.nn import functional as F
|
7 |
-
from torch.nn.utils import weight_norm, remove_weight_norm
|
8 |
-
|
9 |
-
import commons
|
10 |
-
from commons import init_weights, get_padding
|
11 |
-
from transforms import piecewise_rational_quadratic_transform
|
12 |
-
|
13 |
-
LRELU_SLOPE = 0.1
|
14 |
-
|
15 |
-
|
16 |
-
class LayerNorm(nn.Module):
|
17 |
-
def __init__(self, channels, eps=1e-5):
|
18 |
-
super().__init__()
|
19 |
-
self.channels = channels
|
20 |
-
self.eps = eps
|
21 |
-
|
22 |
-
self.gamma = nn.Parameter(torch.ones(channels))
|
23 |
-
self.beta = nn.Parameter(torch.zeros(channels))
|
24 |
-
|
25 |
-
def forward(self, x):
|
26 |
-
x = x.transpose(1, -1)
|
27 |
-
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
|
28 |
-
return x.transpose(1, -1)
|
29 |
-
|
30 |
-
|
31 |
-
class ConvReluNorm(nn.Module):
|
32 |
-
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
|
33 |
-
super().__init__()
|
34 |
-
self.in_channels = in_channels
|
35 |
-
self.hidden_channels = hidden_channels
|
36 |
-
self.out_channels = out_channels
|
37 |
-
self.kernel_size = kernel_size
|
38 |
-
self.n_layers = n_layers
|
39 |
-
self.p_dropout = p_dropout
|
40 |
-
assert n_layers > 1, "Number of layers should be larger than 0."
|
41 |
-
|
42 |
-
self.conv_layers = nn.ModuleList()
|
43 |
-
self.norm_layers = nn.ModuleList()
|
44 |
-
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
|
45 |
-
self.norm_layers.append(LayerNorm(hidden_channels))
|
46 |
-
self.relu_drop = nn.Sequential(
|
47 |
-
nn.ReLU(),
|
48 |
-
nn.Dropout(p_dropout))
|
49 |
-
for _ in range(n_layers - 1):
|
50 |
-
self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
|
51 |
-
self.norm_layers.append(LayerNorm(hidden_channels))
|
52 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
|
53 |
-
self.proj.weight.data.zero_()
|
54 |
-
self.proj.bias.data.zero_()
|
55 |
-
|
56 |
-
def forward(self, x, x_mask):
|
57 |
-
x_org = x
|
58 |
-
for i in range(self.n_layers):
|
59 |
-
x = self.conv_layers[i](x * x_mask)
|
60 |
-
x = self.norm_layers[i](x)
|
61 |
-
x = self.relu_drop(x)
|
62 |
-
x = x_org + self.proj(x)
|
63 |
-
return x * x_mask
|
64 |
-
|
65 |
-
|
66 |
-
class DDSConv(nn.Module):
|
67 |
-
"""
|
68 |
-
Dialted and Depth-Separable Convolution
|
69 |
-
"""
|
70 |
-
|
71 |
-
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
|
72 |
-
super().__init__()
|
73 |
-
self.channels = channels
|
74 |
-
self.kernel_size = kernel_size
|
75 |
-
self.n_layers = n_layers
|
76 |
-
self.p_dropout = p_dropout
|
77 |
-
|
78 |
-
self.drop = nn.Dropout(p_dropout)
|
79 |
-
self.convs_sep = nn.ModuleList()
|
80 |
-
self.convs_1x1 = nn.ModuleList()
|
81 |
-
self.norms_1 = nn.ModuleList()
|
82 |
-
self.norms_2 = nn.ModuleList()
|
83 |
-
for i in range(n_layers):
|
84 |
-
dilation = kernel_size ** i
|
85 |
-
padding = (kernel_size * dilation - dilation) // 2
|
86 |
-
self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
|
87 |
-
groups=channels, dilation=dilation, padding=padding
|
88 |
-
))
|
89 |
-
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
|
90 |
-
self.norms_1.append(LayerNorm(channels))
|
91 |
-
self.norms_2.append(LayerNorm(channels))
|
92 |
-
|
93 |
-
def forward(self, x, x_mask, g=None):
|
94 |
-
if g is not None:
|
95 |
-
x = x + g
|
96 |
-
for i in range(self.n_layers):
|
97 |
-
y = self.convs_sep[i](x * x_mask)
|
98 |
-
y = self.norms_1[i](y)
|
99 |
-
y = F.gelu(y)
|
100 |
-
y = self.convs_1x1[i](y)
|
101 |
-
y = self.norms_2[i](y)
|
102 |
-
y = F.gelu(y)
|
103 |
-
y = self.drop(y)
|
104 |
-
x = x + y
|
105 |
-
return x * x_mask
|
106 |
-
|
107 |
-
|
108 |
-
class WN(torch.nn.Module):
|
109 |
-
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
|
110 |
-
super(WN, self).__init__()
|
111 |
-
assert (kernel_size % 2 == 1)
|
112 |
-
self.hidden_channels = hidden_channels
|
113 |
-
self.kernel_size = kernel_size,
|
114 |
-
self.dilation_rate = dilation_rate
|
115 |
-
self.n_layers = n_layers
|
116 |
-
self.gin_channels = gin_channels
|
117 |
-
self.p_dropout = p_dropout
|
118 |
-
|
119 |
-
self.in_layers = torch.nn.ModuleList()
|
120 |
-
self.res_skip_layers = torch.nn.ModuleList()
|
121 |
-
self.drop = nn.Dropout(p_dropout)
|
122 |
-
|
123 |
-
if gin_channels != 0:
|
124 |
-
cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
|
125 |
-
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
|
126 |
-
|
127 |
-
for i in range(n_layers):
|
128 |
-
dilation = dilation_rate ** i
|
129 |
-
padding = int((kernel_size * dilation - dilation) / 2)
|
130 |
-
in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
|
131 |
-
dilation=dilation, padding=padding)
|
132 |
-
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
|
133 |
-
self.in_layers.append(in_layer)
|
134 |
-
|
135 |
-
# last one is not necessary
|
136 |
-
if i < n_layers - 1:
|
137 |
-
res_skip_channels = 2 * hidden_channels
|
138 |
-
else:
|
139 |
-
res_skip_channels = hidden_channels
|
140 |
-
|
141 |
-
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
|
142 |
-
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
|
143 |
-
self.res_skip_layers.append(res_skip_layer)
|
144 |
-
|
145 |
-
def forward(self, x, x_mask, g=None, **kwargs):
|
146 |
-
output = torch.zeros_like(x)
|
147 |
-
n_channels_tensor = torch.IntTensor([self.hidden_channels])
|
148 |
-
|
149 |
-
if g is not None:
|
150 |
-
g = self.cond_layer(g)
|
151 |
-
|
152 |
-
for i in range(self.n_layers):
|
153 |
-
x_in = self.in_layers[i](x)
|
154 |
-
if g is not None:
|
155 |
-
cond_offset = i * 2 * self.hidden_channels
|
156 |
-
g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
|
157 |
-
else:
|
158 |
-
g_l = torch.zeros_like(x_in)
|
159 |
-
|
160 |
-
acts = commons.fused_add_tanh_sigmoid_multiply(
|
161 |
-
x_in,
|
162 |
-
g_l,
|
163 |
-
n_channels_tensor)
|
164 |
-
acts = self.drop(acts)
|
165 |
-
|
166 |
-
res_skip_acts = self.res_skip_layers[i](acts)
|
167 |
-
if i < self.n_layers - 1:
|
168 |
-
res_acts = res_skip_acts[:, :self.hidden_channels, :]
|
169 |
-
x = (x + res_acts) * x_mask
|
170 |
-
output = output + res_skip_acts[:, self.hidden_channels:, :]
|
171 |
-
else:
|
172 |
-
output = output + res_skip_acts
|
173 |
-
return output * x_mask
|
174 |
-
|
175 |
-
def remove_weight_norm(self):
|
176 |
-
if self.gin_channels != 0:
|
177 |
-
torch.nn.utils.remove_weight_norm(self.cond_layer)
|
178 |
-
for l in self.in_layers:
|
179 |
-
torch.nn.utils.remove_weight_norm(l)
|
180 |
-
for l in self.res_skip_layers:
|
181 |
-
torch.nn.utils.remove_weight_norm(l)
|
182 |
-
|
183 |
-
|
184 |
-
class ResBlock1(torch.nn.Module):
|
185 |
-
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
|
186 |
-
super(ResBlock1, self).__init__()
|
187 |
-
self.convs1 = nn.ModuleList([
|
188 |
-
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
|
189 |
-
padding=get_padding(kernel_size, dilation[0]))),
|
190 |
-
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
|
191 |
-
padding=get_padding(kernel_size, dilation[1]))),
|
192 |
-
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
|
193 |
-
padding=get_padding(kernel_size, dilation[2])))
|
194 |
-
])
|
195 |
-
self.convs1.apply(init_weights)
|
196 |
-
|
197 |
-
self.convs2 = nn.ModuleList([
|
198 |
-
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
|
199 |
-
padding=get_padding(kernel_size, 1))),
|
200 |
-
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
|
201 |
-
padding=get_padding(kernel_size, 1))),
|
202 |
-
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
|
203 |
-
padding=get_padding(kernel_size, 1)))
|
204 |
-
])
|
205 |
-
self.convs2.apply(init_weights)
|
206 |
-
|
207 |
-
def forward(self, x, x_mask=None):
|
208 |
-
for c1, c2 in zip(self.convs1, self.convs2):
|
209 |
-
xt = F.leaky_relu(x, LRELU_SLOPE)
|
210 |
-
if x_mask is not None:
|
211 |
-
xt = xt * x_mask
|
212 |
-
xt = c1(xt)
|
213 |
-
xt = F.leaky_relu(xt, LRELU_SLOPE)
|
214 |
-
if x_mask is not None:
|
215 |
-
xt = xt * x_mask
|
216 |
-
xt = c2(xt)
|
217 |
-
x = xt + x
|
218 |
-
if x_mask is not None:
|
219 |
-
x = x * x_mask
|
220 |
-
return x
|
221 |
-
|
222 |
-
def remove_weight_norm(self):
|
223 |
-
for l in self.convs1:
|
224 |
-
remove_weight_norm(l)
|
225 |
-
for l in self.convs2:
|
226 |
-
remove_weight_norm(l)
|
227 |
-
|
228 |
-
|
229 |
-
class ResBlock2(torch.nn.Module):
    """Lightweight residual block: two dilated, weight-normalised 1-D convs."""

    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        # One conv per dilation factor; padding keeps the time length fixed.
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=d,
                               padding=get_padding(kernel_size, d)))
            for d in dilation[:2]
        ])
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        """Apply each conv with a leaky-ReLU pre-activation and a residual add."""
        for conv in self.convs:
            h = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                h = h * x_mask
            x = conv(h) + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        """Strip weight normalization from every convolution."""
        for layer in self.convs:
            remove_weight_norm(layer)
|
254 |
-
|
255 |
-
|
256 |
-
class Log(nn.Module):
    """Invertible element-wise log flow: y = log(clamp(x, 1e-5)) inside the mask."""

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if reverse:
            # Inverse pass: exponentiate back to the original domain.
            return torch.exp(x) * x_mask
        y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
        # log|det J| of x -> log(x) is -log(x), summed over channel/time dims.
        logdet = torch.sum(-y, [1, 2])
        return y, logdet
|
265 |
-
|
266 |
-
|
267 |
-
class Flip(nn.Module):
    """Flow step that reverses the channel dimension (volume preserving)."""

    def forward(self, x, *args, reverse=False, **kwargs):
        flipped = torch.flip(x, [1])
        if reverse:
            return flipped
        # A permutation has a unit Jacobian, so the log-determinant is zero.
        logdet = torch.zeros(flipped.size(0)).to(dtype=flipped.dtype, device=flipped.device)
        return flipped, logdet
|
275 |
-
|
276 |
-
|
277 |
-
class ElementwiseAffine(nn.Module):
    """Invertible per-channel affine flow: y = m + exp(logs) * x."""

    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        # Learned shift and log-scale, one value per channel (broadcast over time).
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if reverse:
            # Invert: undo the shift, then the scale; keep padded frames zero.
            return (x - self.m) * torch.exp(-self.logs) * x_mask
        y = (self.m + torch.exp(self.logs) * x) * x_mask
        # log|det J| is the log-scale, summed over the unmasked positions.
        logdet = torch.sum(self.logs * x_mask, [1, 2])
        return y, logdet
|
293 |
-
|
294 |
-
|
295 |
-
class ResidualCouplingLayer(nn.Module):
    """Affine coupling flow: the first half of the channels conditions an
    affine transform of the second half through a WaveNet-style encoder.

    With ``mean_only=True`` the scale is fixed to 1 and only a shift is
    predicted, making the flow volume preserving.
    """

    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 p_dropout=0,
                 gin_channels=0,
                 mean_only=False):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers,
                      p_dropout=p_dropout, gin_channels=gin_channels)
        # Predict either (mean) or (mean, log-scale) for the transformed half.
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        # Zero init so the flow starts out as the identity transform.
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        """Transform ``x`` (optionally conditioned on ``g``); forward passes
        also return the log-determinant of the Jacobian."""
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if self.mean_only:
            m = stats
            logs = torch.zeros_like(m)
        else:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)

        if reverse:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            return torch.cat([x0, x1], 1)
        x1 = m + x1 * torch.exp(logs) * x_mask
        x = torch.cat([x0, x1], 1)
        logdet = torch.sum(logs, [1, 2])
        return x, logdet
|
341 |
-
|
342 |
-
|
343 |
-
class ConvFlow(nn.Module):
    """Neural spline flow: half the channels parameterise a piecewise
    rational-quadratic transform applied to the other half."""

    def __init__(self, in_channels, filter_channels, kernel_size, n_layers,
                 num_bins=10, tail_bound=5.0):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2

        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
        # Per channel: num_bins widths + num_bins heights + (num_bins - 1) derivatives.
        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
        # Zero init makes the spline start out as the identity map.
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask

        b, c, t = x0.shape
        # [b, c * n_params, t] -> [b, c, t, n_params]
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)

        scale = math.sqrt(self.filter_channels)
        unnormalized_widths = h[..., :self.num_bins] / scale
        unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / scale
        unnormalized_derivatives = h[..., 2 * self.num_bins:]

        x1, logabsdet = piecewise_rational_quadratic_transform(
            x1,
            unnormalized_widths,
            unnormalized_heights,
            unnormalized_derivatives,
            inverse=reverse,
            tails='linear',
            tail_bound=self.tail_bound,
        )

        x = torch.cat([x0, x1], 1) * x_mask
        if reverse:
            return x
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        return x, logdet
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/dnnlib/__init__.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
from .util import EasyDict, make_cache_dir_path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
DELETED
@@ -1,205 +0,0 @@
|
|
1 |
-
import torch.nn as nn
|
2 |
-
from mmcv.cnn import ConvModule
|
3 |
-
|
4 |
-
from mmdet.models.builder import HEADS
|
5 |
-
from .bbox_head import BBoxHead
|
6 |
-
|
7 |
-
|
8 |
-
@HEADS.register_module()
class ConvFCBBoxHead(BBoxHead):
    r"""More general bbox head, with shared conv and fc layers and two optional
    separated branches.

    .. code-block:: none

                                    /-> cls convs -> cls fcs -> cls
        shared convs -> shared fcs
                                    \-> reg convs -> reg fcs -> reg
    """  # noqa: W605

    def __init__(self,
                 num_shared_convs=0,
                 num_shared_fcs=0,
                 num_cls_convs=0,
                 num_cls_fcs=0,
                 num_reg_convs=0,
                 num_reg_fcs=0,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=None,
                 *args,
                 **kwargs):
        super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
        # The head must contain at least one conv or fc layer overall.
        assert (num_shared_convs + num_shared_fcs + num_cls_convs +
                num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
        # Branch-specific convs operate on spatial maps, so they cannot be
        # preceded by shared fcs (which flatten the features).
        if num_cls_convs > 0 or num_reg_convs > 0:
            assert num_shared_fcs == 0
        if not self.with_cls:
            assert num_cls_convs == 0 and num_cls_fcs == 0
        if not self.with_reg:
            assert num_reg_convs == 0 and num_reg_fcs == 0
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # add shared convs and fcs
        self.shared_convs, self.shared_fcs, last_layer_dim = \
            self._add_conv_fc_branch(
                self.num_shared_convs, self.num_shared_fcs, self.in_channels,
                True)
        self.shared_out_channels = last_layer_dim

        # add cls specific branch
        self.cls_convs, self.cls_fcs, self.cls_last_dim = \
            self._add_conv_fc_branch(
                self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)

        # add reg specific branch
        self.reg_convs, self.reg_fcs, self.reg_last_dim = \
            self._add_conv_fc_branch(
                self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)

        # Without any fc (and without pooling) the predictors see the full
        # flattened RoI feature map, so scale the input dims accordingly.
        if self.num_shared_fcs == 0 and not self.with_avg_pool:
            if self.num_cls_fcs == 0:
                self.cls_last_dim *= self.roi_feat_area
            if self.num_reg_fcs == 0:
                self.reg_last_dim *= self.roi_feat_area

        self.relu = nn.ReLU(inplace=True)
        # reconstruct fc_cls and fc_reg since input channels are changed
        if self.with_cls:
            # +1 output for the background class.
            self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1)
        if self.with_reg:
            out_dim_reg = (4 if self.reg_class_agnostic else 4 *
                           self.num_classes)
            self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)

    def _add_conv_fc_branch(self,
                            num_branch_convs,
                            num_branch_fcs,
                            in_channels,
                            is_shared=False):
        """Add shared or separable branch.

        convs -> avg pool (optional) -> fcs
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if num_branch_convs > 0:
            for i in range(num_branch_convs):
                conv_in_channels = (
                    last_layer_dim if i == 0 else self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if num_branch_fcs > 0:
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if (is_shared
                    or self.num_shared_fcs == 0) and not self.with_avg_pool:
                # First fc consumes the flattened spatial map.
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (
                    last_layer_dim if i == 0 else self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return branch_convs, branch_fcs, last_layer_dim

    def init_weights(self):
        """Initialize fc weights with Xavier; convs are handled by ConvModule."""
        super(ConvFCBBoxHead, self).init_weights()
        # conv layers are already initialized by ConvModule
        for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
            for m in module_list.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_uniform_(m.weight)
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Return ``(cls_score, bbox_pred)`` for RoI features ``x``
        (either may be ``None`` when the corresponding branch is disabled)."""
        # shared part
        if self.num_shared_convs > 0:
            for conv in self.shared_convs:
                x = conv(x)

        if self.num_shared_fcs > 0:
            if self.with_avg_pool:
                x = self.avg_pool(x)

            x = x.flatten(1)

            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # separate branches
        x_cls = x
        x_reg = x

        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if x_cls.dim() > 2:
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))

        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if x_reg.dim() > 2:
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))

        cls_score = self.fc_cls(x_cls) if self.with_cls else None
        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
        return cls_score, bbox_pred
|
174 |
-
|
175 |
-
|
176 |
-
@HEADS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):
    """ConvFCBBoxHead preset: two shared fc layers, no per-branch convs/fcs."""

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        super(Shared2FCBBoxHead, self).__init__(
            num_shared_convs=0,
            num_shared_fcs=2,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
|
190 |
-
|
191 |
-
|
192 |
-
@HEADS.register_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
    """ConvFCBBoxHead preset: four shared convs followed by one shared fc."""

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        super(Shared4Conv1FCBBoxHead, self).__init__(
            num_shared_convs=4,
            num_shared_fcs=1,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
# Inherit the DeepLabV3 ResNet-50 VOC12-aug 20k-iteration config and swap the
# backbone for ResNet-101 with open-mmlab pretrained weights.
_base_ = './deeplabv3_r50-d8_512x512_20k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Spell-book.md
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
You have now entered a hidden corner of the internet.
|
2 |
-
|
3 |
-
A confusing yet intriguing realm of paradoxes and contradictions.
|
4 |
-
|
5 |
-
A place where you will find out that what you thought you knew, you in fact didn't know, and what you didn't know was in front of you all along.
|
6 |
-
|
7 |
-

|
8 |
-
|
9 |
-
*In other words, here I will document little-known facts about this web UI that I could not find another place for in the wiki.*
|
10 |
-
|
11 |
-
#### You can train LoRAs in CPU mode
|
12 |
-
|
13 |
-
Load the web UI with
|
14 |
-
|
15 |
-
```
|
16 |
-
python server.py --cpu
|
17 |
-
```
|
18 |
-
|
19 |
-
and start training the LoRA from the training tab as usual.
|
20 |
-
|
21 |
-
#### 8-bit mode works with CPU offloading
|
22 |
-
|
23 |
-
```
|
24 |
-
python server.py --load-in-8bit --gpu-memory 4000MiB
|
25 |
-
```
|
26 |
-
|
27 |
-
#### `--pre_layer`, and not `--gpu-memory`, is the right way to do CPU offloading with 4-bit models
|
28 |
-
|
29 |
-
```
|
30 |
-
python server.py --wbits 4 --groupsize 128 --pre_layer 20
|
31 |
-
```
|
32 |
-
|
33 |
-
#### Models can be loaded in 32-bit, 16-bit, 8-bit, and 4-bit modes
|
34 |
-
|
35 |
-
```
|
36 |
-
python server.py --cpu
|
37 |
-
python server.py
|
38 |
-
python server.py --load-in-8bit
|
39 |
-
python server.py --wbits 4
|
40 |
-
```
|
41 |
-
|
42 |
-
#### The web UI works with any version of GPTQ-for-LLaMa
|
43 |
-
|
44 |
-
Including the up to date triton and cuda branches. But you have to delete the `repositories/GPTQ-for-LLaMa` folder and reinstall the new one every time:
|
45 |
-
|
46 |
-
```
|
47 |
-
cd text-generation-webui/repositories
|
48 |
-
rm -r GPTQ-for-LLaMa
|
49 |
-
pip uninstall quant-cuda
|
50 |
-
git clone https://github.com/oobabooga/GPTQ-for-LLaMa -b cuda # or any other repository and branch
|
51 |
-
cd GPTQ-for-LLaMa
|
52 |
-
python setup_cuda.py install
|
53 |
-
```
|
54 |
-
|
55 |
-
#### Instruction-following templates are represented as chat characters
|
56 |
-
|
57 |
-
https://github.com/oobabooga/text-generation-webui/tree/main/characters/instruction-following
|
58 |
-
|
59 |
-
#### The right way to run Alpaca, Open Assistant, Vicuna, etc is Instruct mode, not normal chat mode
|
60 |
-
|
61 |
-
Otherwise the prompt will not be formatted correctly.
|
62 |
-
|
63 |
-
1. Start the web UI with
|
64 |
-
|
65 |
-
```
|
66 |
-
python server.py --chat
|
67 |
-
```
|
68 |
-
|
69 |
-
2. Click on the "instruct" option under "Chat modes"
|
70 |
-
|
71 |
-
3. Select the correct template in the hidden dropdown menu that will become visible.
|
72 |
-
|
73 |
-
#### Notebook mode is best mode
|
74 |
-
|
75 |
-
Ascended individuals have realized that notebook mode is the superset of chat mode and can do chats with ultimate flexibility, including group chats, editing replies, starting a new bot reply in a given way, and impersonating.
|
76 |
-
|
77 |
-
#### RWKV is a RNN
|
78 |
-
|
79 |
-
Most models are transformers, but not RWKV, which is a RNN. It's a great model.
|
80 |
-
|
81 |
-
#### `--gpu-memory` is not a hard limit on the GPU memory
|
82 |
-
|
83 |
-
It is simply a parameter that is passed to the `accelerate` library while loading the model. More memory will be allocated during generation. That's why this parameter has to be set to less than your total GPU memory.
|
84 |
-
|
85 |
-
#### Contrastive search is perhaps the best preset
|
86 |
-
|
87 |
-
But it uses a ton of VRAM.
|
88 |
-
|
89 |
-
#### You can check the sha256sum of downloaded models with the download script
|
90 |
-
|
91 |
-
```
|
92 |
-
python download-model.py facebook/galactica-125m --check
|
93 |
-
```
|
94 |
-
|
95 |
-
#### The download script continues interrupted downloads by default
|
96 |
-
|
97 |
-
It doesn't start over.
|
98 |
-
|
99 |
-
#### You can download models with multiple threads
|
100 |
-
|
101 |
-
```
|
102 |
-
python download-model.py facebook/galactica-125m --threads 8
|
103 |
-
```
|
104 |
-
|
105 |
-
#### LoRAs work in 4-bit mode
|
106 |
-
|
107 |
-
You need to follow [these instructions](GPTQ-models-(4-bit-mode).md#using-loras-in-4-bit-mode) and then start the web UI with the `--monkey-patch` flag.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/update_macos.sh
DELETED
@@ -1,26 +0,0 @@
|
|
1 |
-
#!/bin/bash
# Update script for the macOS one-click installer: activates the bundled
# Miniconda environment in isolation and runs the project updater.

cd "$(dirname "${BASH_SOURCE[0]}")"

# Miniconda's silent installer fails under paths containing spaces.
if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi

# deactivate existing conda envs as needed to avoid conflicts
{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null

# config
CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda"
INSTALL_ENV_DIR="$(pwd)/installer_files/env"

# environment isolation: ignore user site-packages and any inherited Python env
export PYTHONNOUSERSITE=1
unset PYTHONPATH
unset PYTHONHOME
export CUDA_PATH="$INSTALL_ENV_DIR"
export CUDA_HOME="$CUDA_PATH"

# activate installer env
source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
conda activate "$INSTALL_ENV_DIR"

# update installer env
python one_click.py --update && echo -e "\nDone!"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AntNikYab/NaturalLanguageProcessing/pages/polyclinics.py
DELETED
@@ -1,115 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import numpy as np
|
3 |
-
import time
|
4 |
-
import pickle
|
5 |
-
import torch
|
6 |
-
import pandas as pd
|
7 |
-
from gensim.models import KeyedVectors
|
8 |
-
from transformers import BertTokenizer, BertModel
|
9 |
-
from nltk.corpus import stopwords
|
10 |
-
from nltk.stem import SnowballStemmer
|
11 |
-
from function.lstm_preprocessing import (
|
12 |
-
clean,
|
13 |
-
tokin,
|
14 |
-
predict_ml_class,
|
15 |
-
predict_sentence,
|
16 |
-
predict_single_string,
|
17 |
-
LSTMClassifier
|
18 |
-
)
|
19 |
-
|
20 |
-
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
|
21 |
-
|
22 |
-
stemmer = SnowballStemmer('russian')
|
23 |
-
sw = stopwords.words('russian')
|
24 |
-
|
25 |
-
EMBEDDING_DIM = 32
|
26 |
-
HIDDEN_DIM = 32
|
27 |
-
SEQ_LEN = 200
|
28 |
-
VOCAB_SIZE = 196906
|
29 |
-
EMBEDDING_DIM = 32
|
30 |
-
wv = KeyedVectors.load("file/wv.wordvectors", mmap='r')
|
31 |
-
|
32 |
-
with open('file/vocab_to_int.txt', 'rb') as f:
|
33 |
-
vocab_to_int = pickle.load(f)
|
34 |
-
|
35 |
-
embedding_matrix = np.zeros((VOCAB_SIZE, EMBEDDING_DIM))
|
36 |
-
|
37 |
-
for word, i in vocab_to_int.items():
|
38 |
-
try:
|
39 |
-
embedding_vector = wv[word]
|
40 |
-
embedding_matrix[i] = embedding_vector
|
41 |
-
except KeyError as e:
|
42 |
-
pass
|
43 |
-
|
44 |
-
embedding_layer = torch.nn.Embedding.from_pretrained(torch.FloatTensor(embedding_matrix))
|
45 |
-
|
46 |
-
model = LSTMClassifier(embedding_dim=EMBEDDING_DIM, hidden_size=HIDDEN_DIM, embedding=embedding_layer).to(DEVICE)
|
47 |
-
model.load_state_dict(torch.load('models/LTSM_model_epoch_7.pt', map_location='cpu'))
|
48 |
-
|
49 |
-
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
|
50 |
-
model_BERT = BertModel.from_pretrained("bert-base-multilingual-cased")
|
51 |
-
|
52 |
-
loaded_model = pickle.load(open('models/LogReg.pickle', "rb"))
|
53 |
-
|
54 |
-
loaded_classifier = pickle.load(open('models/trained_model.pkl', "rb"))
|
55 |
-
loaded_vectorizer = pickle.load(open('models/vectorizer.pkl', "rb"))
|
56 |
-
|
57 |
-
def main():
    """Render the page title and review text box; return the raw review text.

    Note: despite the name, this is called at import time and simply collects
    the user's input for the prediction code below.
    """
    st.title("Классификация отзыва на поликлиники")
    review_text = st.text_area("Введите ваш отзыв:", "")
    return review_text
|
61 |
-
|
62 |
-
user_input = main()
|
63 |
-
|
64 |
-
def predict_lstm(user_input):
    """Classify the review with the LSTM model.

    Returns a tuple ``(prediction, elapsed_seconds)`` where the elapsed time
    is rounded to 4 decimal places.
    """
    started = time.time()
    result = predict_sentence(user_input, model, SEQ_LEN, vocab_to_int)
    elapsed = round(time.time() - started, 4)
    return result, elapsed
|
69 |
-
|
70 |
-
def predict_bert(user_input):
    """Classify the review with BERT embeddings + logistic regression.

    Returns a tuple ``(prediction, elapsed_seconds)`` where the elapsed time
    is rounded to 4 decimal places.
    """
    started = time.time()
    result = predict_single_string(user_input, model_BERT, loaded_model)
    elapsed = round(time.time() - started, 4)
    return result, elapsed
|
75 |
-
|
76 |
-
def predict_ML(user_input):
    """Classify the review with the bag-of-words vectorizer + classic ML model.

    Returns a tuple ``(prediction, elapsed_seconds)`` where the elapsed time
    is rounded to 4 decimal places.
    """
    started = time.time()
    result = predict_ml_class(user_input, loaded_vectorizer, loaded_classifier)
    elapsed = round(time.time() - started, 4)
    return result, elapsed
|
81 |
-
|
82 |
-
if user_input:
|
83 |
-
prediction_rnn, time_taken_rnn = predict_ML(user_input)
|
84 |
-
st.write("### Bag-of-Words + LogReg")
|
85 |
-
st.write("Предсказанный класс:", prediction_rnn)
|
86 |
-
st.write("Время предсказания:", time_taken_rnn, "сек.")
|
87 |
-
prediction_rnn, time_taken_rnn = predict_lstm(user_input)
|
88 |
-
st.write("### LSTM модель")
|
89 |
-
st.write("Предсказанный класс:", prediction_rnn)
|
90 |
-
st.write("Время предсказания:", time_taken_rnn, "сек.")
|
91 |
-
prediction_rnn, time_taken_rnn = predict_bert(user_input)
|
92 |
-
st.write("### BERT модель + LogReg")
|
93 |
-
st.write("Предсказанный класс:", prediction_rnn)
|
94 |
-
st.write("Время предсказания:", time_taken_rnn, "сек.")
|
95 |
-
|
96 |
-
|
97 |
-
st.sidebar.image('images/polyclinic.jpeg', use_column_width=True)
|
98 |
-
f1_score_classic_ml = 0.87
|
99 |
-
f1_score_rnn = 0.88
|
100 |
-
f1_score_bert = 0.83
|
101 |
-
f1_score_classic_ml_valid = 0.89
|
102 |
-
f1_score_rnn_valid = 0.92
|
103 |
-
f1_score_bert_valid = 0.82
|
104 |
-
# Создание DataFrame для сравнения результатов
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
st.sidebar.write("### Сравнительная таблица по метрике f1-macro")
|
109 |
-
results = {
|
110 |
-
"Модель": ["Классический ML", "LSTM", "BERT-based"],
|
111 |
-
"train": [f1_score_classic_ml, f1_score_rnn, f1_score_bert],
|
112 |
-
"valid": [f1_score_classic_ml_valid, f1_score_rnn_valid, f1_score_bert_valid]
|
113 |
-
}
|
114 |
-
results_df = pd.DataFrame(results)
|
115 |
-
st.sidebar.dataframe(results_df)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnthonyTruchetPoC/persistent-docker/on_startup.sh
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
#!/bin/bash
|
2 |
-
# Write some commands here that will run on root user before startup.
|
3 |
-
# For example, to clone transformers and install it in dev mode:
|
4 |
-
# git clone https://github.com/huggingface/transformers.git
|
5 |
-
# cd transformers && pip install -e ".[dev]"
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Arcypojeb/NeuralServer/app.py
DELETED
@@ -1,304 +0,0 @@
|
|
1 |
-
import datetime
|
2 |
-
import websockets
|
3 |
-
import asyncio
|
4 |
-
import sqlite3
|
5 |
-
import json
|
6 |
-
import requests
|
7 |
-
import gradio as gr
|
8 |
-
import PySimpleGUI as sg
|
9 |
-
from bs4 import BeautifulSoup
|
10 |
-
from gradio_client import Client
|
11 |
-
from websockets.sync.client import connect
|
12 |
-
|
13 |
-
modelPath = 'nlp-model.json'
|
14 |
-
|
15 |
-
inputs = []
|
16 |
-
client_ports = []
|
17 |
-
server_ports = []
|
18 |
-
|
19 |
-
layout = [
|
20 |
-
[sg.Multiline(size=(200, 10), key='-CLIENT-')],
|
21 |
-
[sg.Multiline(size=(100, 20), key='-INPUT-', auto_refresh=True), sg.Multiline(size=(100, 20), key='-OUTPUT-', auto_refresh=True)],
|
22 |
-
[sg.Multiline(size=(150, 2), key='-USERINPUT-')],
|
23 |
-
[sg.Button('Ask the agent')],
|
24 |
-
[sg.Text('Enter Port:'), sg.InputText(size=(10, 1), key='-PORT-'),
|
25 |
-
sg.Slider(range=(1000, 9999), orientation='h', size=(20, 20), key='-PORTSLIDER-')],
|
26 |
-
[sg.Button('Start WebSocket server'), sg.Button('Start WebSocket client')],
|
27 |
-
[sg.Button('Stop WebSocket server'), sg.Button('Stop WebSocket client')],
|
28 |
-
[sg.Multiline(size=(20, 4), key='-SERVER_PORTS-')], [sg.Multiline(size=(20, 4), key='-CLIENT_PORTS-')],
|
29 |
-
[sg.Button('Clear Textboxes')]
|
30 |
-
]
|
31 |
-
|
32 |
-
def get_port(values):
|
33 |
-
if values['-PORT-']:
|
34 |
-
return int(values['-PORT-'])
|
35 |
-
else:
|
36 |
-
return int(values['-PORTSLIDER-'])
|
37 |
-
|
38 |
-
window = sg.Window('WebSocket Client', layout)
|
39 |
-
|
40 |
-
# Function to send a question to the chatbot and get the response
|
41 |
-
async def askQuestion(question):
    """POST *question* to the DocsBot chat endpoint and return the raw
    response body decoded as UTF-8.

    On a request-level failure the error is printed and ``None`` is
    implicitly returned.
    """
    url = 'https://api.docsbot.ai/teams/ZrbLG98bbxZ9EFqiPvyl/bots/oFFiXuQsakcqyEdpLvCB/chat'
    payload = {
        'question': question,
        'full_source': False
    }
    try:
        # NOTE(review): requests.post is blocking inside an async function —
        # confirm whether this should use an async HTTP client.
        reply = requests.post(url, headers={'Content-Type': 'application/json'}, json=payload)
        return reply.content.decode('utf-8')
    except requests.exceptions.RequestException as e:
        # Handle request exceptions here
        print(f"Request failed with exception: {e}")
|
58 |
-
|
59 |
-
async def askQuestion2(question):
    """Byte-identical duplicate of :func:`askQuestion`, kept for interface
    compatibility with existing callers; now delegates to it so the endpoint
    and error handling live in one place."""
    return await askQuestion(question)
|
76 |
-
|
77 |
-
async def askQuestion3(question):
    """Byte-identical duplicate of :func:`askQuestion`, kept for interface
    compatibility with existing callers; now delegates to it so the endpoint
    and error handling live in one place."""
    return await askQuestion(question)
|
94 |
-
|
95 |
-
async def run_agent(question):
    """Run a LangChain zero-shot agent (Fireworks llama-v2-13b with Google
    search and math tools) on *question*.

    Returns a tuple ``(final_answer, intermediate_steps)``.

    NOTE(review): ``os``, ``Fireworks``, ``load_tools``, ``initialize_agent``,
    ``AgentType`` and the ``*_API_KEY`` constants are not imported/defined in
    this module's visible code — confirm they are provided upstream.
    """
    os.environ["GOOGLE_CSE_ID"] = GOOGLE_CSE_ID
    os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
    os.environ["FIREWORKS_API_KEY"] = FIREWORKS_API_KEY

    llm = Fireworks(model="accounts/fireworks/models/llama-v2-13b")
    tools = load_tools(["google-search", "llm-math"], llm=llm)
    agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
                             verbose=True, return_intermediate_steps=True)

    response = agent({"input": question})
    # Fix: the original had two unreachable statements after this return
    # (decoding `response.content`, which a dict does not even have).
    return response["output"], response["intermediate_steps"]
|
108 |
-
|
109 |
-
async def handleWebSocket(ws):
|
110 |
-
print('New connection')
|
111 |
-
instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."
|
112 |
-
greetings = {'instructions': instruction}
|
113 |
-
await ws.send(json.dumps(instruction))
|
114 |
-
while True:
|
115 |
-
message = await ws.recv()
|
116 |
-
print(message)
|
117 |
-
timestamp = datetime.datetime.now().isoformat()
|
118 |
-
sender = 'client'
|
119 |
-
db = sqlite3.connect('chat-hub.db')
|
120 |
-
db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
|
121 |
-
(sender, message, timestamp))
|
122 |
-
db.commit()
|
123 |
-
try:
|
124 |
-
response = await askQuestion(message)
|
125 |
-
serverResponse = f'server response:{response}'
|
126 |
-
# Append the server response to the server_responses list
|
127 |
-
timestamp = datetime.datetime.now().isoformat()
|
128 |
-
serverSender = 'server'
|
129 |
-
db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
|
130 |
-
(serverSender, serverResponse, timestamp))
|
131 |
-
db.commit()
|
132 |
-
await ws.send(json.dumps(serverResponse))
|
133 |
-
return serverResponse
|
134 |
-
|
135 |
-
except websockets.exceptions.ConnectionClosedError as e:
|
136 |
-
print(f"Connection closed: {e}")
|
137 |
-
|
138 |
-
except Exception as e:
|
139 |
-
print(f"Error: {e}")
|
140 |
-
|
141 |
-
async def handle_message(message):
|
142 |
-
print(f'Received message: {message}')
|
143 |
-
timestamp = datetime.datetime.now().isoformat()
|
144 |
-
sender = 'client'
|
145 |
-
db = sqlite3.connect('chat-hub.db')
|
146 |
-
db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
|
147 |
-
(sender, message, timestamp))
|
148 |
-
db.commit()
|
149 |
-
try:
|
150 |
-
userMessage = f'User B:{message}'
|
151 |
-
response = await askQuestion(userMessage)
|
152 |
-
serverResponse = f'server response:{response}'
|
153 |
-
timestamp = datetime.datetime.now().isoformat()
|
154 |
-
serverSender = 'server'
|
155 |
-
db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
|
156 |
-
(serverSender, serverResponse, timestamp))
|
157 |
-
db.commit()
|
158 |
-
return serverResponse
|
159 |
-
except Exception as e:
|
160 |
-
print(f"Error: {e}")
|
161 |
-
|
162 |
-
# Define start_client function with a variable port
|
163 |
-
async def start_client(clientPort):
|
164 |
-
uri = f'ws://localhost:{clientPort}'
|
165 |
-
client_ports.append(clientPort)
|
166 |
-
window['-CLIENT_PORTS-'].print(str(client_ports) + '\n')
|
167 |
-
async with websockets.connect(uri, create_protocol=handleClient) as websocket:
|
168 |
-
print("Connected to server at:", clientPort)
|
169 |
-
return "Used ports:\n" + '\n'.join(map(str, client_ports))
|
170 |
-
message = await websocket.recv()
|
171 |
-
inputMsg = "client: " + handle_message
|
172 |
-
window['-INPUT-'].print(str(inputMsg) + '\n')
|
173 |
-
print(message)
|
174 |
-
return message
|
175 |
-
|
176 |
-
async def handleClient(websocket, path):
|
177 |
-
return client1_msg
|
178 |
-
|
179 |
-
async def connect_docsbot(clientPort):
|
180 |
-
uri = f'ws://localhost:{clientPort}'
|
181 |
-
async with websockets.connect(uri) as websocket:
|
182 |
-
print("Connected to server at:", clientPort)
|
183 |
-
client_ports.append(clientPort)
|
184 |
-
window['-CLIENT_PORTS-'].print(str(client_ports) + '\n')
|
185 |
-
return "Used ports:\n" + '\n'.join(map(str, client_ports))
|
186 |
-
while True:
|
187 |
-
message = await websocket.recv()
|
188 |
-
inputMsg = "client: " + handle_message
|
189 |
-
window['-INPUT-'].print(str(inputMsg) + '\n')
|
190 |
-
print(message)
|
191 |
-
return message
|
192 |
-
|
193 |
-
async def handleClient2(websocket, path):
|
194 |
-
return client2_msg
|
195 |
-
|
196 |
-
async def connect_agent(clientPort):
|
197 |
-
uri = f'ws://localhost:{clientPort}'
|
198 |
-
async with websockets.connect(uri, create_protocol=handleClient3) as websocket:
|
199 |
-
print("Connected to server at:", clientPort)
|
200 |
-
client_ports.append(clientPort)
|
201 |
-
return "Used ports:\n" + '\n'.join(map(str, client_ports))
|
202 |
-
message = await websocket.recv()
|
203 |
-
inputMsg = "client: " + handle_message
|
204 |
-
window['-INPUT-'].print(str(inputMsg) + '\n')
|
205 |
-
print(message)
|
206 |
-
return message
|
207 |
-
|
208 |
-
async def handleClient3(websocket, path):
|
209 |
-
return client3_msg
|
210 |
-
|
211 |
-
# Function to stop the WebSocket server
|
212 |
-
def stop_websockets():
|
213 |
-
global server
|
214 |
-
if server:
|
215 |
-
cursor.close()
|
216 |
-
db.close()
|
217 |
-
server.close()
|
218 |
-
print("WebSocket server stopped.")
|
219 |
-
else:
|
220 |
-
print("WebSocket server is not running.")
|
221 |
-
|
222 |
-
# Start the WebSocket server
|
223 |
-
async def start_websockets(websocketPort):
|
224 |
-
uri = f'wss://localhost:{websocketPort}'
|
225 |
-
global server
|
226 |
-
# Create a WebSocket client that connects to the server
|
227 |
-
server = await(websockets.serve(handleWebSocket, uri))
|
228 |
-
server_ports.append(websocketPort)
|
229 |
-
print(f"Starting WebSocket server on port {websocketPort}...")
|
230 |
-
return "Used ports:\n" + '\n'.join(map(str, server_ports))
|
231 |
-
await stop
|
232 |
-
await server.close()
|
233 |
-
|
234 |
-
async def start_client(websocketPort):
|
235 |
-
uri = f'ws://localhost:{websocketPort}'
|
236 |
-
while True:
|
237 |
-
try:
|
238 |
-
async with websockets.connect(uri) as ws:
|
239 |
-
print("Connected to server at:", websocketPort)
|
240 |
-
while True:
|
241 |
-
message = await ws.recv()
|
242 |
-
print(message)
|
243 |
-
response = await askQuestion(message)
|
244 |
-
print(response)
|
245 |
-
await ws.send(response)
|
246 |
-
except websockets.exceptions.ConnectionClosedOK:
|
247 |
-
print("Connection closed")
|
248 |
-
continue
|
249 |
-
|
250 |
-
async def start_interface():
|
251 |
-
while True:
|
252 |
-
event, values = window.read()
|
253 |
-
if event in (sg.WIN_CLOSED, 'Stop WebSocket client'):
|
254 |
-
break
|
255 |
-
elif event == 'Start WebSocket server':
|
256 |
-
websocketPort = get_port(values)
|
257 |
-
loop = asyncio.get_event_loop()
|
258 |
-
loop.run_until_complete(start_websockets(websocketPort))
|
259 |
-
elif event == 'Start WebSocket client':
|
260 |
-
websocketPort = get_port(values)
|
261 |
-
loop = asyncio.get_event_loop()
|
262 |
-
loop.run_until_complete(start_client(websocketPort))
|
263 |
-
elif event == 'Ask the agent':
|
264 |
-
question = values['-USERINPUT-']
|
265 |
-
loop = asyncio.get_event_loop()
|
266 |
-
loop.run_until_complete(handle_user(question))
|
267 |
-
elif event == 'Clear Textboxes':
|
268 |
-
window['-INPUT-'].update('')
|
269 |
-
window['-OUTPUT-'].update('')
|
270 |
-
window['-USERINPUT-'].update('')
|
271 |
-
|
272 |
-
window.close()
|
273 |
-
|
274 |
-
with gr.Blocks() as demo:
|
275 |
-
with gr.Row():
|
276 |
-
# Use the client_messages list to update the messageTextbox
|
277 |
-
client_msg = gr.Textbox(lines=15, max_lines=130, label="Client messages", interactive=False)
|
278 |
-
# Use the server_responses list to update the serverMessageTextbox
|
279 |
-
server_msg = gr.Textbox(lines=15, max_lines=130, label="Server responses", interactive=False)
|
280 |
-
with gr.Row():
|
281 |
-
userInput = gr.Textbox(label="User Input")
|
282 |
-
with gr.Row():
|
283 |
-
Bot = gr.Button("Ask Server")
|
284 |
-
with gr.Row():
|
285 |
-
websocketPort = gr.Slider(minimum=1000, maximum=9999, label="Websocket server port", interactive=True, randomize=False)
|
286 |
-
startServer = gr.Button("Start WebSocket Server")
|
287 |
-
stopWebsockets = gr.Button("Stop WebSocket Server")
|
288 |
-
with gr.Row():
|
289 |
-
port = gr.Textbox()
|
290 |
-
with gr.Row():
|
291 |
-
clientPort = gr.Slider(minimum=1000, maximum=9999, label="Websocket server port", interactive=True, randomize=False)
|
292 |
-
startClient = gr.Button("Start WebSocket client")
|
293 |
-
stopClient = gr.Button("Stop WebSocket client")
|
294 |
-
with gr.Row():
|
295 |
-
PortInUse = gr.Textbox()
|
296 |
-
startServer.click(start_websockets, inputs=websocketPort, outputs=port)
|
297 |
-
startClient.click(start_client, inputs=clientPort, outputs=[PortInUse, client_msg])
|
298 |
-
stopWebsockets.click(stop_websockets, inputs=None, outputs=server_msg)
|
299 |
-
startInterface = gr.Button("Start GUI")
|
300 |
-
Bot.click(askQuestion, inputs=userInput, outputs=server_msg)
|
301 |
-
startInterface.click(start_interface, inputs=None, outputs=None)
|
302 |
-
|
303 |
-
demo.queue()
|
304 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Arikkod/FoodVisionMini/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: FoodVisionMini
|
3 |
-
emoji: 🥩🍕🍣
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.32.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AtomdffAI/wechatgpt4atom/docker/entrypoint.sh
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
#!/bin/bash
|
2 |
-
set -e
|
3 |
-
|
4 |
-
# build prefix
|
5 |
-
CHATGPT_ON_WECHAT_PREFIX=${CHATGPT_ON_WECHAT_PREFIX:-""}
|
6 |
-
# path to config.json
|
7 |
-
CHATGPT_ON_WECHAT_CONFIG_PATH=${CHATGPT_ON_WECHAT_CONFIG_PATH:-""}
|
8 |
-
# execution command line
|
9 |
-
CHATGPT_ON_WECHAT_EXEC=${CHATGPT_ON_WECHAT_EXEC:-""}
|
10 |
-
|
11 |
-
OPEN_AI_API_KEY=${OPEN_AI_API_KEY:-""}
|
12 |
-
SINGLE_CHAT_PREFIX=${SINGLE_CHAT_PREFIX:-""}
|
13 |
-
SINGLE_CHAT_REPLY_PREFIX=${SINGLE_CHAT_REPLY_PREFIX:-""}
|
14 |
-
GROUP_CHAT_PREFIX=${GROUP_CHAT_PREFIX:-""}
|
15 |
-
GROUP_NAME_WHITE_LIST=${GROUP_NAME_WHITE_LIST:-""}
|
16 |
-
IMAGE_CREATE_PREFIX=${IMAGE_CREATE_PREFIX:-""}
|
17 |
-
CONVERSATION_MAX_TOKENS=${CONVERSATION_MAX_TOKENS:-""}
|
18 |
-
CHARACTER_DESC=${CHARACTER_DESC:-""}
|
19 |
-
|
20 |
-
# CHATGPT_ON_WECHAT_PREFIX is empty, use /app
|
21 |
-
if [ "$CHATGPT_ON_WECHAT_PREFIX" == "" ] ; then
|
22 |
-
CHATGPT_ON_WECHAT_PREFIX=/app
|
23 |
-
fi
|
24 |
-
|
25 |
-
# CHATGPT_ON_WECHAT_CONFIG_PATH is empty, use '/app/config.json'
|
26 |
-
if [ "$CHATGPT_ON_WECHAT_CONFIG_PATH" == "" ] ; then
|
27 |
-
CHATGPT_ON_WECHAT_CONFIG_PATH=$CHATGPT_ON_WECHAT_PREFIX/config.json
|
28 |
-
fi
|
29 |
-
|
30 |
-
# CHATGPT_ON_WECHAT_EXEC is empty, use ‘python app.py’
|
31 |
-
if [ "$CHATGPT_ON_WECHAT_EXEC" == "" ] ; then
|
32 |
-
CHATGPT_ON_WECHAT_EXEC="python app.py"
|
33 |
-
fi
|
34 |
-
|
35 |
-
# modify content in config.json
|
36 |
-
if [ "$OPEN_AI_API_KEY" != "" ] ; then
|
37 |
-
sed -i "2c \"open_ai_api_key\": \"$OPEN_AI_API_KEY\"," $CHATGPT_ON_WECHAT_CONFIG_PATH
|
38 |
-
else
|
39 |
-
echo -e "\033[31m[Warning] You need to set OPEN_AI_API_KEY before running!\033[0m"
|
40 |
-
fi
|
41 |
-
|
42 |
-
if [ "$WECHATY_PUPPET_SERVICE_TOKEN" != "" ] ; then
|
43 |
-
sed -i "3c \"wechaty_puppet_service_token\": \"$WECHATY_PUPPET_SERVICE_TOKEN\"," $CHATGPT_ON_WECHAT_CONFIG_PATH
|
44 |
-
else
|
45 |
-
echo -e "\033[31m[Info] You need to set WECHATY_PUPPET_SERVICE_TOKEN if you use wechaty!\033[0m"
|
46 |
-
fi
|
47 |
-
|
48 |
-
if [ "$SINGLE_CHAT_PREFIX" != "" ] ; then
|
49 |
-
sed -i "4c \"single_chat_prefix\": $SINGLE_CHAT_PREFIX," $CHATGPT_ON_WECHAT_CONFIG_PATH
|
50 |
-
fi
|
51 |
-
|
52 |
-
if [ "$SINGLE_CHAT_REPLY_PREFIX" != "" ] ; then
|
53 |
-
sed -i "5c \"single_chat_reply_prefix\": $SINGLE_CHAT_REPLY_PREFIX," $CHATGPT_ON_WECHAT_CONFIG_PATH
|
54 |
-
fi
|
55 |
-
|
56 |
-
if [ "$GROUP_CHAT_PREFIX" != "" ] ; then
|
57 |
-
sed -i "6c \"group_chat_prefix\": $GROUP_CHAT_PREFIX," $CHATGPT_ON_WECHAT_CONFIG_PATH
|
58 |
-
fi
|
59 |
-
|
60 |
-
if [ "$GROUP_NAME_WHITE_LIST" != "" ] ; then
|
61 |
-
sed -i "7c \"group_name_white_list\": $GROUP_NAME_WHITE_LIST," $CHATGPT_ON_WECHAT_CONFIG_PATH
|
62 |
-
fi
|
63 |
-
|
64 |
-
if [ "$IMAGE_CREATE_PREFIX" != "" ] ; then
|
65 |
-
sed -i "8c \"image_create_prefix\": $IMAGE_CREATE_PREFIX," $CHATGPT_ON_WECHAT_CONFIG_PATH
|
66 |
-
fi
|
67 |
-
|
68 |
-
if [ "$CONVERSATION_MAX_TOKENS" != "" ] ; then
|
69 |
-
sed -i "9c \"conversation_max_tokens\": $CONVERSATION_MAX_TOKENS," $CHATGPT_ON_WECHAT_CONFIG_PATH
|
70 |
-
fi
|
71 |
-
|
72 |
-
if [ "$CHARACTER_DESC" != "" ] ; then
|
73 |
-
sed -i "10c \"character_desc\": \"$CHARACTER_DESC\"" $CHATGPT_ON_WECHAT_CONFIG_PATH
|
74 |
-
fi
|
75 |
-
|
76 |
-
# go to prefix dir
|
77 |
-
cd $CHATGPT_ON_WECHAT_PREFIX
|
78 |
-
# excute
|
79 |
-
$CHATGPT_ON_WECHAT_EXEC
|
80 |
-
|
81 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Brasd99/JustClothify/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: JustClothify
|
3 |
-
emoji: 💻
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.32.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/doc/GETTING_STARTED.md
DELETED
@@ -1,58 +0,0 @@
|
|
1 |
-
# Getting Started with DensePose
|
2 |
-
|
3 |
-
## Inference with Pre-trained Models
|
4 |
-
|
5 |
-
1. Pick a model and its config file from [Model Zoo](MODEL_ZOO.md), for example [densepose_rcnn_R_50_FPN_s1x.yaml](../configs/densepose_rcnn_R_50_FPN_s1x.yaml)
|
6 |
-
2. Run the [Apply Net](TOOL_APPLY_NET.md) tool to visualize the results or save the to disk. For example, to use contour visualization for DensePose, one can run:
|
7 |
-
```bash
|
8 |
-
python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml densepose_rcnn_R_50_FPN_s1x.pkl image.jpg dp_contour,bbox --output image_densepose_contour.png
|
9 |
-
```
|
10 |
-
Please see [Apply Net](TOOL_APPLY_NET.md) for more details on the tool.
|
11 |
-
|
12 |
-
## Training
|
13 |
-
|
14 |
-
First, prepare the [dataset](http://densepose.org/#dataset) into the following structure under the directory you'll run training scripts:
|
15 |
-
<pre>
|
16 |
-
datasets/coco/
|
17 |
-
annotations/
|
18 |
-
densepose_{train,minival,valminusminival}2014.json
|
19 |
-
<a href="https://dl.fbaipublicfiles.com/detectron2/densepose/densepose_minival2014_100.json">densepose_minival2014_100.json </a> (optional, for testing only)
|
20 |
-
{train,val}2014/
|
21 |
-
# image files that are mentioned in the corresponding json
|
22 |
-
</pre>
|
23 |
-
|
24 |
-
To train a model one can use the [train_net.py](../train_net.py) script.
|
25 |
-
This script was used to train all DensePose models in [Model Zoo](MODEL_ZOO.md).
|
26 |
-
For example, to launch end-to-end DensePose-RCNN training with ResNet-50 FPN backbone
|
27 |
-
on 8 GPUs following the s1x schedule, one can run
|
28 |
-
```bash
|
29 |
-
python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml --num-gpus 8
|
30 |
-
```
|
31 |
-
The configs are made for 8-GPU training. To train on 1 GPU, one can apply the
|
32 |
-
[linear learning rate scaling rule](https://arxiv.org/abs/1706.02677):
|
33 |
-
```bash
|
34 |
-
python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml \
|
35 |
-
SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025
|
36 |
-
```
|
37 |
-
|
38 |
-
## Evaluation
|
39 |
-
|
40 |
-
Model testing can be done in the same way as training, except for an additional flag `--eval-only` and
|
41 |
-
model location specification through `MODEL.WEIGHTS model.pth` in the command line
|
42 |
-
```bash
|
43 |
-
python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml \
|
44 |
-
--eval-only MODEL.WEIGHTS model.pth
|
45 |
-
```
|
46 |
-
|
47 |
-
## Tools
|
48 |
-
|
49 |
-
We provide tools which allow one to:
|
50 |
-
- easily view DensePose annotated data in a dataset;
|
51 |
-
- perform DensePose inference on a set of images;
|
52 |
-
- visualize DensePose model results;
|
53 |
-
|
54 |
-
`query_db` is a tool to print or visualize DensePose data in a dataset.
|
55 |
-
Please refer to [Query DB](TOOL_QUERY_DB.md) for more details on this tool
|
56 |
-
|
57 |
-
`apply_net` is a tool to print or visualize DensePose results.
|
58 |
-
Please refer to [Apply Net](TOOL_APPLY_NET.md) for more details on this tool
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/has_nested_type.h
DELETED
@@ -1,32 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/type_traits.h>
|
20 |
-
|
21 |
-
#define __THRUST_DEFINE_HAS_NESTED_TYPE(trait_name, nested_type_name) \
|
22 |
-
template<typename T> \
|
23 |
-
struct trait_name \
|
24 |
-
{ \
|
25 |
-
typedef char yes_type; \
|
26 |
-
typedef int no_type; \
|
27 |
-
template<typename S> static yes_type test(typename S::nested_type_name *); \
|
28 |
-
template<typename S> static no_type test(...); \
|
29 |
-
static bool const value = sizeof(test<T>(0)) == sizeof(yes_type);\
|
30 |
-
typedef thrust::detail::integral_constant<bool, value> type;\
|
31 |
-
};
|
32 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/engine/hooks.py
DELETED
@@ -1,466 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
3 |
-
|
4 |
-
import datetime
|
5 |
-
import itertools
|
6 |
-
import logging
|
7 |
-
import os
|
8 |
-
import tempfile
|
9 |
-
import time
|
10 |
-
from collections import Counter
|
11 |
-
import torch
|
12 |
-
from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
|
13 |
-
from fvcore.common.param_scheduler import ParamScheduler
|
14 |
-
from fvcore.common.timer import Timer
|
15 |
-
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
|
16 |
-
|
17 |
-
import detectron2.utils.comm as comm
|
18 |
-
from detectron2.evaluation.testing import flatten_results_dict
|
19 |
-
from detectron2.solver import LRMultiplier
|
20 |
-
from detectron2.utils.events import EventStorage, EventWriter
|
21 |
-
from detectron2.utils.file_io import PathManager
|
22 |
-
|
23 |
-
from .train_loop import HookBase
|
24 |
-
|
25 |
-
__all__ = [
|
26 |
-
"CallbackHook",
|
27 |
-
"IterationTimer",
|
28 |
-
"PeriodicWriter",
|
29 |
-
"PeriodicCheckpointer",
|
30 |
-
"LRScheduler",
|
31 |
-
"AutogradProfiler",
|
32 |
-
"EvalHook",
|
33 |
-
"PreciseBN",
|
34 |
-
]
|
35 |
-
|
36 |
-
|
37 |
-
"""
|
38 |
-
Implement some common hooks.
|
39 |
-
"""
|
40 |
-
|
41 |
-
|
42 |
-
class CallbackHook(HookBase):
|
43 |
-
"""
|
44 |
-
Create a hook using callback functions provided by the user.
|
45 |
-
"""
|
46 |
-
|
47 |
-
def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
|
48 |
-
"""
|
49 |
-
Each argument is a function that takes one argument: the trainer.
|
50 |
-
"""
|
51 |
-
self._before_train = before_train
|
52 |
-
self._before_step = before_step
|
53 |
-
self._after_step = after_step
|
54 |
-
self._after_train = after_train
|
55 |
-
|
56 |
-
def before_train(self):
|
57 |
-
if self._before_train:
|
58 |
-
self._before_train(self.trainer)
|
59 |
-
|
60 |
-
def after_train(self):
|
61 |
-
if self._after_train:
|
62 |
-
self._after_train(self.trainer)
|
63 |
-
# The functions may be closures that hold reference to the trainer
|
64 |
-
# Therefore, delete them to avoid circular reference.
|
65 |
-
del self._before_train, self._after_train
|
66 |
-
del self._before_step, self._after_step
|
67 |
-
|
68 |
-
def before_step(self):
|
69 |
-
if self._before_step:
|
70 |
-
self._before_step(self.trainer)
|
71 |
-
|
72 |
-
def after_step(self):
|
73 |
-
if self._after_step:
|
74 |
-
self._after_step(self.trainer)
|
75 |
-
|
76 |
-
|
77 |
-
class IterationTimer(HookBase):
|
78 |
-
"""
|
79 |
-
Track the time spent for each iteration (each run_step call in the trainer).
|
80 |
-
Print a summary in the end of training.
|
81 |
-
|
82 |
-
This hook uses the time between the call to its :meth:`before_step`
|
83 |
-
and :meth:`after_step` methods.
|
84 |
-
Under the convention that :meth:`before_step` of all hooks should only
|
85 |
-
take negligible amount of time, the :class:`IterationTimer` hook should be
|
86 |
-
placed at the beginning of the list of hooks to obtain accurate timing.
|
87 |
-
"""
|
88 |
-
|
89 |
-
def __init__(self, warmup_iter=3):
|
90 |
-
"""
|
91 |
-
Args:
|
92 |
-
warmup_iter (int): the number of iterations at the beginning to exclude
|
93 |
-
from timing.
|
94 |
-
"""
|
95 |
-
self._warmup_iter = warmup_iter
|
96 |
-
self._step_timer = Timer()
|
97 |
-
self._start_time = time.perf_counter()
|
98 |
-
self._total_timer = Timer()
|
99 |
-
|
100 |
-
def before_train(self):
|
101 |
-
self._start_time = time.perf_counter()
|
102 |
-
self._total_timer.reset()
|
103 |
-
self._total_timer.pause()
|
104 |
-
|
105 |
-
def after_train(self):
|
106 |
-
logger = logging.getLogger(__name__)
|
107 |
-
total_time = time.perf_counter() - self._start_time
|
108 |
-
total_time_minus_hooks = self._total_timer.seconds()
|
109 |
-
hook_time = total_time - total_time_minus_hooks
|
110 |
-
|
111 |
-
num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter
|
112 |
-
|
113 |
-
if num_iter > 0 and total_time_minus_hooks > 0:
|
114 |
-
# Speed is meaningful only after warmup
|
115 |
-
# NOTE this format is parsed by grep in some scripts
|
116 |
-
logger.info(
|
117 |
-
"Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
|
118 |
-
num_iter,
|
119 |
-
str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
|
120 |
-
total_time_minus_hooks / num_iter,
|
121 |
-
)
|
122 |
-
)
|
123 |
-
|
124 |
-
logger.info(
|
125 |
-
"Total training time: {} ({} on hooks)".format(
|
126 |
-
str(datetime.timedelta(seconds=int(total_time))),
|
127 |
-
str(datetime.timedelta(seconds=int(hook_time))),
|
128 |
-
)
|
129 |
-
)
|
130 |
-
|
131 |
-
def before_step(self):
|
132 |
-
self._step_timer.reset()
|
133 |
-
self._total_timer.resume()
|
134 |
-
|
135 |
-
def after_step(self):
|
136 |
-
# +1 because we're in after_step, the current step is done
|
137 |
-
# but not yet counted
|
138 |
-
iter_done = self.trainer.iter - self.trainer.start_iter + 1
|
139 |
-
if iter_done >= self._warmup_iter:
|
140 |
-
sec = self._step_timer.seconds()
|
141 |
-
self.trainer.storage.put_scalars(time=sec)
|
142 |
-
else:
|
143 |
-
self._start_time = time.perf_counter()
|
144 |
-
self._total_timer.reset()
|
145 |
-
|
146 |
-
self._total_timer.pause()
|
147 |
-
|
148 |
-
|
149 |
-
class PeriodicWriter(HookBase):
|
150 |
-
"""
|
151 |
-
Write events to EventStorage (by calling ``writer.write()``) periodically.
|
152 |
-
|
153 |
-
It is executed every ``period`` iterations and after the last iteration.
|
154 |
-
Note that ``period`` does not affect how data is smoothed by each writer.
|
155 |
-
"""
|
156 |
-
|
157 |
-
def __init__(self, writers, period=20):
|
158 |
-
"""
|
159 |
-
Args:
|
160 |
-
writers (list[EventWriter]): a list of EventWriter objects
|
161 |
-
period (int):
|
162 |
-
"""
|
163 |
-
self._writers = writers
|
164 |
-
for w in writers:
|
165 |
-
assert isinstance(w, EventWriter), w
|
166 |
-
self._period = period
|
167 |
-
|
168 |
-
def after_step(self):
|
169 |
-
if (self.trainer.iter + 1) % self._period == 0 or (
|
170 |
-
self.trainer.iter == self.trainer.max_iter - 1
|
171 |
-
):
|
172 |
-
for writer in self._writers:
|
173 |
-
writer.write()
|
174 |
-
|
175 |
-
def after_train(self):
|
176 |
-
for writer in self._writers:
|
177 |
-
# If any new data is found (e.g. produced by other after_train),
|
178 |
-
# write them before closing
|
179 |
-
writer.write()
|
180 |
-
writer.close()
|
181 |
-
|
182 |
-
|
183 |
-
class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
|
184 |
-
"""
|
185 |
-
Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.
|
186 |
-
|
187 |
-
Note that when used as a hook,
|
188 |
-
it is unable to save additional data other than what's defined
|
189 |
-
by the given `checkpointer`.
|
190 |
-
|
191 |
-
It is executed every ``period`` iterations and after the last iteration.
|
192 |
-
"""
|
193 |
-
|
194 |
-
def before_train(self):
|
195 |
-
self.max_iter = self.trainer.max_iter
|
196 |
-
|
197 |
-
def after_step(self):
|
198 |
-
# No way to use **kwargs
|
199 |
-
self.step(self.trainer.iter)
|
200 |
-
|
201 |
-
|
202 |
-
class LRScheduler(HookBase):
|
203 |
-
"""
|
204 |
-
A hook which executes a torch builtin LR scheduler and summarizes the LR.
|
205 |
-
It is executed after every iteration.
|
206 |
-
"""
|
207 |
-
|
208 |
-
def __init__(self, optimizer=None, scheduler=None):
|
209 |
-
"""
|
210 |
-
Args:
|
211 |
-
optimizer (torch.optim.Optimizer):
|
212 |
-
scheduler (torch.optim.LRScheduler or fvcore.common.param_scheduler.ParamScheduler):
|
213 |
-
if a :class:`ParamScheduler` object, it defines the multiplier over the base LR
|
214 |
-
in the optimizer.
|
215 |
-
|
216 |
-
If any argument is not given, will try to obtain it from the trainer.
|
217 |
-
"""
|
218 |
-
self._optimizer = optimizer
|
219 |
-
self._scheduler = scheduler
|
220 |
-
|
221 |
-
def before_train(self):
|
222 |
-
self._optimizer = self._optimizer or self.trainer.optimizer
|
223 |
-
if isinstance(self.scheduler, ParamScheduler):
|
224 |
-
self._scheduler = LRMultiplier(
|
225 |
-
self._optimizer,
|
226 |
-
self.scheduler,
|
227 |
-
self.trainer.max_iter,
|
228 |
-
last_iter=self.trainer.iter - 1,
|
229 |
-
)
|
230 |
-
|
231 |
-
# NOTE: some heuristics on what LR to summarize
|
232 |
-
# summarize the param group with most parameters
|
233 |
-
largest_group = max(len(g["params"]) for g in self._optimizer.param_groups)
|
234 |
-
|
235 |
-
if largest_group == 1:
|
236 |
-
# If all groups have one parameter,
|
237 |
-
# then find the most common initial LR, and use it for summary
|
238 |
-
lr_count = Counter([g["lr"] for g in self._optimizer.param_groups])
|
239 |
-
lr = lr_count.most_common()[0][0]
|
240 |
-
for i, g in enumerate(self._optimizer.param_groups):
|
241 |
-
if g["lr"] == lr:
|
242 |
-
self._best_param_group_id = i
|
243 |
-
break
|
244 |
-
else:
|
245 |
-
for i, g in enumerate(self._optimizer.param_groups):
|
246 |
-
if len(g["params"]) == largest_group:
|
247 |
-
self._best_param_group_id = i
|
248 |
-
break
|
249 |
-
|
250 |
-
def after_step(self):
|
251 |
-
lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
|
252 |
-
self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
|
253 |
-
self.scheduler.step()
|
254 |
-
|
255 |
-
@property
|
256 |
-
def scheduler(self):
|
257 |
-
return self._scheduler or self.trainer.scheduler
|
258 |
-
|
259 |
-
def state_dict(self):
|
260 |
-
if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
|
261 |
-
return self.scheduler.state_dict()
|
262 |
-
return {}
|
263 |
-
|
264 |
-
def load_state_dict(self, state_dict):
|
265 |
-
if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
|
266 |
-
logger = logging.getLogger(__name__)
|
267 |
-
logger.info("Loading scheduler from state_dict ...")
|
268 |
-
self.scheduler.load_state_dict(state_dict)
|
269 |
-
|
270 |
-
|
271 |
-
class AutogradProfiler(HookBase):
|
272 |
-
"""
|
273 |
-
A hook which runs `torch.autograd.profiler.profile`.
|
274 |
-
|
275 |
-
Examples:
|
276 |
-
::
|
277 |
-
hooks.AutogradProfiler(
|
278 |
-
lambda trainer: trainer.iter > 10 and trainer.iter < 20, self.cfg.OUTPUT_DIR
|
279 |
-
)
|
280 |
-
|
281 |
-
The above example will run the profiler for iteration 10~20 and dump
|
282 |
-
results to ``OUTPUT_DIR``. We did not profile the first few iterations
|
283 |
-
because they are typically slower than the rest.
|
284 |
-
The result files can be loaded in the ``chrome://tracing`` page in chrome browser.
|
285 |
-
|
286 |
-
Note:
|
287 |
-
When used together with NCCL on older version of GPUs,
|
288 |
-
autograd profiler may cause deadlock because it unnecessarily allocates
|
289 |
-
memory on every device it sees. The memory management calls, if
|
290 |
-
interleaved with NCCL calls, lead to deadlock on GPUs that do not
|
291 |
-
support ``cudaLaunchCooperativeKernelMultiDevice``.
|
292 |
-
"""
|
293 |
-
|
294 |
-
def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
|
295 |
-
"""
|
296 |
-
Args:
|
297 |
-
enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
|
298 |
-
and returns whether to enable the profiler.
|
299 |
-
It will be called once every step, and can be used to select which steps to profile.
|
300 |
-
output_dir (str): the output directory to dump tracing files.
|
301 |
-
use_cuda (bool): same as in `torch.autograd.profiler.profile`.
|
302 |
-
"""
|
303 |
-
self._enable_predicate = enable_predicate
|
304 |
-
self._use_cuda = use_cuda
|
305 |
-
self._output_dir = output_dir
|
306 |
-
|
307 |
-
def before_step(self):
|
308 |
-
if self._enable_predicate(self.trainer):
|
309 |
-
self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
|
310 |
-
self._profiler.__enter__()
|
311 |
-
else:
|
312 |
-
self._profiler = None
|
313 |
-
|
314 |
-
def after_step(self):
|
315 |
-
if self._profiler is None:
|
316 |
-
return
|
317 |
-
self._profiler.__exit__(None, None, None)
|
318 |
-
PathManager.mkdirs(self._output_dir)
|
319 |
-
out_file = os.path.join(
|
320 |
-
self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
|
321 |
-
)
|
322 |
-
if "://" not in out_file:
|
323 |
-
self._profiler.export_chrome_trace(out_file)
|
324 |
-
else:
|
325 |
-
# Support non-posix filesystems
|
326 |
-
with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d:
|
327 |
-
tmp_file = os.path.join(d, "tmp.json")
|
328 |
-
self._profiler.export_chrome_trace(tmp_file)
|
329 |
-
with open(tmp_file) as f:
|
330 |
-
content = f.read()
|
331 |
-
with PathManager.open(out_file, "w") as f:
|
332 |
-
f.write(content)
|
333 |
-
|
334 |
-
|
335 |
-
class EvalHook(HookBase):
|
336 |
-
"""
|
337 |
-
Run an evaluation function periodically, and at the end of training.
|
338 |
-
|
339 |
-
It is executed every ``eval_period`` iterations and after the last iteration.
|
340 |
-
"""
|
341 |
-
|
342 |
-
def __init__(self, eval_period, eval_function):
|
343 |
-
"""
|
344 |
-
Args:
|
345 |
-
eval_period (int): the period to run `eval_function`. Set to 0 to
|
346 |
-
not evaluate periodically (but still after the last iteration).
|
347 |
-
eval_function (callable): a function which takes no arguments, and
|
348 |
-
returns a nested dict of evaluation metrics.
|
349 |
-
|
350 |
-
Note:
|
351 |
-
This hook must be enabled in all or none workers.
|
352 |
-
If you would like only certain workers to perform evaluation,
|
353 |
-
give other workers a no-op function (`eval_function=lambda: None`).
|
354 |
-
"""
|
355 |
-
self._period = eval_period
|
356 |
-
self._func = eval_function
|
357 |
-
|
358 |
-
def _do_eval(self):
|
359 |
-
results = self._func()
|
360 |
-
|
361 |
-
if results:
|
362 |
-
assert isinstance(
|
363 |
-
results, dict
|
364 |
-
), "Eval function must return a dict. Got {} instead.".format(results)
|
365 |
-
|
366 |
-
flattened_results = flatten_results_dict(results)
|
367 |
-
for k, v in flattened_results.items():
|
368 |
-
try:
|
369 |
-
v = float(v)
|
370 |
-
except Exception as e:
|
371 |
-
raise ValueError(
|
372 |
-
"[EvalHook] eval_function should return a nested dict of float. "
|
373 |
-
"Got '{}: {}' instead.".format(k, v)
|
374 |
-
) from e
|
375 |
-
self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
|
376 |
-
|
377 |
-
# Evaluation may take different time among workers.
|
378 |
-
# A barrier make them start the next iteration together.
|
379 |
-
comm.synchronize()
|
380 |
-
|
381 |
-
def after_step(self):
|
382 |
-
next_iter = self.trainer.iter + 1
|
383 |
-
if self._period > 0 and next_iter % self._period == 0:
|
384 |
-
# do the last eval in after_train
|
385 |
-
if next_iter != self.trainer.max_iter:
|
386 |
-
self._do_eval()
|
387 |
-
|
388 |
-
def after_train(self):
|
389 |
-
# This condition is to prevent the eval from running after a failed training
|
390 |
-
if self.trainer.iter + 1 >= self.trainer.max_iter:
|
391 |
-
self._do_eval()
|
392 |
-
# func is likely a closure that holds reference to the trainer
|
393 |
-
# therefore we clean it to avoid circular reference in the end
|
394 |
-
del self._func
|
395 |
-
|
396 |
-
|
397 |
-
class PreciseBN(HookBase):
|
398 |
-
"""
|
399 |
-
The standard implementation of BatchNorm uses EMA in inference, which is
|
400 |
-
sometimes suboptimal.
|
401 |
-
This class computes the true average of statistics rather than the moving average,
|
402 |
-
and put true averages to every BN layer in the given model.
|
403 |
-
|
404 |
-
It is executed every ``period`` iterations and after the last iteration.
|
405 |
-
"""
|
406 |
-
|
407 |
-
def __init__(self, period, model, data_loader, num_iter):
|
408 |
-
"""
|
409 |
-
Args:
|
410 |
-
period (int): the period this hook is run, or 0 to not run during training.
|
411 |
-
The hook will always run in the end of training.
|
412 |
-
model (nn.Module): a module whose all BN layers in training mode will be
|
413 |
-
updated by precise BN.
|
414 |
-
Note that user is responsible for ensuring the BN layers to be
|
415 |
-
updated are in training mode when this hook is triggered.
|
416 |
-
data_loader (iterable): it will produce data to be run by `model(data)`.
|
417 |
-
num_iter (int): number of iterations used to compute the precise
|
418 |
-
statistics.
|
419 |
-
"""
|
420 |
-
self._logger = logging.getLogger(__name__)
|
421 |
-
if len(get_bn_modules(model)) == 0:
|
422 |
-
self._logger.info(
|
423 |
-
"PreciseBN is disabled because model does not contain BN layers in training mode."
|
424 |
-
)
|
425 |
-
self._disabled = True
|
426 |
-
return
|
427 |
-
|
428 |
-
self._model = model
|
429 |
-
self._data_loader = data_loader
|
430 |
-
self._num_iter = num_iter
|
431 |
-
self._period = period
|
432 |
-
self._disabled = False
|
433 |
-
|
434 |
-
self._data_iter = None
|
435 |
-
|
436 |
-
def after_step(self):
|
437 |
-
next_iter = self.trainer.iter + 1
|
438 |
-
is_final = next_iter == self.trainer.max_iter
|
439 |
-
if is_final or (self._period > 0 and next_iter % self._period == 0):
|
440 |
-
self.update_stats()
|
441 |
-
|
442 |
-
def update_stats(self):
|
443 |
-
"""
|
444 |
-
Update the model with precise statistics. Users can manually call this method.
|
445 |
-
"""
|
446 |
-
if self._disabled:
|
447 |
-
return
|
448 |
-
|
449 |
-
if self._data_iter is None:
|
450 |
-
self._data_iter = iter(self._data_loader)
|
451 |
-
|
452 |
-
def data_loader():
|
453 |
-
for num_iter in itertools.count(1):
|
454 |
-
if num_iter % 100 == 0:
|
455 |
-
self._logger.info(
|
456 |
-
"Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter)
|
457 |
-
)
|
458 |
-
# This way we can reuse the same iterator
|
459 |
-
yield next(self._data_iter)
|
460 |
-
|
461 |
-
with EventStorage(): # capture events in a new storage to discard them
|
462 |
-
self._logger.info(
|
463 |
-
"Running precise-BN for {} iterations... ".format(self._num_iter)
|
464 |
-
+ "Note that this could produce different statistics every time."
|
465 |
-
)
|
466 |
-
update_bn_stats(self._model, data_loader(), self._num_iter)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/layers/blocks.py
DELETED
@@ -1,111 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
3 |
-
|
4 |
-
import fvcore.nn.weight_init as weight_init
|
5 |
-
from torch import nn
|
6 |
-
|
7 |
-
from .batch_norm import FrozenBatchNorm2d, get_norm
|
8 |
-
from .wrappers import Conv2d
|
9 |
-
|
10 |
-
|
11 |
-
"""
|
12 |
-
CNN building blocks.
|
13 |
-
"""
|
14 |
-
|
15 |
-
|
16 |
-
class CNNBlockBase(nn.Module):
|
17 |
-
"""
|
18 |
-
A CNN block is assumed to have input channels, output channels and a stride.
|
19 |
-
The input and output of `forward()` method must be NCHW tensors.
|
20 |
-
The method can perform arbitrary computation but must match the given
|
21 |
-
channels and stride specification.
|
22 |
-
|
23 |
-
Attribute:
|
24 |
-
in_channels (int):
|
25 |
-
out_channels (int):
|
26 |
-
stride (int):
|
27 |
-
"""
|
28 |
-
|
29 |
-
def __init__(self, in_channels, out_channels, stride):
|
30 |
-
"""
|
31 |
-
The `__init__` method of any subclass should also contain these arguments.
|
32 |
-
|
33 |
-
Args:
|
34 |
-
in_channels (int):
|
35 |
-
out_channels (int):
|
36 |
-
stride (int):
|
37 |
-
"""
|
38 |
-
super().__init__()
|
39 |
-
self.in_channels = in_channels
|
40 |
-
self.out_channels = out_channels
|
41 |
-
self.stride = stride
|
42 |
-
|
43 |
-
def freeze(self):
|
44 |
-
"""
|
45 |
-
Make this block not trainable.
|
46 |
-
This method sets all parameters to `requires_grad=False`,
|
47 |
-
and convert all BatchNorm layers to FrozenBatchNorm
|
48 |
-
|
49 |
-
Returns:
|
50 |
-
the block itself
|
51 |
-
"""
|
52 |
-
for p in self.parameters():
|
53 |
-
p.requires_grad = False
|
54 |
-
FrozenBatchNorm2d.convert_frozen_batchnorm(self)
|
55 |
-
return self
|
56 |
-
|
57 |
-
|
58 |
-
class DepthwiseSeparableConv2d(nn.Module):
|
59 |
-
"""
|
60 |
-
A kxk depthwise convolution + a 1x1 convolution.
|
61 |
-
|
62 |
-
In :paper:`xception`, norm & activation are applied on the second conv.
|
63 |
-
:paper:`mobilenet` uses norm & activation on both convs.
|
64 |
-
"""
|
65 |
-
|
66 |
-
def __init__(
|
67 |
-
self,
|
68 |
-
in_channels,
|
69 |
-
out_channels,
|
70 |
-
kernel_size=3,
|
71 |
-
padding=1,
|
72 |
-
dilation=1,
|
73 |
-
*,
|
74 |
-
norm1=None,
|
75 |
-
activation1=None,
|
76 |
-
norm2=None,
|
77 |
-
activation2=None,
|
78 |
-
):
|
79 |
-
"""
|
80 |
-
Args:
|
81 |
-
norm1, norm2 (str or callable): normalization for the two conv layers.
|
82 |
-
activation1, activation2 (callable(Tensor) -> Tensor): activation
|
83 |
-
function for the two conv layers.
|
84 |
-
"""
|
85 |
-
super().__init__()
|
86 |
-
self.depthwise = Conv2d(
|
87 |
-
in_channels,
|
88 |
-
in_channels,
|
89 |
-
kernel_size=kernel_size,
|
90 |
-
padding=padding,
|
91 |
-
dilation=dilation,
|
92 |
-
groups=in_channels,
|
93 |
-
bias=not norm1,
|
94 |
-
norm=get_norm(norm1, in_channels),
|
95 |
-
activation=activation1,
|
96 |
-
)
|
97 |
-
self.pointwise = Conv2d(
|
98 |
-
in_channels,
|
99 |
-
out_channels,
|
100 |
-
kernel_size=1,
|
101 |
-
bias=not norm2,
|
102 |
-
norm=get_norm(norm2, out_channels),
|
103 |
-
activation=activation2,
|
104 |
-
)
|
105 |
-
|
106 |
-
# default initialization
|
107 |
-
weight_init.c2_msra_fill(self.depthwise)
|
108 |
-
weight_init.c2_msra_fill(self.pointwise)
|
109 |
-
|
110 |
-
def forward(self, x):
|
111 |
-
return self.pointwise(self.depthwise(x))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/v-doc_abstractive_mac/config.py
DELETED
@@ -1,491 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import argparse
|
3 |
-
|
4 |
-
###################################### configuration ######################################
|
5 |
-
class Config(object):
|
6 |
-
|
7 |
-
typeFilters = [[], ["1_query_size_",
|
8 |
-
"1_query_material_",
|
9 |
-
"2_equal_color_",
|
10 |
-
"2_equal_shape_"],
|
11 |
-
["1_query_color_",
|
12 |
-
"1_query_shape_",
|
13 |
-
"2_equal_size_",
|
14 |
-
"2_equal_material_"]]
|
15 |
-
|
16 |
-
#### files interface
|
17 |
-
## data files
|
18 |
-
dataPath = "" # dataset specific
|
19 |
-
datasetFilename = "" # dataset specific
|
20 |
-
|
21 |
-
# file names
|
22 |
-
imagesFilename = "{tier}.h5" # Images
|
23 |
-
instancesFilename = "{tier}Instances.json"
|
24 |
-
# symbols dictionaries
|
25 |
-
questionDictFilename = "questionDict.pkl"
|
26 |
-
answerDictFilename = "answerDict.pkl"
|
27 |
-
qaDictFilename = "qaDict.pkl"
|
28 |
-
|
29 |
-
## experiment files
|
30 |
-
expPathname = "{expName}"
|
31 |
-
expName = "" # will be assigned through argparse
|
32 |
-
|
33 |
-
weightsPath = "./weights"
|
34 |
-
weightsFilename = "weights{epoch}.ckpt"
|
35 |
-
|
36 |
-
# model predictions and optionally attention maps
|
37 |
-
predsPath = "./preds"
|
38 |
-
predsFilename = "{tier}Predictions-{expName}.json"
|
39 |
-
answersFilename = "{tier}Answers-{expName}.txt"
|
40 |
-
|
41 |
-
# logging of accuracy, loss etc. per epoch
|
42 |
-
logPath = "./results"
|
43 |
-
logFilename = "results-{expName}.csv"
|
44 |
-
|
45 |
-
# configuration file of the used flags to run the experiment
|
46 |
-
configPath = "./results"
|
47 |
-
configFilename = "config-{expName}.json"
|
48 |
-
|
49 |
-
def toString(self):
|
50 |
-
return self.expName
|
51 |
-
|
52 |
-
# make directories of experiment if not exist yet
|
53 |
-
def makedirs(self, directory):
|
54 |
-
directory = os.path.join(directory, self.expPath())
|
55 |
-
if not os.path.exists(directory):
|
56 |
-
os.makedirs(directory)
|
57 |
-
return directory
|
58 |
-
|
59 |
-
### filename builders
|
60 |
-
## data files
|
61 |
-
def dataFile(self, filename):
|
62 |
-
return os.path.join(self.dataPath, filename)
|
63 |
-
|
64 |
-
def generatedFile(self, filename):
|
65 |
-
return self.dataFile(self.generatedPrefix + filename)
|
66 |
-
|
67 |
-
datasetFile = lambda self, tier: self.dataFile(self.datasetFilename.format(tier = tier))
|
68 |
-
imagesIdsFile = lambda self, tier: self.dataFile(self.imgIdsFilename.format(tier = tier)) #
|
69 |
-
imagesFile = lambda self, tier: self.dataFile(self.imagesFilename.format(tier = tier))
|
70 |
-
instancesFile = lambda self, tier: self.generatedFile(self.instancesFilename.format(tier = tier))
|
71 |
-
|
72 |
-
questionDictFile = lambda self: self.generatedFile(self.questionDictFilename)
|
73 |
-
answerDictFile = lambda self: self.generatedFile(self.answerDictFilename)
|
74 |
-
qaDictFile = lambda self: self.generatedFile(self.qaDictFilename)
|
75 |
-
|
76 |
-
## experiment files
|
77 |
-
expPath = lambda self: self.expPathname.format(expName = self.toString())
|
78 |
-
|
79 |
-
weightsDir = lambda self: self.makedirs(self.weightsPath)
|
80 |
-
predsDir = lambda self: self.makedirs(self.predsPath)
|
81 |
-
logDir = lambda self: self.makedirs(self.logPath)
|
82 |
-
configDir = lambda self: self.makedirs(self.configPath)
|
83 |
-
|
84 |
-
weightsFile = lambda self, epoch: os.path.join(self.weightsDir(), self.weightsFilename.format(epoch = str(epoch)))
|
85 |
-
predsFile = lambda self, tier: os.path.join(self.predsDir(), self.predsFilename.format(tier = tier, expName = self.expName))
|
86 |
-
answersFile = lambda self, tier: os.path.join(self.predsDir(), self.answersFilename.format(tier = tier, expName = self.expName))
|
87 |
-
logFile = lambda self: os.path.join(self.logDir(), self.logFilename.format(expName = self.expName))
|
88 |
-
configFile = lambda self: os.path.join(self.configDir(), self.configFilename.format(expName = self.expName))
|
89 |
-
|
90 |
-
|
91 |
-
# global configuration variable. Holds file paths and program parameters
|
92 |
-
config = Config()
|
93 |
-
|
94 |
-
###################################### arguments ######################################
|
95 |
-
def parseArgs():
|
96 |
-
parser = argparse.ArgumentParser(fromfile_prefix_chars = "@")
|
97 |
-
|
98 |
-
|
99 |
-
################ systems
|
100 |
-
|
101 |
-
#custom args
|
102 |
-
parser.add_argument('--train_image_length', default=500, type=int, )
|
103 |
-
parser.add_argument('--test_image_length', default=100, type=int, )
|
104 |
-
parser.add_argument('--val_image_length', default=50, type=int, )
|
105 |
-
|
106 |
-
# gpus and memory
|
107 |
-
parser.add_argument("--gpus", default = "", type = str, help = "comma-separated list of gpus to use")
|
108 |
-
parser.add_argument("--gpusNum", default = 1, type = int, help = "number of gpus to use")
|
109 |
-
|
110 |
-
parser.add_argument("--allowGrowth", action = "store_true", help = "allow gpu memory growth")
|
111 |
-
parser.add_argument("--maxMemory", default = 1.0, type = float, help = "set maximum gpu memory usage")
|
112 |
-
|
113 |
-
parser.add_argument("--parallel", action = "store_true", help = "load images in parallel to batch running")
|
114 |
-
parser.add_argument("--workers", default = 1, type = int, help = "number of workers to load images")
|
115 |
-
parser.add_argument("--taskSize", default = 8, type = int, help = "number of image batches to load in advance") # 40
|
116 |
-
# parser.add_argument("--tasksNum", default = 20, type = int, help = "maximal queue size for tasks (to constrain ram usage)") # 2
|
117 |
-
|
118 |
-
parser.add_argument("--useCPU", action = "store_true", help = "put word embeddings on cpu")
|
119 |
-
|
120 |
-
# weight loading and training
|
121 |
-
parser.add_argument("-r", "--restore", action = "store_true", help = "restore last epoch (based on results file)")
|
122 |
-
parser.add_argument("--restoreEpoch", default = 0, type = int, help = "if positive, specific epoch to restore")
|
123 |
-
parser.add_argument("--weightsToKeep", default = 2, type = int, help = "number of previous epochs' weights keep")
|
124 |
-
parser.add_argument("--saveEvery", default = 3000, type = int, help = "number of iterations to save weights after")
|
125 |
-
parser.add_argument("--calleEvery", default = 1500, type = int, help = "number of iterations to call custom function after")
|
126 |
-
|
127 |
-
parser.add_argument("--saveSubset", action = "store_true", help = "save only subset of the weights")
|
128 |
-
parser.add_argument("--trainSubset", action = "store_true", help = "train only subset of the weights")
|
129 |
-
parser.add_argument("--varSubset", default = [], nargs = "*", type = str, help = "list of namespaces to train on")
|
130 |
-
|
131 |
-
# trainReader = ["questionEmbeddings", "questionReader"]
|
132 |
-
# saveControl = ["questionEmbeddings", "programEmbeddings", "seqReader", "programControl"]
|
133 |
-
|
134 |
-
# experiment files
|
135 |
-
parser.add_argument("--expName", default = "PDF_exp_extra", type = str, help = "experiment name")
|
136 |
-
|
137 |
-
# data files
|
138 |
-
parser.add_argument("--dataset", default = "PDF", choices = ["PDF", "CLEVR", "NLVR"], type = str) #
|
139 |
-
parser.add_argument("--dataBasedir", default = "./", type = str, help = "data base directory") # /jagupard14/scr1/dorarad/
|
140 |
-
parser.add_argument("--generatedPrefix", default = "gennew", type = str, help = "prefix for generated data files")
|
141 |
-
parser.add_argument("--featureType", default = "norm_128x32", type = str, help = "features type") #
|
142 |
-
# resnet101_512x128, norm_400x100, none_80x20, normPerImage_80x20, norm_80x20
|
143 |
-
|
144 |
-
################ optimization
|
145 |
-
|
146 |
-
# training/testing
|
147 |
-
parser.add_argument("--train", action = "store_true", help = "run training")
|
148 |
-
parser.add_argument("--evalTrain", action = "store_true", help = "run eval with ema on train dataset") #
|
149 |
-
parser.add_argument("--test", action = "store_true", help = "run testing every epoch and generate predictions file") #
|
150 |
-
parser.add_argument("--finalTest", action = "store_true", help = "run testing on final epoch")
|
151 |
-
parser.add_argument("--retainVal", action = "store_true", help = "retain validation order between runs") #
|
152 |
-
|
153 |
-
parser.add_argument("--getPreds", action = "store_true", help = "store prediction")
|
154 |
-
parser.add_argument("--getAtt", action = "store_true", help = "store attention maps")
|
155 |
-
parser.add_argument("--analysisType", default = "", type = str, choices = ["", "questionLength, programLength","type", "arity"], help = "show breakdown of results according to type") #
|
156 |
-
|
157 |
-
parser.add_argument("--trainedNum", default = 0, type = int, help = "if positive, train on subset of the data")
|
158 |
-
parser.add_argument("--testedNum", default = 0, type = int, help = "if positive, test on subset of the data")
|
159 |
-
|
160 |
-
# bucketing
|
161 |
-
parser.add_argument("--noBucket", action = "store_true", help = "bucket data according to question length")
|
162 |
-
parser.add_argument("--noRebucket", action = "store_true", help = "bucket data according to question and program length") #
|
163 |
-
|
164 |
-
# filtering
|
165 |
-
parser.add_argument("--tOnlyChain", action = "store_true", help = "train only chain questions")
|
166 |
-
parser.add_argument("--vOnlyChain", action = "store_true", help = "test only chain questions")
|
167 |
-
parser.add_argument("--tMaxQ", default = 0, type = int, help = "if positive, train on questions up to this length")
|
168 |
-
parser.add_argument("--tMaxP", default = 0, type = int, help = "if positive, test on questions up to this length")
|
169 |
-
parser.add_argument("--vMaxQ", default = 0, type = int, help = "if positive, train on questions with programs up to this length")
|
170 |
-
parser.add_argument("--vMaxP", default = 0, type = int, help = "if positive, test on questions with programs up to this length")
|
171 |
-
parser.add_argument("--tFilterOp", default = 0, type = int, help = "train questions by to be included in the types listed")
|
172 |
-
parser.add_argument("--vFilterOp", default = 0, type = int, help = "test questions by to be included in the types listed")
|
173 |
-
|
174 |
-
# extra and extraVal
|
175 |
-
parser.add_argument("--extra", action = "store_true", help = "prepare extra data (add to vocabulary") #
|
176 |
-
parser.add_argument("--trainExtra", action = "store_true", help = "train (only) on extra data") #
|
177 |
-
parser.add_argument("--alterExtra", action = "store_true", help = "alter main data training with extra dataset") #
|
178 |
-
parser.add_argument("--alterNum", default = 1, type = int, help = "alteration rate") #
|
179 |
-
parser.add_argument("--extraVal", action = "store_true", help = "only extra validation data (for compositional clevr)") #
|
180 |
-
parser.add_argument("--finetuneNum", default = 0, type = int, help = "if positive, finetune on that subset of val (for compositional clevr)") #
|
181 |
-
|
182 |
-
# exponential moving average
|
183 |
-
parser.add_argument("--useEMA", action = "store_true", help = "use exponential moving average for weights")
|
184 |
-
parser.add_argument("--emaDecayRate", default = 0.999, type = float, help = "decay rate for exponential moving average")
|
185 |
-
|
186 |
-
# sgd optimizer
|
187 |
-
parser.add_argument("--batchSize", default = 64, type = int, help = "batch size")
|
188 |
-
parser.add_argument("--epochs", default = 100, type = int, help = "number of epochs to run")
|
189 |
-
parser.add_argument("--lr", default = 0.0001, type = float, help = "learning rate")
|
190 |
-
parser.add_argument("--lrReduce", action = "store_true", help = "reduce learning rate if training loss doesn't go down (manual annealing)")
|
191 |
-
parser.add_argument("--lrDecayRate", default = 0.5, type = float, help = "learning decay rate if training loss doesn't go down")
|
192 |
-
parser.add_argument("--earlyStopping", default = 0, type = int, help = "if positive, stop if no improvement for that number of epochs")
|
193 |
-
|
194 |
-
parser.add_argument("--adam", action = "store_true", help = "use adam")
|
195 |
-
parser.add_argument("--l2", default = 0, type = float, help = "if positive, add l2 loss term")
|
196 |
-
parser.add_argument("--clipGradients", action = "store_true", help = "clip gradients")
|
197 |
-
parser.add_argument("--gradMaxNorm", default = 8, type = int, help = "clipping value")
|
198 |
-
|
199 |
-
# batch normalization
|
200 |
-
parser.add_argument("--memoryBN", action = "store_true", help = "use batch normalization on the recurrent memory")
|
201 |
-
parser.add_argument("--stemBN", action = "store_true", help = "use batch normalization in the image input unit (stem)")
|
202 |
-
parser.add_argument("--outputBN", action = "store_true", help = "use batch normalization in the output unit")
|
203 |
-
parser.add_argument("--bnDecay", default = 0.999, type = float, help = "batch norm decay rate")
|
204 |
-
parser.add_argument("--bnCenter", action = "store_true", help = "batch norm with centering")
|
205 |
-
parser.add_argument("--bnScale", action = "store_true", help = "batch norm with scaling")
|
206 |
-
|
207 |
-
## dropouts
|
208 |
-
parser.add_argument("--encInputDropout", default = 0.85, type = float, help = "dropout of the rnn inputs to the Question Input Unit")
|
209 |
-
parser.add_argument("--encStateDropout", default = 1.0, type = float, help = "dropout of the rnn states of the Question Input Unit")
|
210 |
-
parser.add_argument("--stemDropout", default = 0.82, type = float, help = "dropout of the Image Input Unit (the stem)")
|
211 |
-
|
212 |
-
parser.add_argument("--qDropout", default = 0.92, type = float, help = "dropout on the question vector")
|
213 |
-
# parser.add_argument("--qDropoutOut", default = 1.0, type = float, help = "dropout on the question vector the goes to the output unit")
|
214 |
-
# parser.add_argument("--qDropoutMAC", default = 1.0, type = float, help = "dropout on the question vector the goes to MAC")
|
215 |
-
|
216 |
-
parser.add_argument("--memoryDropout", default = 0.85, type = float, help = "dropout on the recurrent memory")
|
217 |
-
parser.add_argument("--readDropout", default = 0.85, type = float, help = "dropout of the read unit")
|
218 |
-
parser.add_argument("--writeDropout", default = 1.0, type = float, help = "dropout of the write unit")
|
219 |
-
parser.add_argument("--outputDropout", default = 0.85, type = float, help = "dropout of the output unit")
|
220 |
-
|
221 |
-
parser.add_argument("--parametricDropout", action = "store_true", help = "use parametric dropout") #
|
222 |
-
parser.add_argument("--encVariationalDropout", action = "store_true", help = "use variational dropout in the RNN input unit")
|
223 |
-
parser.add_argument("--memoryVariationalDropout", action = "store_true", help = "use variational dropout across the MAC network")
|
224 |
-
|
225 |
-
## nonlinearities
|
226 |
-
parser.add_argument("--relu", default = "ELU", choices = ["STD", "PRM", "ELU", "LKY", "SELU"], type = str, help = "type of ReLU to use: standard, parametric, ELU, or leaky")
|
227 |
-
# parser.add_argument("--reluAlpha", default = 0.2, type = float, help = "alpha value for the leaky ReLU")
|
228 |
-
|
229 |
-
parser.add_argument("--mulBias", default = 0.0, type = float, help = "bias to add in multiplications (x + b) * (y + b) for better training") #
|
230 |
-
|
231 |
-
parser.add_argument("--imageLinPool", default = 2, type = int, help = "pooling for image linearizion")
|
232 |
-
|
233 |
-
################ baseline model parameters
|
234 |
-
|
235 |
-
parser.add_argument("--useBaseline", action = "store_true", help = "run the baseline model")
|
236 |
-
parser.add_argument("--baselineLSTM", action = "store_true", help = "use LSTM in baseline")
|
237 |
-
parser.add_argument("--baselineCNN", action = "store_true", help = "use CNN in baseline")
|
238 |
-
parser.add_argument("--baselineAtt", action = "store_true", help = "use stacked attention baseline")
|
239 |
-
|
240 |
-
parser.add_argument("--baselineProjDim", default = 64, type = int, help = "projection dimension for image linearizion")
|
241 |
-
|
242 |
-
parser.add_argument("--baselineAttNumLayers", default = 2, type = int, help = "number of stacked attention layers")
|
243 |
-
parser.add_argument("--baselineAttType", default = "ADD", type = str, choices = ["MUL", "DIAG", "BL", "ADD"], help = "attention type (multiplicative, additive, etc)")
|
244 |
-
|
245 |
-
################ image input unit (the "stem")
|
246 |
-
|
247 |
-
parser.add_argument("--stemDim", default = 512, type = int, help = "dimension of stem CNNs")
|
248 |
-
parser.add_argument("--stemNumLayers", default = 2, type = int, help = "number of stem layers")
|
249 |
-
parser.add_argument("--stemKernelSize", default = 3, type = int, help = "kernel size for stem (same for all the stem layers)")
|
250 |
-
parser.add_argument("--stemKernelSizes", default = None, nargs = "*", type = int, help = "kernel sizes for stem (per layer)")
|
251 |
-
parser.add_argument("--stemStrideSizes", default = None, nargs = "*", type = int, help = "stride sizes for stem (per layer)")
|
252 |
-
|
253 |
-
parser.add_argument("--stemLinear", action = "store_true", help = "use a linear stem (instead of CNNs)") #
|
254 |
-
# parser.add_argument("--stemProjDim", default = 64, type = int, help = "projection dimension of in image linearization") #
|
255 |
-
# parser.add_argument("--stemProjPooling", default = 2, type = int, help = "pooling for the image linearization") #
|
256 |
-
|
257 |
-
parser.add_argument("--stemGridRnn", action = "store_true", help = "use grid RNN layer") #
|
258 |
-
parser.add_argument("--stemGridRnnMod", default = "RNN", type = str, choices = ["RNN", "GRU"], help = "RNN type for grid") #
|
259 |
-
parser.add_argument("--stemGridAct", default = "NON", type = str, choices = ["NON", "RELU", "TANH"], help = "nonlinearity type for grid") #
|
260 |
-
|
261 |
-
## location
|
262 |
-
parser.add_argument("--locationAware", action = "store_true", help = "add positional features to image representation (linear meshgrid by default)")
|
263 |
-
parser.add_argument("--locationType", default = "L", type = str, choices = ["L", "PE"], help = "L: linear features, PE: Positional Encoding")
|
264 |
-
parser.add_argument("--locationBias", default = 1.0, type = float, help = "the scale of the positional features")
|
265 |
-
parser.add_argument("--locationDim", default = 32, type = int, help = "the number of PE dimensions")
|
266 |
-
|
267 |
-
################ question input unit (the "encoder")
|
268 |
-
parser.add_argument("--encType", default = "LSTM", choices = ["RNN", "GRU", "LSTM", "MiGRU", "MiLSTM"], help = "encoder RNN type")
|
269 |
-
parser.add_argument("--encDim", default = 512, type = int, help = "dimension of encoder RNN")
|
270 |
-
parser.add_argument("--encNumLayers", default = 1, type = int, help = "number of encoder RNN layers")
|
271 |
-
parser.add_argument("--encBi", action = "store_true", help = "use bi-directional encoder")
|
272 |
-
# parser.add_argument("--encOutProj", action = "store_true", help = "add projection layer for encoder outputs")
|
273 |
-
# parser.add_argument("--encOutProjDim", default = 256, type = int, help = "dimension of the encoder projection layer")
|
274 |
-
# parser.add_argument("--encQProj", action = "store_true", help = "add projection for the question representation")
|
275 |
-
parser.add_argument("--encProj", action = "store_true", help = "project encoder outputs and question")
|
276 |
-
parser.add_argument("--encProjQAct", default = "NON", type = str, choices = ["NON", "RELU", "TANH"], help = "project question vector with this activation")
|
277 |
-
|
278 |
-
##### word embeddings
|
279 |
-
parser.add_argument("--wrdEmbDim", default = 300, type = int, help = "word embeddings dimension")
|
280 |
-
parser.add_argument("--wrdEmbRandom", action = "store_true", help = "initialize word embeddings to random (normal)")
|
281 |
-
parser.add_argument("--wrdEmbUniform", action = "store_true", help = "initialize with uniform distribution")
|
282 |
-
parser.add_argument("--wrdEmbScale", default = 1.0, type = float, help = "word embeddings initialization scale")
|
283 |
-
parser.add_argument("--wrdEmbFixed", action = "store_true", help = "set word embeddings fixed (don't train)")
|
284 |
-
parser.add_argument("--wrdEmbUnknown", action = "store_true", help = "set words outside of training set to <UNK>")
|
285 |
-
|
286 |
-
parser.add_argument("--ansEmbMod", default = "NON", choices = ["NON", "SHARED", "BOTH"], type = str, help = "BOTH: create word embeddings for answers. SHARED: share them with question embeddings.") #
|
287 |
-
parser.add_argument("--answerMod", default = "NON", choices = ["NON", "MUL", "DIAG", "BL"], type = str, help = "operation for multiplication with answer embeddings: direct multiplication, scalar weighting, or bilinear") #
|
288 |
-
|
289 |
-
################ output unit (classifier)
|
290 |
-
parser.add_argument("--outClassifierDims", default = [512], nargs = "*", type = int, help = "dimensions of the classifier")
|
291 |
-
parser.add_argument("--outImage", action = "store_true", help = "feed the image to the output unit")
|
292 |
-
parser.add_argument("--outImageDim", default = 1024, type = int, help = "dimension of linearized image fed to the output unit")
|
293 |
-
parser.add_argument("--outQuestion", action = "store_true", help = "feed the question to the output unit")
|
294 |
-
parser.add_argument("--outQuestionMul", action = "store_true", help = "feed the multiplication of question and memory to the output unit")
|
295 |
-
|
296 |
-
################ network
|
297 |
-
|
298 |
-
parser.add_argument("--netLength", default = 16, type = int, help = "network length (number of cells)")
|
299 |
-
# parser.add_argument("--netDim", default = 512, type = int)
|
300 |
-
parser.add_argument("--memDim", default = 512, type = int, help = "dimension of memory state")
|
301 |
-
parser.add_argument("--ctrlDim", default = 512, type = int, help = "dimension of control state")
|
302 |
-
parser.add_argument("--attDim", default = 512, type = int, help = "dimension of pre-attention interactions space")
|
303 |
-
parser.add_argument("--unsharedCells", default = False, type = bool, help = "unshare weights between cells ")
|
304 |
-
|
305 |
-
# initialization
|
306 |
-
parser.add_argument("--initCtrl", default = "PRM", type = str, choices = ["PRM", "ZERO", "Q"], help = "initialization mod for control")
|
307 |
-
parser.add_argument("--initMem", default = "PRM", type = str, choices = ["PRM", "ZERO", "Q"], help = "initialization mod for memory")
|
308 |
-
parser.add_argument("--initKBwithQ", default = "NON", type = str, choices = ["NON", "CNCT", "MUL"], help = "merge question with knowledge base")
|
309 |
-
parser.add_argument("--addNullWord", action = "store_true", help = "add parametric word in the beginning of the question")
|
310 |
-
|
311 |
-
################ control unit
|
312 |
-
# control ablations (use whole question or pre-attention continuous vectors as control)
|
313 |
-
parser.add_argument("--controlWholeQ", action = "store_true", help = "use whole question vector as control")
|
314 |
-
parser.add_argument("--controlContinuous", action = "store_true", help = "use continuous representation of control (without attention)")
|
315 |
-
|
316 |
-
# step 0: inputs to control unit (word embeddings or encoder outputs, with optional projection)
|
317 |
-
parser.add_argument("--controlContextual", action = "store_true", help = "use contextual words for attention (otherwise will use word embeddings)")
|
318 |
-
parser.add_argument("--controlInWordsProj", action = "store_true", help = "apply linear projection over words for attention computation")
|
319 |
-
parser.add_argument("--controlOutWordsProj", action = "store_true", help = "apply linear projection over words for summary computation")
|
320 |
-
|
321 |
-
parser.add_argument("--controlInputUnshared", action = "store_true", help = "use different question representation for each cell")
|
322 |
-
parser.add_argument("--controlInputAct", default = "TANH", type = str, choices = ["NON", "RELU", "TANH"], help = "activation for question projection")
|
323 |
-
|
324 |
-
# step 1: merging previous control and whole question
|
325 |
-
parser.add_argument("--controlFeedPrev", action = "store_true", help = "feed previous control state")
|
326 |
-
parser.add_argument("--controlFeedPrevAtt", action = "store_true", help = "feed previous control post word attention (otherwise will feed continuous control)")
|
327 |
-
parser.add_argument("--controlFeedInputs", action = "store_true", help = "feed question representation")
|
328 |
-
parser.add_argument("--controlContAct", default = "TANH", type = str, choices = ["NON", "RELU", "TANH"], help = "activation on the words interactions")
|
329 |
-
|
330 |
-
# step 2: word attention and optional projection
|
331 |
-
parser.add_argument("--controlConcatWords", action = "store_true", help = "concatenate words to interaction when computing attention")
|
332 |
-
parser.add_argument("--controlProj", action = "store_true", help = "apply linear projection on words interactions")
|
333 |
-
parser.add_argument("--controlProjAct", default = "NON", type = str, choices = ["NON", "RELU", "TANH"], help = "activation for control interactions")
|
334 |
-
|
335 |
-
# parser.add_argument("--controlSelfAtt", default = False, type = bool)
|
336 |
-
|
337 |
-
# parser.add_argument("--controlCoverage", default = False, type = bool)
|
338 |
-
# parser.add_argument("--controlCoverageBias", default = 1.0, type = float)
|
339 |
-
|
340 |
-
# parser.add_argument("--controlPostRNN", default = False, type = bool)
|
341 |
-
# parser.add_argument("--controlPostRNNmod", default = "RNN", type = str) # GRU
|
342 |
-
|
343 |
-
# parser.add_argument("--selfAttShareInter", default = False, type = bool)
|
344 |
-
|
345 |
-
# parser.add_argument("--wordControl", default = False, type = bool)
|
346 |
-
# parser.add_argument("--gradualControl", default = False, type = bool)
|
347 |
-
|
348 |
-
################ read unit
|
349 |
-
# step 1: KB-memory interactions
|
350 |
-
parser.add_argument("--readProjInputs", action = "store_true", help = "project read unit inputs")
|
351 |
-
parser.add_argument("--readProjShared", action = "store_true", help = "use shared projection for all read unit inputs")
|
352 |
-
|
353 |
-
parser.add_argument("--readMemAttType", default = "MUL", type = str, choices = ["MUL", "DIAG", "BL", "ADD"], help = "attention type for interaction with memory")
|
354 |
-
parser.add_argument("--readMemConcatKB", action = "store_true", help = "concatenate KB elements to memory interaction")
|
355 |
-
parser.add_argument("--readMemConcatProj", action = "store_true", help = "concatenate projected values instead or original to memory interaction")
|
356 |
-
parser.add_argument("--readMemProj", action = "store_true", help = "project interactions with memory")
|
357 |
-
parser.add_argument("--readMemAct", default = "RELU", type = str, choices = ["NON", "RELU", "TANH"], help = "activation for memory interaction")
|
358 |
-
|
359 |
-
# step 2: interaction with control
|
360 |
-
parser.add_argument("--readCtrl", action = "store_true", help = "compare KB-memory interactions to control")
|
361 |
-
parser.add_argument("--readCtrlAttType", default = "MUL", type = str, choices = ["MUL", "DIAG", "BL", "ADD"], help = "attention type for interaction with control")
|
362 |
-
parser.add_argument("--readCtrlConcatKB", action = "store_true", help = "concatenate KB elements to control interaction")
|
363 |
-
parser.add_argument("--readCtrlConcatProj", action = "store_true", help = "concatenate projected values instead or original to control interaction")
|
364 |
-
parser.add_argument("--readCtrlConcatInter", action = "store_true", help = "concatenate memory interactions to control interactions")
|
365 |
-
parser.add_argument("--readCtrlAct", default = "RELU", type = str, choices = ["NON", "RELU", "TANH"], help = "activation for control interaction")
|
366 |
-
|
367 |
-
# step 3: summarize attention over knowledge base
|
368 |
-
parser.add_argument("--readSmryKBProj", action = "store_true", help = "use knowledge base projections when summing attention up (should be used only if KB is projected.")
|
369 |
-
|
370 |
-
# parser.add_argument("--saAllMultiplicative", default = False, type = bool)
|
371 |
-
# parser.add_argument("--saSumMultiplicative", default = False, type = bool)
|
372 |
-
|
373 |
-
################ write unit
|
374 |
-
# step 1: input to the write unit (only previous memory, or new information, or both)
|
375 |
-
parser.add_argument("--writeInputs", default = "BOTH", type = str, choices = ["MEM", "INFO", "BOTH", "SUM"], help = "inputs to the write unit")
|
376 |
-
parser.add_argument("--writeConcatMul", action = "store_true", help = "add multiplicative integration between inputs")
|
377 |
-
|
378 |
-
parser.add_argument("--writeInfoProj", action = "store_true", help = "project retrieved info")
|
379 |
-
parser.add_argument("--writeInfoAct", default = "NON", type = str, choices = ["NON", "RELU", "TANH"], help = "new info activation")
|
380 |
-
|
381 |
-
# step 2: self attention and following projection
|
382 |
-
parser.add_argument("--writeSelfAtt", action = "store_true", help = "use self attention")
|
383 |
-
parser.add_argument("--writeSelfAttMod", default = "NON", type = str, choices = ["NON", "CONT"], help = "control version to compare to")
|
384 |
-
|
385 |
-
parser.add_argument("--writeMergeCtrl", action = "store_true", help = "merge control with memory")
|
386 |
-
|
387 |
-
parser.add_argument("--writeMemProj", action = "store_true", help = "project new memory")
|
388 |
-
parser.add_argument("--writeMemAct", default = "NON", type = str, choices = ["NON", "RELU", "TANH"], help = "new memory activation")
|
389 |
-
|
390 |
-
# step 3: gate between new memory and previous value
|
391 |
-
parser.add_argument("--writeGate", action = "store_true", help = "add gate to write unit")
|
392 |
-
parser.add_argument("--writeGateShared", action = "store_true", help = "use one gate value for all dimensions of the memory state")
|
393 |
-
parser.add_argument("--writeGateBias", default = 1.0, type = float, help = "bias for the write unit gate (positive to bias for taking new memory)")
|
394 |
-
|
395 |
-
## modular
|
396 |
-
# parser.add_argument("--modulesNum", default = 10, type = int)
|
397 |
-
# parser.add_argument("--controlBoth", default = False, type = bool)
|
398 |
-
# parser.add_argument("--addZeroModule", default = False, type = bool)
|
399 |
-
# parser.add_argument("--endModule", default = False, type = bool)
|
400 |
-
|
401 |
-
## hybrid
|
402 |
-
# parser.add_argument("--hybrid", default = False, type = bool, help = "hybrid attention cnn model")
|
403 |
-
# parser.add_argument("--earlyHybrid", default = False, type = bool)
|
404 |
-
# parser.add_argument("--lateHybrid", default = False, type = bool)
|
405 |
-
|
406 |
-
## autoencoders
|
407 |
-
# parser.add_argument("--autoEncMem", action = "store_true", help = "add memory2control auto-encoder loss")
|
408 |
-
# parser.add_argument("--autoEncMemW", default = 0.0001, type = float, help = "weight for auto-encoder loss")
|
409 |
-
# parser.add_argument("--autoEncMemInputs", default = "INFO", type = str, choices = ["MEM", "INFO"], help = "inputs to auto-encoder")
|
410 |
-
# parser.add_argument("--autoEncMemAct", default = "NON", type = str, choices = ["NON", "RELU", "TANH"], help = "activation type in the auto-encoder")
|
411 |
-
# parser.add_argument("--autoEncMemLoss", default = "CONT", type = str, choices = ["CONT", "PROB", "SMRY"], help = "target for the auto-encoder loss")
|
412 |
-
# parser.add_argument("--autoEncMemCnct", action = "store_true", help = "concat word attentions to auto-encoder features")
|
413 |
-
|
414 |
-
# parser.add_argument("--autoEncCtrl", action = "store_true")
|
415 |
-
# parser.add_argument("--autoEncCtrlW", default = 0.0001, type = float)
|
416 |
-
# parser.add_argument("--autoEncCtrlGRU", action = "store_true")
|
417 |
-
|
418 |
-
## temperature
|
419 |
-
# parser.add_argument("--temperature", default = 1.0, type = float, help = "temperature for modules softmax") #
|
420 |
-
# parser.add_argument("--tempParametric", action = "store_true", help = "parametric temperature") #
|
421 |
-
# parser.add_argument("--tempDynamic", action = "store_true", help = "dynamic temperature") #
|
422 |
-
# parser.add_argument("--tempAnnealRate", default = 0.000004, type = float, help = "temperature annealing rate") #
|
423 |
-
# parser.add_argument("--tempMin", default = 0.5, type = float, help = "minimum temperature") #
|
424 |
-
|
425 |
-
## gumbel
|
426 |
-
# parser.add_argument("--gumbelSoftmax", action = "store_true", help = "use gumbel for the module softmax (soft for training and hard for testing)") #
|
427 |
-
# parser.add_argument("--gumbelSoftmaxBoth", action = "store_true", help = "use softmax for training and testing") #
|
428 |
-
# parser.add_argument("--gumbelArgmaxBoth", action = "store_true", help = "use argmax for training and testing") #
|
429 |
-
|
430 |
-
parser.parse_args(namespace = config)
|
431 |
-
|
432 |
-
|
433 |
-
|
434 |
-
|
435 |
-
###################################### dataset configuration ######################################
|
436 |
-
|
437 |
-
def configPDF():
|
438 |
-
config.dataPath = "{dataBasedir}/PDF_v1/data".format(dataBasedir = config.dataBasedir)
|
439 |
-
config.datasetFilename = "PDF_{tier}_questions.json"
|
440 |
-
config.wordVectorsFile = "./PDF_v1/data/glove/glove.6B.{dim}d.txt".format(dim = config.wrdEmbDim) #
|
441 |
-
|
442 |
-
config.imageDims = [14, 14, 1024]
|
443 |
-
config.programLims = [5, 10, 15, 20]
|
444 |
-
config.questionLims = [10, 15, 20, 25]
|
445 |
-
|
446 |
-
def configCLEVR():
|
447 |
-
config.dataPath = "{dataBasedir}/CLEVR_v1/data".format(dataBasedir = config.dataBasedir)
|
448 |
-
config.datasetFilename = "CLEVR_{tier}_questions.json"
|
449 |
-
config.wordVectorsFile = "./CLEVR_v1/data/glove/glove.6B.{dim}d.txt".format(dim = config.wrdEmbDim) #
|
450 |
-
|
451 |
-
config.imageDims = [14, 14, 1024]
|
452 |
-
config.programLims = [5, 10, 15, 20]
|
453 |
-
config.questionLims = [10, 15, 20, 25]
|
454 |
-
|
455 |
-
def configNLVR():
|
456 |
-
config.dataPath = "{dataBasedir}/nlvr".format(dataBasedir = config.dataBasedir)
|
457 |
-
config.datasetFilename = "{tier}.json"
|
458 |
-
config.imagesFilename = "{{tier}}_{featureType}.h5".format(featureType = config.featureType)
|
459 |
-
config.imgIdsFilename = "{tier}ImgIds.json"
|
460 |
-
config.wordVectorsFile = "./CLEVR_v1/data/glove/glove.6B.{dim}d.txt".format(dim = config.wrdEmbDim) #
|
461 |
-
|
462 |
-
config.questionLims = [12]
|
463 |
-
# config.noRebucket = True
|
464 |
-
|
465 |
-
# if config.stemKernelSizes == []:
|
466 |
-
# if config.featureType.endsWith("128x32"):
|
467 |
-
# config.stemKernelSizes = [8, 4, 4]
|
468 |
-
# config.stemStrideSizes = [2, 2, 1]
|
469 |
-
# config.stemNumLayers = 3
|
470 |
-
# if config.featureType.endsWith("512x128"):
|
471 |
-
# config.stemKernelSizes = [8, 4, 4, 2]
|
472 |
-
# config.stemStrideSizes = [4, 2, 2, 1]
|
473 |
-
# config.stemNumLayers = 4
|
474 |
-
# config.stemDim = 64
|
475 |
-
|
476 |
-
if config.featureType == "resnet101_512x128":
|
477 |
-
config.imageDims = [8, 32, 1024]
|
478 |
-
else:
|
479 |
-
stridesOverall = 1
|
480 |
-
if stemStrideSizes is not None:
|
481 |
-
for s in config.stemStrideSizes:
|
482 |
-
stridesOverall *= int(s)
|
483 |
-
size = config.featureType.split("_")[-1].split("x")
|
484 |
-
config.imageDims = [int(size[1]) / stridesOverall, int(size[0]) / stridesOverall, 3]
|
485 |
-
|
486 |
-
## dataset specific configs
|
487 |
-
loadDatasetConfig = {
|
488 |
-
"CLEVR": configCLEVR,
|
489 |
-
"NLVR": configNLVR,
|
490 |
-
"PDF": configPDF
|
491 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Carlosito16/HXM-summarization/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: HXM Summarization
|
3 |
-
emoji: 🌍
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: pink
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.17.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ChandraMohanNayal/AutoGPT/autogpt/prompt.py
DELETED
@@ -1,204 +0,0 @@
|
|
1 |
-
from colorama import Fore
|
2 |
-
|
3 |
-
from autogpt.config import Config
|
4 |
-
from autogpt.config.ai_config import AIConfig
|
5 |
-
from autogpt.config.config import Config
|
6 |
-
from autogpt.logs import logger
|
7 |
-
from autogpt.promptgenerator import PromptGenerator
|
8 |
-
from autogpt.setup import prompt_user
|
9 |
-
from autogpt.utils import clean_input
|
10 |
-
|
11 |
-
CFG = Config()
|
12 |
-
|
13 |
-
|
14 |
-
def get_prompt() -> str:
|
15 |
-
"""
|
16 |
-
This function generates a prompt string that includes various constraints,
|
17 |
-
commands, resources, and performance evaluations.
|
18 |
-
|
19 |
-
Returns:
|
20 |
-
str: The generated prompt string.
|
21 |
-
"""
|
22 |
-
|
23 |
-
# Initialize the Config object
|
24 |
-
cfg = Config()
|
25 |
-
|
26 |
-
# Initialize the PromptGenerator object
|
27 |
-
prompt_generator = PromptGenerator()
|
28 |
-
|
29 |
-
# Add constraints to the PromptGenerator object
|
30 |
-
prompt_generator.add_constraint(
|
31 |
-
"~4000 word limit for short term memory. Your short term memory is short, so"
|
32 |
-
" immediately save important information to files."
|
33 |
-
)
|
34 |
-
prompt_generator.add_constraint(
|
35 |
-
"If you are unsure how you previously did something or want to recall past"
|
36 |
-
" events, thinking about similar events will help you remember."
|
37 |
-
)
|
38 |
-
prompt_generator.add_constraint("No user assistance")
|
39 |
-
prompt_generator.add_constraint(
|
40 |
-
'Exclusively use the commands listed in double quotes e.g. "command name"'
|
41 |
-
)
|
42 |
-
prompt_generator.add_constraint(
|
43 |
-
"Use subprocesses for commands that will not terminate within a few minutes"
|
44 |
-
)
|
45 |
-
|
46 |
-
# Define the command list
|
47 |
-
commands = [
|
48 |
-
("Google Search", "google", {"input": "<search>"}),
|
49 |
-
(
|
50 |
-
"Browse Website",
|
51 |
-
"browse_website",
|
52 |
-
{"url": "<url>", "question": "<what_you_want_to_find_on_website>"},
|
53 |
-
),
|
54 |
-
(
|
55 |
-
"Start GPT Agent",
|
56 |
-
"start_agent",
|
57 |
-
{"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"},
|
58 |
-
),
|
59 |
-
(
|
60 |
-
"Message GPT Agent",
|
61 |
-
"message_agent",
|
62 |
-
{"key": "<key>", "message": "<message>"},
|
63 |
-
),
|
64 |
-
("List GPT Agents", "list_agents", {}),
|
65 |
-
("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
|
66 |
-
(
|
67 |
-
"Clone Repository",
|
68 |
-
"clone_repository",
|
69 |
-
{"repository_url": "<url>", "clone_path": "<directory>"},
|
70 |
-
),
|
71 |
-
("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
|
72 |
-
("Read file", "read_file", {"file": "<file>"}),
|
73 |
-
("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"}),
|
74 |
-
("Delete file", "delete_file", {"file": "<file>"}),
|
75 |
-
("Search Files", "search_files", {"directory": "<directory>"}),
|
76 |
-
("Analyze Code", "analyze_code", {"code": "<full_code_string>"}),
|
77 |
-
(
|
78 |
-
"Get Improved Code",
|
79 |
-
"improve_code",
|
80 |
-
{"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"},
|
81 |
-
),
|
82 |
-
(
|
83 |
-
"Write Tests",
|
84 |
-
"write_tests",
|
85 |
-
{"code": "<full_code_string>", "focus": "<list_of_focus_areas>"},
|
86 |
-
),
|
87 |
-
("Execute Python File", "execute_python_file", {"file": "<file>"}),
|
88 |
-
("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
|
89 |
-
("Generate Image", "generate_image", {"prompt": "<prompt>"}),
|
90 |
-
("Send Tweet", "send_tweet", {"text": "<text>"}),
|
91 |
-
]
|
92 |
-
|
93 |
-
# Only add the audio to text command if the model is specified
|
94 |
-
if cfg.huggingface_audio_to_text_model:
|
95 |
-
commands.append(
|
96 |
-
("Convert Audio to text", "read_audio_from_file", {"file": "<file>"}),
|
97 |
-
)
|
98 |
-
|
99 |
-
# Only add shell command to the prompt if the AI is allowed to execute it
|
100 |
-
if cfg.execute_local_commands:
|
101 |
-
commands.append(
|
102 |
-
(
|
103 |
-
"Execute Shell Command, non-interactive commands only",
|
104 |
-
"execute_shell",
|
105 |
-
{"command_line": "<command_line>"},
|
106 |
-
),
|
107 |
-
)
|
108 |
-
commands.append(
|
109 |
-
(
|
110 |
-
"Execute Shell Command Popen, non-interactive commands only",
|
111 |
-
"execute_shell_popen",
|
112 |
-
{"command_line": "<command_line>"},
|
113 |
-
),
|
114 |
-
)
|
115 |
-
|
116 |
-
# Only add the download file command if the AI is allowed to execute it
|
117 |
-
if cfg.allow_downloads:
|
118 |
-
commands.append(
|
119 |
-
(
|
120 |
-
"Downloads a file from the internet, and stores it locally",
|
121 |
-
"download_file",
|
122 |
-
{"url": "<file_url>", "file": "<saved_filename>"},
|
123 |
-
),
|
124 |
-
)
|
125 |
-
|
126 |
-
# Add these command last.
|
127 |
-
commands.append(
|
128 |
-
("Do Nothing", "do_nothing", {}),
|
129 |
-
)
|
130 |
-
commands.append(
|
131 |
-
("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
|
132 |
-
)
|
133 |
-
|
134 |
-
# Add commands to the PromptGenerator object
|
135 |
-
for command_label, command_name, args in commands:
|
136 |
-
prompt_generator.add_command(command_label, command_name, args)
|
137 |
-
|
138 |
-
# Add resources to the PromptGenerator object
|
139 |
-
prompt_generator.add_resource(
|
140 |
-
"Internet access for searches and information gathering."
|
141 |
-
)
|
142 |
-
prompt_generator.add_resource("Long Term memory management.")
|
143 |
-
prompt_generator.add_resource(
|
144 |
-
"GPT-3.5 powered Agents for delegation of simple tasks."
|
145 |
-
)
|
146 |
-
prompt_generator.add_resource("File output.")
|
147 |
-
|
148 |
-
# Add performance evaluations to the PromptGenerator object
|
149 |
-
prompt_generator.add_performance_evaluation(
|
150 |
-
"Continuously review and analyze your actions to ensure you are performing to"
|
151 |
-
" the best of your abilities."
|
152 |
-
)
|
153 |
-
prompt_generator.add_performance_evaluation(
|
154 |
-
"Constructively self-criticize your big-picture behavior constantly."
|
155 |
-
)
|
156 |
-
prompt_generator.add_performance_evaluation(
|
157 |
-
"Reflect on past decisions and strategies to refine your approach."
|
158 |
-
)
|
159 |
-
prompt_generator.add_performance_evaluation(
|
160 |
-
"Every command has a cost, so be smart and efficient. Aim to complete tasks in"
|
161 |
-
" the least number of steps."
|
162 |
-
)
|
163 |
-
|
164 |
-
# Generate the prompt string
|
165 |
-
return prompt_generator.generate_prompt_string()
|
166 |
-
|
167 |
-
|
168 |
-
def construct_prompt() -> str:
|
169 |
-
"""Construct the prompt for the AI to respond to
|
170 |
-
|
171 |
-
Returns:
|
172 |
-
str: The prompt string
|
173 |
-
"""
|
174 |
-
config = AIConfig.load(CFG.ai_settings_file)
|
175 |
-
if CFG.skip_reprompt and config.ai_name:
|
176 |
-
logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
|
177 |
-
logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
|
178 |
-
logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
|
179 |
-
elif config.ai_name:
|
180 |
-
logger.typewriter_log(
|
181 |
-
"Welcome back! ",
|
182 |
-
Fore.GREEN,
|
183 |
-
f"Would you like me to return to being {config.ai_name}?",
|
184 |
-
speak_text=True,
|
185 |
-
)
|
186 |
-
should_continue = clean_input(
|
187 |
-
f"""Continue with the last settings?
|
188 |
-
Name: {config.ai_name}
|
189 |
-
Role: {config.ai_role}
|
190 |
-
Goals: {config.ai_goals}
|
191 |
-
Continue (y/n): """
|
192 |
-
)
|
193 |
-
if should_continue.lower() == "n":
|
194 |
-
config = AIConfig()
|
195 |
-
|
196 |
-
if not config.ai_name:
|
197 |
-
config = prompt_user()
|
198 |
-
config.save(CFG.ai_settings_file)
|
199 |
-
|
200 |
-
# Get rid of this global:
|
201 |
-
global ai_name
|
202 |
-
ai_name = config.ai_name
|
203 |
-
|
204 |
-
return config.construct_full_prompt()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cyril666/my_abi/modules/model_language.py
DELETED
@@ -1,67 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
import torch.nn as nn
|
3 |
-
from fastai.vision import *
|
4 |
-
|
5 |
-
from modules.model import _default_tfmer_cfg
|
6 |
-
from modules.model import Model
|
7 |
-
from modules.transformer import (PositionalEncoding,
|
8 |
-
TransformerDecoder,
|
9 |
-
TransformerDecoderLayer)
|
10 |
-
|
11 |
-
|
12 |
-
class BCNLanguage(Model):
|
13 |
-
def __init__(self, config):
|
14 |
-
super().__init__(config)
|
15 |
-
d_model = ifnone(config.model_language_d_model, _default_tfmer_cfg['d_model'])
|
16 |
-
nhead = ifnone(config.model_language_nhead, _default_tfmer_cfg['nhead'])
|
17 |
-
d_inner = ifnone(config.model_language_d_inner, _default_tfmer_cfg['d_inner'])
|
18 |
-
dropout = ifnone(config.model_language_dropout, _default_tfmer_cfg['dropout'])
|
19 |
-
activation = ifnone(config.model_language_activation, _default_tfmer_cfg['activation'])
|
20 |
-
num_layers = ifnone(config.model_language_num_layers, 4)
|
21 |
-
self.d_model = d_model
|
22 |
-
self.detach = ifnone(config.model_language_detach, True)
|
23 |
-
self.use_self_attn = ifnone(config.model_language_use_self_attn, False)
|
24 |
-
self.loss_weight = ifnone(config.model_language_loss_weight, 1.0)
|
25 |
-
self.max_length = config.dataset_max_length + 1 # additional stop token
|
26 |
-
self.debug = ifnone(config.global_debug, False)
|
27 |
-
|
28 |
-
self.proj = nn.Linear(self.charset.num_classes, d_model, False)
|
29 |
-
self.token_encoder = PositionalEncoding(d_model, max_len=self.max_length)
|
30 |
-
self.pos_encoder = PositionalEncoding(d_model, dropout=0, max_len=self.max_length)
|
31 |
-
decoder_layer = TransformerDecoderLayer(d_model, nhead, d_inner, dropout,
|
32 |
-
activation, self_attn=self.use_self_attn, debug=self.debug)
|
33 |
-
self.model = TransformerDecoder(decoder_layer, num_layers)
|
34 |
-
|
35 |
-
self.cls = nn.Linear(d_model, self.charset.num_classes)
|
36 |
-
|
37 |
-
if config.model_language_checkpoint is not None:
|
38 |
-
logging.info(f'Read language model from {config.model_language_checkpoint}.')
|
39 |
-
self.load(config.model_language_checkpoint)
|
40 |
-
|
41 |
-
def forward(self, tokens, lengths):
|
42 |
-
"""
|
43 |
-
Args:
|
44 |
-
tokens: (N, T, C) where T is length, N is batch size and C is classes number
|
45 |
-
lengths: (N,)
|
46 |
-
"""
|
47 |
-
if self.detach: tokens = tokens.detach()
|
48 |
-
embed = self.proj(tokens) # (N, T, E)
|
49 |
-
embed = embed.permute(1, 0, 2) # (T, N, E)
|
50 |
-
embed = self.token_encoder(embed) # (T, N, E)
|
51 |
-
padding_mask = self._get_padding_mask(lengths, self.max_length)
|
52 |
-
|
53 |
-
zeros = embed.new_zeros(*embed.shape)
|
54 |
-
qeury = self.pos_encoder(zeros)
|
55 |
-
location_mask = self._get_location_mask(self.max_length, tokens.device)
|
56 |
-
output = self.model(qeury, embed,
|
57 |
-
tgt_key_padding_mask=padding_mask,
|
58 |
-
memory_mask=location_mask,
|
59 |
-
memory_key_padding_mask=padding_mask) # (T, N, E)
|
60 |
-
output = output.permute(1, 0, 2) # (N, T, E)
|
61 |
-
|
62 |
-
logits = self.cls(output) # (N, T, C)
|
63 |
-
pt_lengths = self._get_length(logits)
|
64 |
-
|
65 |
-
res = {'feature': output, 'logits': logits, 'pt_lengths': pt_lengths,
|
66 |
-
'loss_weight':self.loss_weight, 'name': 'language'}
|
67 |
-
return res
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DCXGAO/DeepDanbooru_string/README.md
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: DeepDanbooru String
|
3 |
-
emoji: 💬
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.6
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
duplicated_from: NoCrypt/DeepDanbooru_string
|
11 |
-
---
|
12 |
-
|
13 |
-
# Configuration
|
14 |
-
|
15 |
-
`title`: _string_
|
16 |
-
Display title for the Space
|
17 |
-
|
18 |
-
`emoji`: _string_
|
19 |
-
Space emoji (emoji-only character allowed)
|
20 |
-
|
21 |
-
`colorFrom`: _string_
|
22 |
-
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
23 |
-
|
24 |
-
`colorTo`: _string_
|
25 |
-
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
26 |
-
|
27 |
-
`sdk`: _string_
|
28 |
-
Can be either `gradio`, `streamlit`, or `static`
|
29 |
-
|
30 |
-
`sdk_version` : _string_
|
31 |
-
Only applicable for `streamlit` SDK.
|
32 |
-
See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
|
33 |
-
|
34 |
-
`app_file`: _string_
|
35 |
-
Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
|
36 |
-
Path is relative to the root of the repository.
|
37 |
-
|
38 |
-
`pinned`: _boolean_
|
39 |
-
Whether the Space stays on top of your list.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/__init__.py
DELETED
@@ -1,46 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
"""
|
3 |
-
Charset-Normalizer
|
4 |
-
~~~~~~~~~~~~~~
|
5 |
-
The Real First Universal Charset Detector.
|
6 |
-
A library that helps you read text from an unknown charset encoding.
|
7 |
-
Motivated by chardet, This package is trying to resolve the issue by taking a new approach.
|
8 |
-
All IANA character set names for which the Python core library provides codecs are supported.
|
9 |
-
|
10 |
-
Basic usage:
|
11 |
-
>>> from charset_normalizer import from_bytes
|
12 |
-
>>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
|
13 |
-
>>> best_guess = results.best()
|
14 |
-
>>> str(best_guess)
|
15 |
-
'Bсеки човек има право на образование. Oбразованието!'
|
16 |
-
|
17 |
-
Others methods and usages are available - see the full documentation
|
18 |
-
at <https://github.com/Ousret/charset_normalizer>.
|
19 |
-
:copyright: (c) 2021 by Ahmed TAHRI
|
20 |
-
:license: MIT, see LICENSE for more details.
|
21 |
-
"""
|
22 |
-
import logging
|
23 |
-
|
24 |
-
from .api import from_bytes, from_fp, from_path, is_binary
|
25 |
-
from .legacy import detect
|
26 |
-
from .models import CharsetMatch, CharsetMatches
|
27 |
-
from .utils import set_logging_handler
|
28 |
-
from .version import VERSION, __version__
|
29 |
-
|
30 |
-
__all__ = (
|
31 |
-
"from_fp",
|
32 |
-
"from_path",
|
33 |
-
"from_bytes",
|
34 |
-
"is_binary",
|
35 |
-
"detect",
|
36 |
-
"CharsetMatch",
|
37 |
-
"CharsetMatches",
|
38 |
-
"__version__",
|
39 |
-
"VERSION",
|
40 |
-
"set_logging_handler",
|
41 |
-
)
|
42 |
-
|
43 |
-
# Attach a NullHandler to the top level logger by default
|
44 |
-
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
|
45 |
-
|
46 |
-
logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/__init__.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
"""Empty __init__.py file to signal Python this directory is a package."""
|
|
|
|
spaces/Daniton/superjourney/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Superjourney
|
3 |
-
emoji: 👁
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: pink
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.16.2
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Danky/dreamlike-art-dreamlike-diffusion-1.0/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/dreamlike-art/dreamlike-diffusion-1.0").launch()
|
|
|
|
|
|
|
|
spaces/Datasculptor/DescriptionGPT/datasets/README.md
DELETED
@@ -1,207 +0,0 @@
|
|
1 |
-
# Prepare datasets for Detic
|
2 |
-
|
3 |
-
The basic training of our model uses [LVIS](https://www.lvisdataset.org/) (which uses [COCO](https://cocodataset.org/) images) and [ImageNet-21K](https://www.image-net.org/download.php).
|
4 |
-
Some models are trained on [Conceptual Caption (CC3M)](https://ai.google.com/research/ConceptualCaptions/).
|
5 |
-
Optionally, we use [Objects365](https://www.objects365.org/) and [OpenImages (Challenge 2019 version)](https://storage.googleapis.com/openimages/web/challenge2019.html) for cross-dataset evaluation.
|
6 |
-
Before starting processing, please download the (selected) datasets from the official websites and place or sim-link them under `$Detic_ROOT/datasets/`.
|
7 |
-
|
8 |
-
```
|
9 |
-
$Detic_ROOT/datasets/
|
10 |
-
metadata/
|
11 |
-
lvis/
|
12 |
-
coco/
|
13 |
-
imagenet/
|
14 |
-
cc3m/
|
15 |
-
objects365/
|
16 |
-
oid/
|
17 |
-
```
|
18 |
-
`metadata/` is our preprocessed meta-data (included in the repo). See the below [section](#Metadata) for details.
|
19 |
-
Please follow the following instruction to pre-process individual datasets.
|
20 |
-
|
21 |
-
### COCO and LVIS
|
22 |
-
|
23 |
-
First, download COCO and LVIS data place them in the following way:
|
24 |
-
|
25 |
-
```
|
26 |
-
lvis/
|
27 |
-
lvis_v1_train.json
|
28 |
-
lvis_v1_val.json
|
29 |
-
coco/
|
30 |
-
train2017/
|
31 |
-
val2017/
|
32 |
-
annotations/
|
33 |
-
captions_train2017.json
|
34 |
-
instances_train2017.json
|
35 |
-
instances_val2017.json
|
36 |
-
```
|
37 |
-
|
38 |
-
Next, prepare the open-vocabulary LVIS training set using
|
39 |
-
|
40 |
-
```
|
41 |
-
python tools/remove_lvis_rare.py --ann datasets/lvis/lvis_v1_train.json
|
42 |
-
```
|
43 |
-
|
44 |
-
This will generate `datasets/lvis/lvis_v1_train_norare.json`.
|
45 |
-
|
46 |
-
### ImageNet-21K
|
47 |
-
|
48 |
-
The ImageNet-21K folder should look like:
|
49 |
-
```
|
50 |
-
imagenet/
|
51 |
-
ImageNet-21K/
|
52 |
-
n01593028.tar
|
53 |
-
n01593282.tar
|
54 |
-
...
|
55 |
-
```
|
56 |
-
|
57 |
-
We first unzip the overlapping classes of LVIS (we will directly work with the .tar file for the rest classes) and convert them into LVIS annotation format.
|
58 |
-
|
59 |
-
~~~
|
60 |
-
mkdir imagenet/annotations
|
61 |
-
python tools/unzip_imagenet_lvis.py --dst_path datasets/imagenet/ImageNet-LVIS
|
62 |
-
python tools/create_imagenetlvis_json.py --imagenet_path datasets/imagenet/ImageNet-LVIS --out_path datasets/imagenet/annotations/imagenet_lvis_image_info.json
|
63 |
-
~~~
|
64 |
-
This creates `datasets/imagenet/annotations/imagenet_lvis_image_info.json`.
|
65 |
-
|
66 |
-
[Optional] To train with all the 21K classes, run
|
67 |
-
|
68 |
-
~~~
|
69 |
-
python tools/get_imagenet_21k_full_tar_json.py
|
70 |
-
python tools/create_lvis_21k.py
|
71 |
-
~~~
|
72 |
-
This creates `datasets/imagenet/annotations/imagenet-21k_image_info_lvis-21k.json` and `datasets/lvis/lvis_v1_train_lvis-21k.json` (combined LVIS and ImageNet-21K classes in `categories`).
|
73 |
-
|
74 |
-
[Optional] To train on combined LVIS and COCO, run
|
75 |
-
|
76 |
-
~~~
|
77 |
-
python tools/merge_lvis_coco.py
|
78 |
-
~~~
|
79 |
-
This creates `datasets/lvis/lvis_v1_train+coco_mask.json`
|
80 |
-
|
81 |
-
### Conceptual Caption
|
82 |
-
|
83 |
-
|
84 |
-
Download the dataset from [this](https://ai.google.com/research/ConceptualCaptions/download) page and place them as:
|
85 |
-
```
|
86 |
-
cc3m/
|
87 |
-
GCC-training.tsv
|
88 |
-
```
|
89 |
-
|
90 |
-
Run the following command to download the images and convert the annotations to LVIS format (Note: download images takes long).
|
91 |
-
|
92 |
-
~~~
|
93 |
-
python tools/download_cc.py --ann datasets/cc3m/GCC-training.tsv --save_image_path datasets/cc3m/training/ --out_path datasets/cc3m/train_image_info.json
|
94 |
-
python tools/get_cc_tags.py
|
95 |
-
~~~
|
96 |
-
|
97 |
-
This creates `datasets/cc3m/train_image_info_tags.json`.
|
98 |
-
|
99 |
-
### Objects365
|
100 |
-
Download Objects365 (v2) from the website. We only need the validation set in this project:
|
101 |
-
```
|
102 |
-
objects365/
|
103 |
-
annotations/
|
104 |
-
zhiyuan_objv2_val.json
|
105 |
-
val/
|
106 |
-
images/
|
107 |
-
v1/
|
108 |
-
patch0/
|
109 |
-
...
|
110 |
-
patch15/
|
111 |
-
v2/
|
112 |
-
patch16/
|
113 |
-
...
|
114 |
-
patch49/
|
115 |
-
|
116 |
-
```
|
117 |
-
|
118 |
-
The original annotation has typos in the class names, we first fix them for our following use of language embeddings.
|
119 |
-
|
120 |
-
```
|
121 |
-
python tools/fix_o365_names.py --ann datasets/objects365/annotations/zhiyuan_objv2_val.json
|
122 |
-
```
|
123 |
-
This creates `datasets/objects365/zhiyuan_objv2_val_fixname.json`.
|
124 |
-
|
125 |
-
To train on Objects365, download the training images and use the command above. We note some images in the training annotation do not exist.
|
126 |
-
We use the following command to filter the missing images.
|
127 |
-
~~~
|
128 |
-
python tools/fix_0365_path.py
|
129 |
-
~~~
|
130 |
-
This creates `datasets/objects365/zhiyuan_objv2_train_fixname_fixmiss.json`.
|
131 |
-
|
132 |
-
### OpenImages
|
133 |
-
|
134 |
-
We followed the instructions in [UniDet](https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet_docs/DATASETS.md#openimages) to convert the metadata for OpenImages.
|
135 |
-
|
136 |
-
The converted folder should look like
|
137 |
-
|
138 |
-
```
|
139 |
-
oid/
|
140 |
-
annotations/
|
141 |
-
oid_challenge_2019_train_bbox.json
|
142 |
-
oid_challenge_2019_val_expanded.json
|
143 |
-
images/
|
144 |
-
0/
|
145 |
-
1/
|
146 |
-
2/
|
147 |
-
...
|
148 |
-
```
|
149 |
-
|
150 |
-
### Open-vocabulary COCO
|
151 |
-
|
152 |
-
We first follow [OVR-CNN](https://github.com/alirezazareian/ovr-cnn/blob/master/ipynb/003.ipynb) to create the open-vocabulary COCO split. The converted files should be like
|
153 |
-
|
154 |
-
```
|
155 |
-
coco/
|
156 |
-
zero-shot/
|
157 |
-
instances_train2017_seen_2.json
|
158 |
-
instances_val2017_all_2.json
|
159 |
-
```
|
160 |
-
|
161 |
-
We further pre-process the annotation format for easier evaluation:
|
162 |
-
|
163 |
-
```
|
164 |
-
python tools/get_coco_zeroshot_oriorder.py --data_path datasets/coco/zero-shot/instances_train2017_seen_2.json
|
165 |
-
python tools/get_coco_zeroshot_oriorder.py --data_path datasets/coco/zero-shot/instances_val2017_all_2.json
|
166 |
-
```
|
167 |
-
|
168 |
-
Next, we preprocess the COCO caption data:
|
169 |
-
|
170 |
-
```
|
171 |
-
python tools/get_cc_tags.py --cc_ann datasets/coco/annotations/captions_train2017.json --out_path datasets/coco/captions_train2017_tags_allcaps.json --allcaps --convert_caption
|
172 |
-
```
|
173 |
-
This creates `datasets/coco/captions_train2017_tags_allcaps.json`.
|
174 |
-
|
175 |
-
### Metadata
|
176 |
-
|
177 |
-
```
|
178 |
-
metadata/
|
179 |
-
lvis_v1_train_cat_info.json
|
180 |
-
coco_clip_a+cname.npy
|
181 |
-
lvis_v1_clip_a+cname.npy
|
182 |
-
o365_clip_a+cnamefix.npy
|
183 |
-
oid_clip_a+cname.npy
|
184 |
-
imagenet_lvis_wnid.txt
|
185 |
-
Objects365_names_fix.csv
|
186 |
-
```
|
187 |
-
|
188 |
-
`lvis_v1_train_cat_info.json` is used by the Federated loss.
|
189 |
-
This is created by
|
190 |
-
~~~
|
191 |
-
python tools/get_lvis_cat_info.py --ann datasets/lvis/lvis_v1_train.json
|
192 |
-
~~~
|
193 |
-
|
194 |
-
`*_clip_a+cname.npy` is the pre-computed CLIP embeddings for each datasets.
|
195 |
-
They are created by (taking LVIS as an example)
|
196 |
-
~~~
|
197 |
-
python tools/dump_clip_features.py --ann datasets/lvis/lvis_v1_val.json --out_path metadata/lvis_v1_clip_a+cname.npy
|
198 |
-
~~~
|
199 |
-
Note we do not include the 21K class embeddings due to the large file size.
|
200 |
-
To create it, run
|
201 |
-
~~~
|
202 |
-
python tools/dump_clip_features.py --ann datasets/lvis/lvis_v1_val_lvis-21k.json --out_path datasets/metadata/lvis-21k_clip_a+cname.npy
|
203 |
-
~~~
|
204 |
-
|
205 |
-
`imagenet_lvis_wnid.txt` is the list of matched classes between ImageNet-21K and LVIS.
|
206 |
-
|
207 |
-
`Objects365_names_fix.csv` is our manual fix of the Objects365 names.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Detomo/AnimeGAN/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: AnimeGAN
|
3 |
-
emoji: 😶🌫️
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.18.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: creativeml-openrail-m
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DiffusionArtco/Diffusion200Max/app.py
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import requests
|
3 |
-
from PIL import Image
|
4 |
-
from io import BytesIO
|
5 |
-
import base64
|
6 |
-
|
7 |
-
api_url = "https://5cb20b40-572c-426f-9466-995256f9b6eb.id.repl.co/generate_image"
|
8 |
-
|
9 |
-
def generate_image(model="Deliberate", prompt="", seed=0, negative_prompt="", sampler="k_dpmpp_2s_a", steps=50):
|
10 |
-
data = "?model=" + model + "&prompt=" + prompt + "&seed=" + str(seed) + "&negative_prompt=" + negative_prompt + "&sampler=" + sampler + "&steps=" + str(steps)
|
11 |
-
response = requests.post(api_url + data, timeout=400)
|
12 |
-
if response.status_code == 200:
|
13 |
-
img_base64 = response.json()["url"]
|
14 |
-
img_bytes = base64.b64decode(img_base64)
|
15 |
-
img = Image.open(BytesIO(img_bytes))
|
16 |
-
return img
|
17 |
-
else:
|
18 |
-
return None
|
19 |
-
|
20 |
-
inputs = [
|
21 |
-
gr.inputs.Dropdown(['3DKX', 'Abyss OrangeMix', 'AbyssOrangeMix-AfterDark', 'ACertainThing',
|
22 |
-
'AIO Pixel Art', 'Analog Diffusion', 'Anime Pencil Diffusion', 'Anygen',
|
23 |
-
'Anything Diffusion', 'Anything v3', 'anything_v4_inpainting',
|
24 |
-
'App Icon Diffusion', 'Arcane Diffusion', 'Archer Diffusion',
|
25 |
-
'Asim Simpsons', 'A to Zovya RPG', 'Balloon Art', 'Borderlands', 'BPModel',
|
26 |
-
'BubblyDubbly', 'Char', 'CharHelper', 'Cheese Daddys Landscape Mix',
|
27 |
-
'ChilloutMix', 'ChromaV5', 'Classic Animation Diffusion', 'Clazy',
|
28 |
-
'Colorful', 'Coloring Book', 'Comic-Diffusion', 'Concept Sheet',
|
29 |
-
'Counterfeit', 'Cyberpunk Anime Diffusion', 'CyriousMix',
|
30 |
-
'Dan Mumford Style', 'Darkest Diffusion', 'Dark Victorian Diffusion',
|
31 |
-
'Deliberate', 'DGSpitzer Art Diffusion', 'Disco Elysium', 'DnD Item',
|
32 |
-
'Double Exposure Diffusion', 'Dreamlike Diffusion',
|
33 |
-
'dreamlike_diffusion_inpainting', 'Dreamlike Photoreal',
|
34 |
-
'DreamLikeSamKuvshinov', 'Dreamshaper', 'DucHaiten',
|
35 |
-
'DucHaiten Classic Anime', 'Dungeons and Diffusion', 'Dungeons n Waifus',
|
36 |
-
'Eimis Anime Diffusion', 'Elden Ring Diffusion', "Elldreth's Lucid Mix",
|
37 |
-
'Elldreths Retro Mix', 'Epic Diffusion', 'Eternos', 'Experience',
|
38 |
-
'ExpMix Line', 'FaeTastic', 'Fantasy Card Diffusion', 'FKing SciFi',
|
39 |
-
'Funko Diffusion', 'Furry Epoch', 'Future Diffusion', 'Ghibli Diffusion',
|
40 |
-
'GorynichMix', 'Grapefruit Hentai', 'Graphic-Art',
|
41 |
-
'GTA5 Artwork Diffusion', 'GuoFeng', 'Guohua Diffusion', 'HASDX',
|
42 |
-
'Hassanblend', "Healy's Anime Blend", 'Hentai Diffusion', 'HRL', 'iCoMix',
|
43 |
-
'Illuminati Diffusion', 'Inkpunk Diffusion', 'Jim Eidomode',
|
44 |
-
'JWST Deep Space Diffusion', 'Kenshi', 'Knollingcase', 'Korestyle',
|
45 |
-
'kurzgesagt', 'Laolei New Berry Protogen Mix', "Lawlas's yiff mix",
|
46 |
-
'Liberty', 'Marvel Diffusion', 'Mega Merge Diffusion', 'Microcasing',
|
47 |
-
'Microchars', 'Microcritters', 'Microscopic', 'Microworlds',
|
48 |
-
'Midjourney Diffusion', 'Midjourney PaintArt', 'Min Illust Background',
|
49 |
-
'ModernArt Diffusion', 'mo-di-diffusion', 'Moedel', 'MoistMix',
|
50 |
-
'Movie Diffusion', 'NeverEnding Dream', 'Nitro Diffusion', 'Openniji',
|
51 |
-
'OrbAI', 'Papercutcraft', 'Papercut Diffusion', 'Pastel Mix',
|
52 |
-
'Perfect World', 'PFG', 'PIXHELL', 'Poison', 'Pokemon3D', 'PortraitPlus',
|
53 |
-
'PPP', 'Pretty 2.5D', 'PRMJ', 'Project Unreal Engine 5', 'ProtoGen',
|
54 |
-
'Protogen Anime', 'Protogen Infinity', 'Pulp Vector Art', 'PVC',
|
55 |
-
'Rachel Walker Watercolors', 'Rainbowpatch', 'Ranma Diffusion',
|
56 |
-
'RCNZ Dumb Monkey', 'RCNZ Gorilla With A Brick', 'RealBiter',
|
57 |
-
'Realism Engine', 'Realistic Vision', 'Redshift Diffusion', 'Rev Animated',
|
58 |
-
'Robo-Diffusion', 'Rodent Diffusion', 'RPG', 'Samdoesarts Ultmerge',
|
59 |
-
'Sci-Fi Diffusion', 'SD-Silicon', 'Seek.art MEGA', 'Smoke Diffusion',
|
60 |
-
'Something', 'Sonic Diffusion', 'Spider-Verse Diffusion',
|
61 |
-
'Squishmallow Diffusion', 'stable_diffusion', 'stable_diffusion_2.1',
|
62 |
-
'stable_diffusion_2_inpainting', 'Supermarionation', 'Sygil-Dev Diffusion',
|
63 |
-
'Synthwave', 'SynthwavePunk', 'TrexMix', 'trinart', 'Trinart Characters',
|
64 |
-
'Tron Legacy Diffusion', 'T-Shirt Diffusion', 'T-Shirt Print Designs',
|
65 |
-
'Uhmami', 'Ultraskin', 'UMI Olympus', 'Unstable Ink Dream', 'URPM',
|
66 |
-
'Valorant Diffusion', 'Van Gogh Diffusion', 'Vector Art', 'vectorartz',
|
67 |
-
'Vintedois Diffusion', 'VinteProtogenMix', 'Vivid Watercolors',
|
68 |
-
'Voxel Art Diffusion', 'waifu_diffusion', 'Wavyfusion', 'Woop-Woop Photo',
|
69 |
-
'Xynthii-Diffusion', 'Yiffy', 'Zack3D', 'Zeipher Female Model',
|
70 |
-
'Zelda BOTW'], label="Model", default="Deliberate"),
|
71 |
-
gr.inputs.Textbox(label="Prompt", default=""),
|
72 |
-
gr.inputs.Number(label="Seed", default=0),
|
73 |
-
gr.inputs.Textbox(label="Negative Prompt", default=""),
|
74 |
-
gr.inputs.Dropdown(["k_lms", "k_heun", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a", "DDIM", "k_dpm_fast", "k_dpm_adaptive", "k_dpmpp_2m", "k_dpmpp_2s_a", "k_dpmpp_sde"], label="Sampler", default="k_dpmpp_2s_a"),
|
75 |
-
gr.inputs.Number(label="Steps", default=50)
|
76 |
-
]
|
77 |
-
|
78 |
-
outputs = gr.outputs.Image(label="Generated Image", type="pil")
|
79 |
-
|
80 |
-
interface = gr.Interface(generate_image, inputs, outputs, title="Diffusion 200",
|
81 |
-
description="<center>Live access to Top 200 Diffusion models</center>",
|
82 |
-
examples=[])
|
83 |
-
|
84 |
-
interface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Dinoking/Guccio-AI-Designer/netdissect/broden.py
DELETED
@@ -1,271 +0,0 @@
|
|
1 |
-
import os, errno, numpy, torch, csv, re, shutil, os, zipfile
|
2 |
-
from collections import OrderedDict
|
3 |
-
from torchvision.datasets.folder import default_loader
|
4 |
-
from torchvision import transforms
|
5 |
-
from scipy import ndimage
|
6 |
-
from urllib.request import urlopen
|
7 |
-
|
8 |
-
class BrodenDataset(torch.utils.data.Dataset):
|
9 |
-
'''
|
10 |
-
A multicategory segmentation data set.
|
11 |
-
|
12 |
-
Returns three streams:
|
13 |
-
(1) The image (3, h, w).
|
14 |
-
(2) The multicategory segmentation (labelcount, h, w).
|
15 |
-
(3) A bincount of pixels in the segmentation (labelcount).
|
16 |
-
|
17 |
-
Net dissect also assumes that the dataset object has three properties
|
18 |
-
with human-readable labels:
|
19 |
-
|
20 |
-
ds.labels = ['red', 'black', 'car', 'tree', 'grid', ...]
|
21 |
-
ds.categories = ['color', 'part', 'object', 'texture']
|
22 |
-
ds.label_category = [0, 0, 2, 2, 3, ...] # The category for each label
|
23 |
-
'''
|
24 |
-
def __init__(self, directory='dataset/broden', resolution=384,
|
25 |
-
split='train', categories=None,
|
26 |
-
transform=None, transform_segment=None,
|
27 |
-
download=False, size=None, include_bincount=True,
|
28 |
-
broden_version=1, max_segment_depth=6):
|
29 |
-
assert resolution in [224, 227, 384]
|
30 |
-
if download:
|
31 |
-
ensure_broden_downloaded(directory, resolution, broden_version)
|
32 |
-
self.directory = directory
|
33 |
-
self.resolution = resolution
|
34 |
-
self.resdir = os.path.join(directory, 'broden%d_%d' %
|
35 |
-
(broden_version, resolution))
|
36 |
-
self.loader = default_loader
|
37 |
-
self.transform = transform
|
38 |
-
self.transform_segment = transform_segment
|
39 |
-
self.include_bincount = include_bincount
|
40 |
-
# The maximum number of multilabel layers that coexist at an image.
|
41 |
-
self.max_segment_depth = max_segment_depth
|
42 |
-
with open(os.path.join(self.resdir, 'category.csv'),
|
43 |
-
encoding='utf-8') as f:
|
44 |
-
self.category_info = OrderedDict()
|
45 |
-
for row in csv.DictReader(f):
|
46 |
-
self.category_info[row['name']] = row
|
47 |
-
if categories is not None:
|
48 |
-
# Filter out unused categories
|
49 |
-
categories = set([c for c in categories if c in self.category_info])
|
50 |
-
for cat in list(self.category_info.keys()):
|
51 |
-
if cat not in categories:
|
52 |
-
del self.category_info[cat]
|
53 |
-
categories = list(self.category_info.keys())
|
54 |
-
self.categories = categories
|
55 |
-
|
56 |
-
# Filter out unneeded images.
|
57 |
-
with open(os.path.join(self.resdir, 'index.csv'),
|
58 |
-
encoding='utf-8') as f:
|
59 |
-
all_images = [decode_index_dict(r) for r in csv.DictReader(f)]
|
60 |
-
self.image = [row for row in all_images
|
61 |
-
if index_has_any_data(row, categories) and row['split'] == split]
|
62 |
-
if size is not None:
|
63 |
-
self.image = self.image[:size]
|
64 |
-
with open(os.path.join(self.resdir, 'label.csv'),
|
65 |
-
encoding='utf-8') as f:
|
66 |
-
self.label_info = build_dense_label_array([
|
67 |
-
decode_label_dict(r) for r in csv.DictReader(f)])
|
68 |
-
self.labels = [l['name'] for l in self.label_info]
|
69 |
-
# Build dense remapping arrays for labels, so that you can
|
70 |
-
# get dense ranges of labels for each category.
|
71 |
-
self.category_map = {}
|
72 |
-
self.category_unmap = {}
|
73 |
-
self.category_label = {}
|
74 |
-
for cat in self.categories:
|
75 |
-
with open(os.path.join(self.resdir, 'c_%s.csv' % cat),
|
76 |
-
encoding='utf-8') as f:
|
77 |
-
c_data = [decode_label_dict(r) for r in csv.DictReader(f)]
|
78 |
-
self.category_unmap[cat], self.category_map[cat] = (
|
79 |
-
build_numpy_category_map(c_data))
|
80 |
-
self.category_label[cat] = build_dense_label_array(
|
81 |
-
c_data, key='code')
|
82 |
-
self.num_labels = len(self.labels)
|
83 |
-
# Primary categories for each label is the category in which it
|
84 |
-
# appears with the maximum coverage.
|
85 |
-
self.label_category = numpy.zeros(self.num_labels, dtype=int)
|
86 |
-
for i in range(self.num_labels):
|
87 |
-
maxcoverage, self.label_category[i] = max(
|
88 |
-
(self.category_label[cat][self.category_map[cat][i]]['coverage']
|
89 |
-
if i < len(self.category_map[cat])
|
90 |
-
and self.category_map[cat][i] else 0, ic)
|
91 |
-
for ic, cat in enumerate(categories))
|
92 |
-
|
93 |
-
def __len__(self):
|
94 |
-
return len(self.image)
|
95 |
-
|
96 |
-
def __getitem__(self, idx):
|
97 |
-
record = self.image[idx]
|
98 |
-
# example record: {
|
99 |
-
# 'image': 'opensurfaces/25605.jpg', 'split': 'train',
|
100 |
-
# 'ih': 384, 'iw': 384, 'sh': 192, 'sw': 192,
|
101 |
-
# 'color': ['opensurfaces/25605_color.png'],
|
102 |
-
# 'object': [], 'part': [],
|
103 |
-
# 'material': ['opensurfaces/25605_material.png'],
|
104 |
-
# 'scene': [], 'texture': []}
|
105 |
-
image = self.loader(os.path.join(self.resdir, 'images',
|
106 |
-
record['image']))
|
107 |
-
segment = numpy.zeros(shape=(self.max_segment_depth,
|
108 |
-
record['sh'], record['sw']), dtype=int)
|
109 |
-
if self.include_bincount:
|
110 |
-
bincount = numpy.zeros(shape=(self.num_labels,), dtype=int)
|
111 |
-
depth = 0
|
112 |
-
for cat in self.categories:
|
113 |
-
for layer in record[cat]:
|
114 |
-
if isinstance(layer, int):
|
115 |
-
segment[depth,:,:] = layer
|
116 |
-
if self.include_bincount:
|
117 |
-
bincount[layer] += segment.shape[1] * segment.shape[2]
|
118 |
-
else:
|
119 |
-
png = numpy.asarray(self.loader(os.path.join(
|
120 |
-
self.resdir, 'images', layer)))
|
121 |
-
segment[depth,:,:] = png[:,:,0] + png[:,:,1] * 256
|
122 |
-
if self.include_bincount:
|
123 |
-
bincount += numpy.bincount(segment[depth,:,:].flatten(),
|
124 |
-
minlength=self.num_labels)
|
125 |
-
depth += 1
|
126 |
-
if self.transform:
|
127 |
-
image = self.transform(image)
|
128 |
-
if self.transform_segment:
|
129 |
-
segment = self.transform_segment(segment)
|
130 |
-
if self.include_bincount:
|
131 |
-
bincount[0] = 0
|
132 |
-
return (image, segment, bincount)
|
133 |
-
else:
|
134 |
-
return (image, segment)
|
135 |
-
|
136 |
-
def build_dense_label_array(label_data, key='number', allow_none=False):
|
137 |
-
'''
|
138 |
-
Input: set of rows with 'number' fields (or another field name key).
|
139 |
-
Output: array such that a[number] = the row with the given number.
|
140 |
-
'''
|
141 |
-
result = [None] * (max([d[key] for d in label_data]) + 1)
|
142 |
-
for d in label_data:
|
143 |
-
result[d[key]] = d
|
144 |
-
# Fill in none
|
145 |
-
if not allow_none:
|
146 |
-
example = label_data[0]
|
147 |
-
def make_empty(k):
|
148 |
-
return dict((c, k if c is key else type(v)())
|
149 |
-
for c, v in example.items())
|
150 |
-
for i, d in enumerate(result):
|
151 |
-
if d is None:
|
152 |
-
result[i] = dict(make_empty(i))
|
153 |
-
return result
|
154 |
-
|
155 |
-
def build_numpy_category_map(map_data, key1='code', key2='number'):
|
156 |
-
'''
|
157 |
-
Input: set of rows with 'number' fields (or another field name key).
|
158 |
-
Output: array such that a[number] = the row with the given number.
|
159 |
-
'''
|
160 |
-
results = list(numpy.zeros((max([d[key] for d in map_data]) + 1),
|
161 |
-
dtype=numpy.int16) for key in (key1, key2))
|
162 |
-
for d in map_data:
|
163 |
-
results[0][d[key1]] = d[key2]
|
164 |
-
results[1][d[key2]] = d[key1]
|
165 |
-
return results
|
166 |
-
|
167 |
-
def index_has_any_data(row, categories):
|
168 |
-
for c in categories:
|
169 |
-
for data in row[c]:
|
170 |
-
if data: return True
|
171 |
-
return False
|
172 |
-
|
173 |
-
def decode_label_dict(row):
|
174 |
-
result = {}
|
175 |
-
for key, val in row.items():
|
176 |
-
if key == 'category':
|
177 |
-
result[key] = dict((c, int(n))
|
178 |
-
for c, n in [re.match('^([^(]*)\(([^)]*)\)$', f).groups()
|
179 |
-
for f in val.split(';')])
|
180 |
-
elif key == 'name':
|
181 |
-
result[key] = val
|
182 |
-
elif key == 'syns':
|
183 |
-
result[key] = val.split(';')
|
184 |
-
elif re.match('^\d+$', val):
|
185 |
-
result[key] = int(val)
|
186 |
-
elif re.match('^\d+\.\d*$', val):
|
187 |
-
result[key] = float(val)
|
188 |
-
else:
|
189 |
-
result[key] = val
|
190 |
-
return result
|
191 |
-
|
192 |
-
def decode_index_dict(row):
|
193 |
-
result = {}
|
194 |
-
for key, val in row.items():
|
195 |
-
if key in ['image', 'split']:
|
196 |
-
result[key] = val
|
197 |
-
elif key in ['sw', 'sh', 'iw', 'ih']:
|
198 |
-
result[key] = int(val)
|
199 |
-
else:
|
200 |
-
item = [s for s in val.split(';') if s]
|
201 |
-
for i, v in enumerate(item):
|
202 |
-
if re.match('^\d+$', v):
|
203 |
-
item[i] = int(v)
|
204 |
-
result[key] = item
|
205 |
-
return result
|
206 |
-
|
207 |
-
class ScaleSegmentation:
|
208 |
-
'''
|
209 |
-
Utility for scaling segmentations, using nearest-neighbor zooming.
|
210 |
-
'''
|
211 |
-
def __init__(self, target_height, target_width):
|
212 |
-
self.target_height = target_height
|
213 |
-
self.target_width = target_width
|
214 |
-
def __call__(self, seg):
|
215 |
-
ratio = (1, self.target_height / float(seg.shape[1]),
|
216 |
-
self.target_width / float(seg.shape[2]))
|
217 |
-
return ndimage.zoom(seg, ratio, order=0)
|
218 |
-
|
219 |
-
def scatter_batch(seg, num_labels, omit_zero=True, dtype=torch.uint8):
|
220 |
-
'''
|
221 |
-
Utility for scattering semgentations into a one-hot representation.
|
222 |
-
'''
|
223 |
-
result = torch.zeros(*((seg.shape[0], num_labels,) + seg.shape[2:]),
|
224 |
-
dtype=dtype, device=seg.device)
|
225 |
-
result.scatter_(1, seg, 1)
|
226 |
-
if omit_zero:
|
227 |
-
result[:,0] = 0
|
228 |
-
return result
|
229 |
-
|
230 |
-
def ensure_broden_downloaded(directory, resolution, broden_version=1):
|
231 |
-
assert resolution in [224, 227, 384]
|
232 |
-
baseurl = 'http://netdissect.csail.mit.edu/data/'
|
233 |
-
dirname = 'broden%d_%d' % (broden_version, resolution)
|
234 |
-
if os.path.isfile(os.path.join(directory, dirname, 'index.csv')):
|
235 |
-
return # Already downloaded
|
236 |
-
zipfilename = 'broden1_%d.zip' % resolution
|
237 |
-
download_dir = os.path.join(directory, 'download')
|
238 |
-
os.makedirs(download_dir, exist_ok=True)
|
239 |
-
full_zipfilename = os.path.join(download_dir, zipfilename)
|
240 |
-
if not os.path.exists(full_zipfilename):
|
241 |
-
url = '%s/%s' % (baseurl, zipfilename)
|
242 |
-
print('Downloading %s' % url)
|
243 |
-
data = urlopen(url)
|
244 |
-
with open(full_zipfilename, 'wb') as f:
|
245 |
-
f.write(data.read())
|
246 |
-
print('Unzipping %s' % zipfilename)
|
247 |
-
with zipfile.ZipFile(full_zipfilename, 'r') as zip_ref:
|
248 |
-
zip_ref.extractall(directory)
|
249 |
-
assert os.path.isfile(os.path.join(directory, dirname, 'index.csv'))
|
250 |
-
|
251 |
-
def test_broden_dataset():
|
252 |
-
'''
|
253 |
-
Testing code.
|
254 |
-
'''
|
255 |
-
bds = BrodenDataset('dataset/broden', resolution=384,
|
256 |
-
transform=transforms.Compose([
|
257 |
-
transforms.Resize(224),
|
258 |
-
transforms.ToTensor()]),
|
259 |
-
transform_segment=transforms.Compose([
|
260 |
-
ScaleSegmentation(224, 224)
|
261 |
-
]),
|
262 |
-
include_bincount=True)
|
263 |
-
loader = torch.utils.data.DataLoader(bds, batch_size=100, num_workers=24)
|
264 |
-
for i in range(1,20):
|
265 |
-
print(bds.label[i]['name'],
|
266 |
-
list(bds.category.keys())[bds.primary_category[i]])
|
267 |
-
for i, (im, seg, bc) in enumerate(loader):
|
268 |
-
print(i, im.shape, seg.shape, seg.max(), bc.shape)
|
269 |
-
|
270 |
-
if __name__ == '__main__':
|
271 |
-
test_broden_dataset()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|