parquet-converter committed on
Commit 942ccad · Parent: 9ac2d24

Update parquet files (step 83 of 249)

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete set of changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Acronis Backup 12.5.1 Build 14240 Crack !EXCLUSIVE!.md +0 -36
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Navisworks Exporter for Revit and Boost Your Collaboration and Coordination.md +0 -31
  3. spaces/1gistliPinn/ChatGPT4/Examples/Descargar Native Instruments Battery 4 Crack.md +0 -6
  4. spaces/1phancelerku/anime-remove-background/Acronis True Image 2018 Download and Try the Most Reliable Backup Tool.md +0 -131
  5. spaces/1phancelerku/anime-remove-background/Download Very Very Very by I.O.I - The Song That Broke the Charts.md +0 -132
  6. spaces/2023Liu2023/bingo/src/components/markdown.tsx +0 -9
  7. spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/base_svs_infer.py +0 -265
  8. spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/decision_maker/brainstorming.py +0 -67
  9. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/AddChildMethods.js +0 -112
  10. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/lineprogresscanvas/Factory.js +0 -13
  11. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/ParseEaseConfig.js +0 -18
  12. spaces/Akshat-1812/Dog-Vision/README.md +0 -13
  13. spaces/AlexWang/lama/saicinpainting/training/visualizers/noop.py +0 -9
  14. spaces/Alpaca233/ChatGPT-PPT-Generate/app.py +0 -245
  15. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_mbf.py +0 -26
  16. spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py +0 -167
  17. spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/deeplabv3_r50-d8.py +0 -44
  18. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py +0 -7
  19. spaces/Andy1621/uniformer_image_segmentation/configs/fp16/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes.py +0 -5
  20. spaces/Apex-X/GODROOP/roop/processors/__init__.py +0 -0
  21. spaces/Apex-X/ROOPOK/roop/metadata.py +0 -2
  22. spaces/ArcanAlt/arcanDream/README.md +0 -10
  23. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/nap.py +0 -43
  24. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/more_itertools/__init__.py +0 -4
  25. spaces/Audio-AGI/AudioSep/utils.py +0 -384
  26. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_model_zoo.py +0 -50
  27. spaces/Bart92/RVC_HF/infer_batch_rvc.py +0 -215
  28. spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/dataset.py +0 -183
  29. spaces/Belshia/shia/README.md +0 -13
  30. spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/_version.py +0 -5
  31. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py +0 -537
  32. spaces/Boadiwaa/Recipes/openai/api_resources/experimental/completion_config.py +0 -11
  33. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/unique_by_key.h +0 -67
  34. spaces/CVPR/unicl-zero-shot-img-recog/model/text_encoder/build.py +0 -31
  35. spaces/CognitiveLabs/GPT-auto-webscraping/app.py +0 -107
  36. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/tf.py +0 -269
  37. spaces/DHEIVER/ImageClassifierCataract/README.md +0 -12
  38. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/http.py +0 -70
  39. spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/modules/patch_feature_extractor.py +0 -57
  40. spaces/DeepDrivePL/PaddleSeg-Matting/README.md +0 -37
  41. spaces/Dinoking/Guccio-AI-Designer/netdissect/segmodel/models.py +0 -558
  42. spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/__init__.py +0 -9
  43. spaces/ECCV2022/PSG/OpenPSG/configs/motifs/panoptic_fpn_r101_fpn_1x_predcls_psg.py +0 -28
  44. spaces/ECE1786-AG/ArtIstic-GENREator/app.py +0 -91
  45. spaces/FridaZuley/RVC_HFKawaii/infer/lib/infer_pack/modules.py +0 -521
  46. spaces/FridaZuley/RVC_HFKawaii/infer/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +0 -98
  47. spaces/GT-RIPL/GPT-K/knowledge/retrieve.py +0 -105
  48. spaces/Gen-Sim/Gen-Sim/cliport/models/streams/two_stream_transport_lang_fusion.py +0 -196
  49. spaces/Gen-Sim/Gen-Sim/cliport/utils/__init__.py +0 -0
  50. spaces/GeorgeOrville/bingo/postcss.config.js +0 -6
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Acronis Backup 12.5.1 Build 14240 Crack !EXCLUSIVE!.md DELETED
@@ -1,36 +0,0 @@
1
-
2
- <h1>Acronis Backup 12.5.1 Build 14240 Crack: A Reliable and Flexible Solution for Data Protection</h1>
3
- <p>Acronis Backup 12.5.1 Build 14240 Crack is a powerful and versatile software that provides comprehensive data protection for any environment, including physical, virtual, cloud, mobile, and applications. With Acronis Backup 12.5.1 Build 14240 Crack, you can easily backup and restore your data, manage your backup policies, monitor your backup activities, and recover your data in minutes.</p>
4
- <h2>Acronis Backup 12.5.1 Build 14240 Crack</h2><br /><p><b><b>DOWNLOAD</b> &gt;&gt;&gt; <a href="https://byltly.com/2uKAdS">https://byltly.com/2uKAdS</a></b></p><br /><br />
5
- <p>Acronis Backup 12.5.1 Build 14240 Crack is the latest update of Acronis Backup 12.5, which was released in August 2019. This update introduces several new features and enhancements, such as:</p>
6
- <ul>
7
- <li>The enhanced backup option Performance and backup window (formerly Performance) enables you to set one of three levels of backup performance (high, low, prohibited) for every hour within a week. The high and low levels are configurable in terms of process priority and output speed[^1^].</li>
8
- <li>The new option Enable backup validation enables you to automatically validate your backups after they are created or according to a schedule. You can also specify the number of backups to keep validated[^2^].</li>
9
- <li>The new option Enable ransomware protection enables you to protect your backups from ransomware attacks by detecting and blocking unauthorized encryption attempts[^2^].</li>
10
- <li>The new option Enable deduplication enables you to reduce the storage space required for your backups by eliminating duplicate data blocks[^2^].</li>
11
- <li>The new option Enable encryption enables you to encrypt your backups with AES-256 algorithm to ensure data security and privacy[^2^].</li>
12
- <li>The new option Enable compression enables you to compress your backups to save storage space and bandwidth[^2^].</li>
13
- <li>The new option Enable notifications enables you to receive email notifications about the status of your backup operations[^2^].</li>
14
- <li>The new option Enable reports enables you to generate and view detailed reports about your backup activities and performance[^2^].</li>
15
- </ul>
16
- <p>Acronis Backup 12.5.1 Build 14240 Crack supports a wide range of operating systems, platforms, and applications, such as Windows, Linux, Mac OS X, VMware, Hyper-V, Citrix XenServer, Oracle VM Server, Microsoft Exchange Server, Microsoft SQL Server, Microsoft SharePoint Server, Microsoft Active Directory, Microsoft Office 365, Google G Suite, Amazon EC2, Azure VMs, iOS, Android, and more[^2^].</p>
17
- <p>Acronis Backup 12.5.1 Build 14240 Crack is a reliable and flexible solution for data protection that can meet the needs of any business size and complexity. With Acronis Backup 12.5.1 Build 14240 Crack, you can ensure the availability and security of your data while saving time and money.</p>
18
- <p></p><h2>Acronis Backup 12.5.1 Build 14240 Crack: What Customers Say</h2>
19
- <p>Acronis Backup 12.5.1 Build 14240 Crack is not only a powerful and versatile software for data protection, but also a highly rated and recommended solution by customers who have used it. According to TrustRadius, a platform for verified user reviews, Acronis Backup 12.5 has an average rating of 7.7 out of 10 based on 136 reviews and ratings[^3^]. Here are some of the pros and cons that customers have shared about Acronis Backup 12.5.1 Build 14240 Crack:</p>
20
- <h3>Pros</h3>
21
- <ul>
22
- <li>Acronis Backup 12.5.1 Build 14240 Crack offers excellent backup speeds, which can save time and resources for backup operations.</li>
23
- <li>Acronis Backup 12.5.1 Build 14240 Crack supports a wide range of platforms, including physical, virtual, cloud, mobile, and applications, which can provide comprehensive data protection for any environment.</li>
24
- <li>Acronis Backup 12.5.1 Build 14240 Crack is easy to deploy, manage, and use, with a web-based console that provides a customizable dashboard and drag-and-drop widgets.</li>
25
- <li>Acronis Backup 12.5.1 Build 14240 Crack provides valuable ransomware protection with Acronis Active Protection, which can detect and block unauthorized encryption attempts and restore affected files.</li>
26
- <li>Acronis Backup 12.5.1 Build 14240 Crack offers flexible backup options, such as universal recovery, recovery verification, instant recovery, incremental backup identification, deduplication, encryption, compression, notifications, and reports[^2^] .</li>
27
- </ul>
28
- <h3>Cons</h3>
29
- <ul>
30
- <li>Acronis Backup 12.5.1 Build 14240 Crack may have some issues with recovery time objectives (RTO), which may lag slightly behind the stated speeds.</li>
31
- <li>Acronis Backup 12.5.1 Build 14240 Crack may not be able to recover data for a single instance installation[^3^].</li>
32
- <li>Acronis Backup 12.5.1 Build 14240 Crack may be pricier than other solutions for small amounts of data to backup[^3^].</li>
33
- </ul>
34
- <p>Overall, customers are satisfied with Acronis Backup 12.5.1 Build 14240 Crack and its features, performance, reliability, and support. Many customers have praised Acronis Backup 12.5.1 Build 14240 Crack as a solid solution for data protection that can meet the needs of any business size and complexity[^3^] .</p> 7b8c122e87<br />
35
- <br />
36
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Navisworks Exporter for Revit and Boost Your Collaboration and Coordination.md DELETED
@@ -1,31 +0,0 @@
1
-
2
- <h1>How to Download and Install Navisworks Exporter for Revit</h1>
3
- <p>Navisworks Exporter for Revit is a plug-in that allows you to export Revit models as NWC files that can be opened and viewed in Navisworks. NWC files are optimized for performance and collaboration, and can be used for clash detection, coordination, and simulation.</p>
4
- <h2>download navisworks exporter for revit</h2><br /><p><b><b>DOWNLOAD</b> &#8230; <a href="https://byltly.com/2uKyw1">https://byltly.com/2uKyw1</a></b></p><br /><br />
5
- <p>If you want to download and install Navisworks Exporter for Revit, you can follow these steps:</p>
6
- <ol>
7
- <li>Go to this page: <a href="https://www.autodesk.com/support/technical/article/caas/sfdcarticles/sfdcarticles/Where-to-find-the-Navisworks-Exporter-for-Revit.html">Where to find the Navisworks Exporter for Revit</a>.</li>
8
- <li>Scroll down to the section entitled <strong>Navisworks NWC Export Utility</strong> and click on the link that matches your Revit version and operating system.</li>
9
- <li>Save the file to your computer and run the installer. Follow the instructions on the screen to complete the installation.</li>
10
- <li>Restart Revit if it was running during the installation.</li>
11
- <li>To export a Revit model as an NWC file, click <strong>Add-Ins > External Tools > Autodesk Navisworks</strong>. In the <strong>Export Scene As</strong> dialog box, click the <strong>Autodesk Navisworks Settings</strong> button. Adjust the settings for your export and click <strong>OK</strong>. Then choose a location and a name for your NWC file and click <strong>Save</strong>.</li>
12
- </ol>
13
- <p>Congratulations! You have successfully downloaded and installed Navisworks Exporter for Revit and exported your first NWC file.</p>
14
- <p>NWC files are a great way to share and collaborate on Revit models with other stakeholders. You can use Navisworks to open and view NWC files, as well as combine them with other NWC files from different disciplines and sources. You can also use Navisworks to perform various tasks on the NWC files, such as:</p>
15
- <p></p>
16
- <ul>
17
- <li>Clash detection: You can check for interferences and conflicts between different elements and objects in the NWC files, and generate reports and markups to resolve them.</li>
18
- <li>Coordination: You can align and synchronize the NWC files with a common coordinate system and time frame, and create federated models that show the whole project.</li>
19
- <li>Simulation: You can create animations and walkthroughs of the NWC files, and simulate the construction sequence and schedule of the project.</li>
20
- </ul>
21
- <p>To view an NWC file in Navisworks, you need to have Navisworks installed on your computer. You can download a free trial version of Navisworks from this page: <a href="https://www.autodesk.com/products/navisworks/free-trial">Navisworks Free Trial</a>. Once you have Navisworks installed, you can open an NWC file by clicking <strong>File > Open</strong> and browsing to the location of the file. You can also drag and drop the file into the Navisworks window.</p>
22
- <p>You can adjust the settings for future exports of NWC files from Revit by using the Options Editor in Navisworks. To access the Options Editor, click <strong>File > Options</strong>. Expand the <strong>File Exporters</strong> node and click the <strong>Revit</strong> page. Here you can change various options for your export, such as:</p>
23
- <ul>
24
- <li>Export mode: You can choose between exporting the entire project or only selected elements.</li>
25
- <li>Export properties: You can choose which properties to include in the NWC file, such as categories, materials, phases, etc.</li>
26
- <li>Export geometry: You can choose how to export the geometry of the Revit model, such as using tessellation or solids.</li>
27
- <li>Export links: You can choose how to export linked Revit models or CAD files, such as embedding them or using relative paths.</li>
28
- </ul>
29
- <p>You can also save your export settings as a profile and load it later for convenience.</p> ddb901b051<br />
30
- <br />
31
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Descargar Native Instruments Battery 4 Crack.md DELETED
@@ -1,6 +0,0 @@
1
-
2
- <p>When you choose a cell in a battery, you can create a new Hit or Hit Mix which includes various samples. Battery provides an offline sampler which makes it possible for the user to import the audio sample of his or her choice. Also, there are a host of products and advanced effects to use with the samples in the collection.</p>
3
- <h2>Descargar Native Instruments Battery 4 Crack</h2><br /><p><b><b>Download</b> &#10022; <a href="https://imgfil.com/2uy1b0">https://imgfil.com/2uy1b0</a></b></p><br /><br />
4
- <p>Battery is available as a free trial version, though not all features are included. In the trial version, you can load the samples that are already installed, download sounds from the online sampler, preview the recorded drum, effects, and EQ details, and get full access to the extensive online sampler. However, additional modules, multi-track editing, importing of audio clips, and creating custom kits are not available. To use the trial version, you must register for a free NIN account, which has its own limitations, and the trial cannot be downloaded directly to the desktop. Battery 3 offers 16 new percussion instruments drawn from the most popular electronic percussion instruments, with presets for everything from traditional acoustic drum kits to entire electronic drum kits. Some of the drums include modern pieces such as the hi-hat, ride, toms, cymbals, an A/D core, and much more. These drums are built on a specially designed rack with thousands of samples for real-time performance, helping the user become an expert at creating real drum sounds. Production is easy, with space for mixing real-time samples and virtual racks for creating your own kits. Battery also enables the user to get started quickly and easily.</p> 899543212b<br />
5
- <br />
6
- <br />
 
spaces/1phancelerku/anime-remove-background/Acronis True Image 2018 Download and Try the Most Reliable Backup Tool.md DELETED
@@ -1,131 +0,0 @@
1
- <br />
2
- <h1>How to Download Acronis True Image 2018</h1>
3
- <p>If you are looking for a reliable and easy-to-use backup software that can protect your data and system from any disaster, you might want to consider Acronis True Image 2018. This software is one of the best in the market, offering a comprehensive set of features and tools that can help you create, manage, and restore backups of your files, disks, partitions, or entire machines. In this article, we will show you how to download Acronis True Image 2018, how to install and activate it, how to use its main functions, and how to get help and support if you need it.</p>
4
- <h2>What is Acronis True Image 2018 and why you need it</h2>
5
- <p>Acronis True Image 2018 is a personal cyber protection solution that delivers easy-to-use, efficient, and secure backup and recovery of your data and system. It can help you prevent data loss due to hardware failure, malware infection, accidental deletion, theft, or natural disaster. It can also help you migrate your data to a new device, clone your disk to a new drive, archive your files to save space, or verify the authenticity of your data with blockchain technology.</p>
6
- <h2>download acronis true image 2018</h2><br /><p><b><b>Download File</b> &mdash;&mdash;&mdash;>>> <a href="https://jinyurl.com/2uNLN7">https://jinyurl.com/2uNLN7</a></b></p><br /><br />
7
- <h3>Features of Acronis True Image 2018</h3>
8
- <p>Acronis True Image 2018 offers a rich set of features that can meet your backup needs. Some of the main features are:</p>
9
- <ul>
10
- <li><b>Disk and partition backup:</b> You can create an exact copy of your entire disk or partition, including the operating system, applications, settings, and data. This is useful for restoring your system in case of a crash or replacing your disk with a new one.</li>
11
- <li><b>File backup:</b> You can back up individual files or folders to local, network, or cloud storage. You can also choose the backup type (full, incremental, or differential) and the backup frequency (daily, weekly, monthly, or on event).</li>
12
- <li><b>Backup to cloud:</b> You can store your backups in Acronis Cloud, a secure online storage that offers unlimited space for your data. You can access your backups from any device or location, as well as sync them across multiple devices.</li>
13
- <li><b>Recovery:</b> You can restore your data or system from any backup source (local, network, cloud) to any destination (same or different device). You can also restore individual files or folders from disk or file backups.</li>
14
- <li><b>Cloning:</b> You can clone your disk to another disk of the same or different size. This is useful for upgrading your disk to a larger or faster one without reinstalling the operating system or applications.</li>
15
- <li><b>Archiving:</b> You can archive your files that you rarely use or need to a local drive or cloud storage. This can help you free up space on your disk and optimize its performance.</li>
16
- <li><b>Active protection:</b> You can protect your data from ransomware attacks with Acronis Active Protection. This feature monitors your system for suspicious activity and blocks any unauthorized encryption attempts. It also allows you to recover any affected files from a backup.</li>
17
- <li><b>Notary </b>: You can verify the integrity and authenticity of your data with Acronis Notary. This feature uses blockchain technology to create a unique digital fingerprint for your data and store it in a public ledger. You can then use this fingerprint to prove that your data has not been altered or tampered with.</li>
18
- </ul>
19
- <h3>System requirements for Acronis True Image 2018</h3>
20
- <p>To use Acronis True Image 2018, you need to have a device that meets the following minimum system requirements:</p>
21
- <table>
22
- <tr><th>Operating system</th><th>Hardware</th></tr>
23
- <tr><td>Windows 7 SP1 or later (32-bit and 64-bit)</td><td>1 GHz processor or faster</td></tr>
24
- <tr><td>macOS 10.11 or later</td><td>2 GB RAM or more</td></tr>
25
- <tr><td>iOS 10.0 or later</td><td>1.5 GB free disk space or more</td></tr>
26
- <tr><td>Android 4.1 or later</td><td>A high-speed internet connection for cloud backup and recovery</td></tr>
27
- </table>
28
- <h2>How to purchase and activate Acronis True Image 2018</h2>
29
- <p>To use Acronis True Image 2018, you need to purchase a subscription plan and activate the software with a license key. Here is how you can do that:</p>
30
- <h3>Pricing and subscription plans</h3>
31
- <p>Acronis True Image 2018 offers three subscription plans that vary in terms of features, cloud storage, and number of devices. You can choose the plan that suits your needs and budget. The plans are:</p>
32
- <ul>
33
- <li><b>Standard:</b> This is a one-time purchase plan that gives you access to the basic features of Acronis True Image 2018, such as disk and file backup, recovery, cloning, and archiving. It does not include cloud backup, active protection, notary, or any other advanced features. It also does not include any updates or upgrades. You can use this plan on one device only. The price of this plan is $49.99.</li>
34
- <li><b>Advanced:</b> This is a yearly subscription plan that gives you access to all the features of Acronis True Image 2018, including cloud backup, active protection, notary, and more. It also includes free updates and upgrades. You can use this plan on up to three devices. The price of this plan is $49.99 per year.</li>
35
- <li><b>Premium:</b> This is a yearly subscription plan that gives you access to all the features of Acronis True Image 2018, including cloud backup, active protection, notary, and more. It also includes free updates and upgrades. You can use this plan on up to five devices. The price of this plan is $99.99 per year.</li>
36
- </ul>
37
- <h3>Activation and licensing process</h3>
38
- <p>To activate Acronis True Image 2018, you need to have a license key that corresponds to your subscription plan. You can get the license key in one of the following ways:</p>
39
- <ul>
40
- <li><b>Purchase online:</b> You can purchase Acronis True Image 2018 online from the official website or from an authorized reseller. You will receive an email with the license key and a download link after completing the payment.</li>
41
- <li><b>Purchase offline:</b> You can purchase Acronis True Image 2018 offline from a retail store or a distributor. You will receive a box with the installation media and the license key inside.</li>
42
- <li><b>Trial version:</b> You can try Acronis True Image 2018 for free for 30 days by downloading the trial version from the official website. You will receive a trial license key by email after registering for the trial.</li>
43
- </ul>
44
- <p>To activate Acronis True Image 2018, you need to enter the license key in the software interface after installing it on your device. You can also activate it online by logging in to your Acronis account and entering the license key there.</p>
45
- <h2>How to download and install Acronis True Image 2018</h2>
46
- <p>To download and install Acronis True Image 2018, you need to have a valid license key and an internet connection. Here is how you can do that:</p>
47
- <h3>Download link and installation file</h3>
48
- <p>You can download Acronis True Image 2018 from the official website or from the email that you received after purchasing or registering for the trial. The download link will direct you to the appropriate version of the software for your operating system (Windows, macOS, iOS, or Android). The installation file is a .exe file for Windows, a .dmg file for macOS, an .ipa file for iOS, and an .apk file for Android. The file size is about 500 MB for Windows and macOS, and about 100 MB for iOS and Android. You can save the file to your device or run it directly from the browser.</p>
89
- <h3>Installation steps and options</h3>
90
- <p>To install Acronis True Image 2018, you need to run the installation file and follow the instructions on the screen. The installation process is similar for all operating systems, but there may be some differences in the options and settings. Here are the general steps and options for installing Acronis True Image 2018:</p>
91
- <ol>
92
- <li><b>Accept the license agreement:</b> You need to read and accept the terms and conditions of the license agreement before proceeding with the installation.</li>
93
- <li><b>Choose the installation type:</b> You can choose between a typical installation or a custom installation. The typical installation will install the software with the default settings and options, while the custom installation will allow you to change some of them, such as the installation location, the components to install, and the language.</li>
94
- <li><b>Enter the license key:</b> You need to enter the license key that you received after purchasing or registering for the trial. The license key will activate the software and determine the features and subscription plan that you can use.</li>
95
- <li><b>Sign in to your Acronis account:</b> You need to sign in to your Acronis account or create one if you don't have one. Your Acronis account will allow you to manage your subscription, access your cloud backups, sync your data across devices, and get help and support.</li>
96
- <li><b>Complete the installation:</b> The installation will take a few minutes to complete. You may need to restart your device after the installation is finished.</li>
97
- </ol>
98
- <h2>How to use Acronis True Image 2018</h2>
99
- <p>After installing and activating Acronis True Image 2018, you can start using it to backup and protect your data and system. The software has a user-friendly interface that allows you to access its main functions and settings. Here is how you can use Acronis True Image 2018:</p>
100
- <h3>Backup and recovery options</h3>
101
- <p>To create a backup of your data or system, you need to select the source (the data or disk that you want to backup) and the destination (the location where you want to store the backup). You can also choose the backup type, frequency, encryption, notification, and other options. To restore your data or system from a backup, you need to select the backup source (the location where the backup is stored) and the recovery destination (the location where you want to restore the data or disk). You can also choose the recovery mode, options, and verification.</p>
102
- <h3>Cloning and archiving options</h3>
103
- <p>To clone your disk to another disk, you need to select the source disk (the disk that you want to clone) and the destination disk (the disk where you want to copy the data). You can also choose the cloning mode (automatic or manual) and options (such as resizing partitions or excluding files). To archive your files to another location, you need to select the source files (the files that you want to archive) and the destination location (the local drive or cloud storage where you want to store the archived files). You can also choose the archiving options (such as compression, encryption, or scheduling).</p>
104
- <h3>Active protection and notary options</h3>
105
- <p>To protect your data from ransomware attacks, you need to enable Acronis Active Protection in the software settings. This feature will monitor your system for suspicious activity and block any unauthorized encryption attempts. It will also allow you to recover any affected files from a backup. To verify the integrity and authenticity of your data, you need to use Acronis Notary in the software interface. This feature will create a unique digital fingerprint for your data and store it in a public ledger. You can then use this fingerprint to prove that your data has not been altered or tampered with.</p>
106
- <h2>How to get help and support for Acronis True Image 2018</h2>
107
- <p>If you have any questions or issues with Acronis True Image 2018, you can get help and support from various sources. Some of the main sources are:</p>
108
- <h3>Documentation and tutorials</h3>
109
- <p>You can find the user guide, the quick start guide, the FAQ, and the video tutorials for Acronis True Image 2018 on the official website. These resources will provide you with detailed information and instructions on how to use the software and its features.</p>
110
- <h3>Knowledge base and community forum</h3>
111
- <p>You can search for answers and solutions to common problems and errors in the knowledge base and the community forum on the official website. These resources will provide you with articles, tips, tricks, and advice from Acronis experts and other users.</p>
112
- <h3>Technical support and initial setup service</h3>
113
- <p>You can contact the technical support team by phone, email, or chat if you need assistance with installation, activation, configuration, or troubleshooting. The technical support team is available 24/7 and can help you resolve any issues or errors. You can also purchase the initial setup service if you want an Acronis technician to remotely install and configure the software for you.</p>
114
- <h2>Conclusion and FAQs</h2>
115
- <p>Acronis True Image 2018 is a powerful backup software that can protect your data and system from any disaster. It offers a comprehensive set of features and tools that can help you create, manage, and restore backups of your files, disks, partitions, or entire machines. It also offers cloud backup, active protection, notary, and other advanced features that can enhance your data security and integrity. To use Acronis True Image 2018, you need to purchase a subscription plan, activate the software with a license key, download and install the software on your device, and start using its main functions. You can also get help and support from various sources if you need it.</p>
116
- <p>Here are some FAQs that you might have about Acronis True Image 2018:</p>
117
- <ul>
118
- <li><b>Q: How can I update or upgrade Acronis True Image 2018?</b></li>
119
- <li><b>A: If you have an active subscription plan (Advanced or Premium), you can update or upgrade Acronis True Image 2018 for free. You will receive notifications when a new version or update is available. You can also check for updates manually in the software settings. If you have a one-time purchase plan (Standard), you cannot update or upgrade Acronis True Image 2018 for free. You will need to purchase a new license key for the latest version.</b></li>
120
- <li><b>Q: How can I cancel or renew my subscription plan?</b></li>
121
- <li><b>A: If you have an active subscription plan (Advanced or Premium), you can cancel or renew it at any time. You can manage your subscription plan in your Acronis account online. You can also change your payment method, billing information, or subscription type there.</b></li>
122
- <li><b>Q: How can I backup or restore my mobile device?</b></li>
123
- <li><b>A: If you have an iOS or Android device, you can backup or restore it with Acronis True Image 2018. You need to download and install the Acronis Mobile app on your device and sign in with your Acronis account. You can then backup your contacts, photos, videos, messages, calendars, and other data to Acronis Cloud or another device. You can also restore your data from a backup to the same or a different device.</b></li>
124
- <li><b>Q: How can I access my cloud backups?</b></li>
125
- <li><b>A: If you have backed up your data to Acronis Cloud, you can access it from any device or location. You need to sign in to your Acronis account online or use the Acronis Mobile app on your device. You can then view, download, delete, or share your cloud backups.</b></li>
126
- <li><b>Q: How can I contact Acronis technical support?</b></li>
127
- <li><b>A: If you need assistance with Acronis True Image 2018, you can contact Acronis technical support by phone, email, or chat. You can find the contact information on the official website. You will need to provide your license key, product version, operating system, error message, and other details that can help them solve your problem.</b></li>
128
- </ul>
129
- <p>I hope this article has helped you learn how to download Acronis True Image 2018 and use it to backup and protect your data and system. If you have any feedback or suggestions, please let me know in the comments below. Thank you for reading!</p> 197e85843d<br />
130
- <br />
131
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Very Very Very by I.O.I - The Song That Broke the Charts.md DELETED
@@ -1,132 +0,0 @@
1
-
2
- <h1>How to Download I.O.I's Very Very Very Song and Enjoy Its Catchy Melody</h1>
3
- <p>If you are a fan of K-pop, you might have heard of I.O.I, a girl group project that was formed through a survival reality show called Produce 101. The group consisted of 11 members who were selected from different agencies and debuted in 2016. They released two mini-albums and several singles before disbanding in 2017.</p>
4
- <p>One of their most popular songs is Very Very Very, which was released as the title track of their second mini-album Miss Me? in October 2016. The song was composed by Park Jin-young, the founder of JYP Entertainment, and has a catchy melody and lyrics that express a girl's feelings for a boy. The song topped various music charts in South Korea and won several music awards.</p>
5
- <h2>download ioi very very very</h2><br /><p><b><b>Download File</b> &#9989; <a href="https://jinyurl.com/2uNULO">https://jinyurl.com/2uNULO</a></b></p><br /><br />
6
- <p>If you love this song and want to listen to it anytime and anywhere, you might want to download it and enjoy it offline. Downloading the song can save you data and battery, as well as allow you to play it on different devices without internet connection. In this article, we will show you how to download I.O.I's Very Very Very song from different platforms, and how to enjoy it offline.</p>
7
- <h2>How to Download the Song from Different Platforms</h2>
8
- <p>There are many platforms where you can stream or download I.O.I's Very Very Very song, such as YouTube, Spotify, Apple Music, etc. However, not all of them offer free downloads or easy access. Here are some ways you can download the song from these platforms:</p>
9
- <h3>YouTube</h3>
10
- <p>YouTube is one of the most popular platforms where you can watch I.O.I's Very Very Very music video and listen to their song. However, if you want to download the song from YouTube, you have two options:</p>
11
- <ul>
12
- <li>Use YouTube Premium or YouTube Music. These are subscription services that allow you to download videos and songs from YouTube and play them offline. You can sign up for a free trial or pay a monthly fee to use these services. To download I.O.I's Very Very Very song from YouTube Premium or YouTube Music, you need to:</li>
13
- <ol>
14
- <li>Open the YouTube app on your device and search for I.O.I's Very Very Very music video or song.</li>
15
- <li>Tap on the download icon below the video or song and choose the quality you want.</li>
16
- <li>Wait for the download to finish and then go to your library or downloads section to find your downloaded video or song.</li>
17
- </ol>
18
- <li>Use third-party tools or websites. These are free or paid tools or websites that allow you to download videos and songs from YouTube by copying and pasting their URLs. However, these tools or websites may not be safe, legal, or reliable. They may also have limited features, quality, or speed. To download I.O.I's Very Very Very song from third-party tools or websites, you need to:</li>
19
- <ol>
20
- <li>Open the YouTube app or website on your device and search for I.O.I's Very Very Very music video or song.</li>
21
- <li>Copy the URL of the video or song from the address bar or the share option.</li>
22
- <li>Open a third-party tool or website that can download YouTube videos or songs, such as Y2mate, 4K Video Downloader, MP3Juices, etc.</li>
23
- <li>Paste the URL of the video or song into the tool or website and choose the format and quality you want.</li>
24
- <li>Click on the download button and wait for the download to finish.</li>
25
- <li>Find your downloaded file in your device's storage or downloads folder.</li>
26
- </ol>
27
- </ul>
28
- <h3>Spotify</h3>
29
- <p>Spotify is another popular platform where you can stream or download I.O.I's Very Very Very song, as well as other songs from their albums and playlists. However, if you want to download the song from Spotify, you also have two options:</p>
30
- <ul>
31
- <li>Use Spotify Premium. This is a subscription service that allows you to download songs from Spotify and play them offline. You can sign up for a free trial or pay a monthly fee to use this service. To download I.O.I's Very Very Very song from Spotify Premium, you need to:</li>
32
- <ol>
33
- <li>Open the Spotify app on your device and search for I.O.I's Very Very Very song or their Miss Me? album.</li>
34
- <li>Tap on the heart icon next to the song or the album to add it to your library.</li>
35
- <li>Go to your library and tap on the download toggle next to the song or the album.</li>
36
- <li>Wait for the download to finish and then go to your library or downloads section to find your downloaded song or album.</li>
37
- </ol>
38
- <li>Use third-party tools or websites. These are free or paid tools or websites that allow you to download songs from Spotify by copying and pasting their URLs. However, these tools or websites may not be safe, legal, or reliable. They may also have limited features, quality, or speed. To download I.O.I's Very Very Very song from third-party tools or websites, you need to:</li>
39
- <ol>
40
- <li>Open the Spotify app or website on your device and search for I.O.I's Very Very Very song or their Miss Me? album.</li>
41
- <li>Copy the URL of the song or the album from the address bar or the share option.</li>
42
- <li>Open a third-party tool or website that can download Spotify songs, such as Sidify, TuneFab, AudFree, etc.</li>
43
- <li>Paste the URL of the song or the album into the tool or website and choose the format and quality you want.</li>
44
- <li>Click on the download button and wait for the download to finish.</li>
45
- <li>Find your downloaded file in your device's storage or downloads folder.</li>
46
- </ol>
47
- </ul> <h3>Apple Music</h3>
48
- <p>Apple Music is another popular platform where you can stream or download I.O.I's Very Very Very song, as well as other songs from their albums and playlists. However, if you want to download the song from Apple Music, you also have two options:</p>
79
- <ul>
80
- <li>Use Apple Music subscription. This is a subscription service that allows you to download songs from Apple Music and play them offline. You can sign up for a free trial or pay a monthly fee to use this service. To download I.O.I's Very Very Very song from Apple Music, you need to:</li>
81
- <ol>
82
- <li>Open the Apple Music app on your device and search for I.O.I's Very Very Very song or their Miss Me? album.</li>
83
- <li>Tap on the plus icon next to the song or the album to add it to your library.</li>
84
- <li>Go to your library and tap on the cloud icon next to the song or the album.</li>
85
- <li>Wait for the download to finish and then go to your library or downloads section to find your downloaded song or album.</li>
86
- </ol>
87
- <li>Use third-party tools or websites. These are free or paid tools or websites that allow you to download songs from Apple Music by copying and pasting their URLs. However, these tools or websites may not be safe, legal, or reliable. They may also have limited features, quality, or speed. To download I.O.I's Very Very Very song from third-party tools or websites, you need to:</li>
88
- <ol>
89
- <li>Open the Apple Music app or website on your device and search for I.O.I's Very Very Very song or their Miss Me? album.</li>
90
- <li>Copy the URL of the song or the album from the address bar or the share option.</li>
91
- <li>Open a third-party tool or website that can download Apple Music songs, such as NoteBurner, TunesKit, DRmare, etc.</li>
92
- <li>Paste the URL of the song or the album into the tool or website and choose the format and quality you want.</li>
93
- <li>Click on the download button and wait for the download to finish.</li>
94
- <li>Find your downloaded file in your device's storage or downloads folder.</li>
95
- </ol>
96
- </ul>
97
- <h2>How to Enjoy the Song Offline</h2>
98
- <p>Now that you have downloaded I.O.I's Very Very Very song from your preferred platform, you can enjoy it offline anytime and anywhere. Here are some ways you can enjoy the song offline:</p>
99
- <h3>Transfer the Song to Your Devices</h3>
100
- <p>If you want to listen to the song on different devices, such as your phone, tablet, laptop, etc., you need to transfer the song from your original device to your other devices. There are several ways you can do this:</p>
101
- <ul>
102
- <li>Use USB cables. You can connect your devices with USB cables and copy and paste the song file from one device to another. This is a simple and fast way to transfer files, but it may require different types of cables for different devices.</li>
103
- <li>Use Bluetooth. You can pair your devices with Bluetooth and send and receive the song file wirelessly. This is a convenient and cordless way to transfer files, but it may take longer time and consume more battery.</li>
104
- <li>Use cloud services. You can upload your song file to a cloud service, such as Google Drive, Dropbox, iCloud, etc., and then download it to your other devices. This is a secure and accessible way to transfer files, but it may require internet connection and storage space.</li>
105
- </ul>
106
- <h3>Play the Song with Your Favorite Music Player</h3>
107
- <p>If you want to listen to the song with your favorite music player, such as VLC, Winamp, iTunes, etc., you need to open the song file with your music player and enjoy its features and settings. Here are some tips you can follow:</p>
108
- <ul>
109
- <li>Choose a music player that suits your preferences and needs. There are many music players available for different devices and platforms, each with its own advantages and disadvantages. You can compare their features, functions, compatibility, interface, etc., and choose one that meets your expectations.</li>
110
- <li>Adjust the settings and features of the music player to enhance your listening experience. You can customize various aspects of your music player, such as volume, equalizer, playback mode, playlist, etc., to suit your mood and taste. You can also explore other features of your music player, such as lyrics display, visualizer, sound effects, etc., to make your listening more fun and enjoyable.</li>
111
- </ul>
112
- <h3>Sing Along with the Lyrics and Learn Some Korean Words</h3>
113
- <p>If you want to sing along with I.O.I's Very Very Very song and learn some Korean words from it, you need to find the lyrics of the song online or offline. You can use the following table to compare the sources of the lyrics and their features:</p>
- | Source | Features |
- | ------ | -------- |
- | [Color Coded Lyrics](^1^) | Provides the lyrics in Korean, Romanization, and English translation. Also provides the color codes for each member's parts and some background information about the song. |
- | [Genius](^2^) | Provides the lyrics in Korean and English translation. Also provides some annotations, explanations, and trivia about the song. |
- | [AZLyrics](^3^) | Provides the lyrics in English translation only. |
- <p>You can choose the source that suits your preference and needs, and then follow these steps to sing along with the lyrics and learn some Korean words:</p>
- - Open the source of the lyrics on your device and search for I.O.I's Very Very Very song.
- - Play the song with your music player and follow the lyrics on your screen.
- - Try to sing along with the song and pronounce the Korean words correctly. You can also use the Romanization or the English translation to help you understand the meaning of the words.
- - Pay attention to some common or useful Korean words and phrases from the song, such as 너무 (very), 좋아하다 (to like), 말해줘 (tell me), 자꾸 (keep), 떠오르다 (to come to mind), 조심하다 (to be careful), etc. You can also use a dictionary or a translator to look up more words or phrases that interest you.
- - Repeat the steps until you can sing along with the song confidently and learn some Korean words fluently.
- <h2>Conclusion</h2>
114
- <p>In this article, we have shown you how to download I.O.I's Very Very Very song from different platforms, such as YouTube, Spotify, Apple Music, etc., and how to enjoy it offline, such as transferring it to your devices, playing it with your favorite music player, singing along with the lyrics, and learning some Korean words. We hope you have found this article helpful and informative, and that you have enjoyed listening to I.O.I's Very Very Very song.</p>
115
- <p>I.O.I was a talented and charming girl group that left a lasting impression on many fans with their songs and performances. Although they have disbanded, their music lives on and can still bring joy and happiness to many listeners. If you are one of them, we encourage you to download and enjoy their Very Very Very song offline, as well as their other songs from their albums and playlists.</p>
116
- <p>Thank you for reading this article. If you have any questions or feedback, please feel free to leave them in the comments section below. We would love to hear from you.</p>
117
- <h2>FAQs</h2>
118
- <p>Here are some frequently asked questions about I.O.I's Very Very Very song and how to download and enjoy it offline:</p>
119
- <ol>
120
- <li>Q: When was I.O.I's Very Very Very song released?<br>
121
- A: I.O.I's Very Very Very song was released on October 17, 2016 as the title track of their second mini-album Miss Me?</li>
122
- <li>Q: Who composed I.O.I's Very Very Very song?<br>
123
- A: I.O.I's Very Very Very song was composed by Park Jin-young, the founder of JYP Entertainment, who also produced other songs for I.O.I.</li>
124
- <li>Q: How many members were in I.O.I?<br>
125
- A: I.O.I had 11 members who were selected from different agencies through a survival reality show called Produce 101. They were Nayoung, Chungha, Sejeong, Chaeyeon, Kyulkyung, Sohye, Yeonjung, Yoojung, Mina, Doyeon, and Somi.</li>
126
- <li>Q: Why did I.O.I disband?<br>
127
- A: I.O.I disbanded in 2017 because they were a project group that had a limited contract period. The members returned to their original agencies and pursued their individual careers.</li>
128
- <li>Q: Where can I find more songs by I.O.I?<br>
129
- A: You can find more songs by I.O.I on various platforms, such as YouTube, Spotify, Apple Music, etc. You can also check out their albums and playlists, such as Chrysalis, Miss Me?, Whatta Man, etc.</li>
130
- </ol></p> 401be4b1e0<br />
131
- <br />
132
- <br />
 
spaces/2023Liu2023/bingo/src/components/markdown.tsx DELETED
@@ -1,9 +0,0 @@
1
- import { FC, memo } from 'react'
2
- import ReactMarkdown, { Options } from 'react-markdown'
3
-
4
- export const MemoizedReactMarkdown: FC<Options> = memo(
5
- ReactMarkdown,
6
- (prevProps, nextProps) =>
7
- prevProps.children === nextProps.children &&
8
- prevProps.className === nextProps.className
9
- )
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/base_svs_infer.py DELETED
@@ -1,265 +0,0 @@
1
- import os
2
-
3
- import torch
4
- import numpy as np
5
- from modules.hifigan.hifigan import HifiGanGenerator
6
- from vocoders.hifigan import HifiGAN
7
- from inference.svs.opencpop.map import cpop_pinyin2ph_func
8
-
9
- from utils import load_ckpt
10
- from utils.hparams import set_hparams, hparams
11
- from utils.text_encoder import TokenTextEncoder
12
- from pypinyin import pinyin, lazy_pinyin, Style
13
- import librosa
14
- import glob
15
- import re
16
-
17
-
18
- class BaseSVSInfer:
19
- def __init__(self, hparams, device=None):
20
- if device is None:
21
- device = 'cuda' if torch.cuda.is_available() else 'cpu'
22
- self.hparams = hparams
23
- self.device = device
24
-
25
- phone_list = ["AP", "SP", "a", "ai", "an", "ang", "ao", "b", "c", "ch", "d", "e", "ei", "en", "eng", "er", "f", "g",
26
- "h", "i", "ia", "ian", "iang", "iao", "ie", "in", "ing", "iong", "iu", "j", "k", "l", "m", "n", "o",
27
- "ong", "ou", "p", "q", "r", "s", "sh", "t", "u", "ua", "uai", "uan", "uang", "ui", "un", "uo", "v",
28
- "van", "ve", "vn", "w", "x", "y", "z", "zh"]
29
- self.ph_encoder = TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',')
30
- self.pinyin2phs = cpop_pinyin2ph_func()
31
- self.spk_map = {'opencpop': 0}
32
-
33
- self.model = self.build_model()
34
- self.model.eval()
35
- self.model.to(self.device)
36
- self.vocoder = self.build_vocoder()
37
- self.vocoder.eval()
38
- self.vocoder.to(self.device)
39
-
40
- def build_model(self):
41
- raise NotImplementedError
42
-
43
- def forward_model(self, inp):
44
- raise NotImplementedError
45
-
46
- def build_vocoder(self):
47
- base_dir = hparams['vocoder_ckpt']
48
- config_path = f'{base_dir}/config.yaml'
49
- ckpt = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'),
50
- key=lambda x: int(re.findall(r'model_ckpt_steps_(\d+)\.ckpt', x)[0]))[-1]
51
- print('| load HifiGAN: ', ckpt)
52
- ckpt_dict = torch.load(ckpt, map_location="cpu")
53
- config = set_hparams(config_path, global_hparams=False)
54
- state = ckpt_dict["state_dict"]["model_gen"]
55
- vocoder = HifiGanGenerator(config)
56
- vocoder.load_state_dict(state, strict=True)
57
- vocoder.remove_weight_norm()
58
- vocoder = vocoder.eval().to(self.device)
59
- return vocoder
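The checkpoint-picking idiom in `build_vocoder` above (glob for `model_ckpt_steps_*.ckpt`, then sort by the step number embedded in the filename) is worth isolating: a plain lexicographic sort would rank `steps_9000` after `steps_10000`. A minimal standalone sketch of the same idea, using a raw-string regex so `\d` and the literal dot are handled correctly (the helper name is ours, and the directory layout is assumed to match the code above):

```python
import glob
import re

def latest_checkpoint(base_dir: str) -> str:
    """Return the path of the checkpoint with the highest training step."""
    ckpts = glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt')
    # Sort numerically by the step count in the filename; assumes every
    # matching file embeds a decimal step count, as in the code above.
    ckpts.sort(key=lambda p: int(re.search(r'model_ckpt_steps_(\d+)\.ckpt', p).group(1)))
    return ckpts[-1]
```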
60
-
61
- def run_vocoder(self, c, **kwargs):
62
- c = c.transpose(2, 1) # [B, 80, T]
63
- f0 = kwargs.get('f0') # [B, T]
64
- if f0 is not None and hparams.get('use_nsf'):
65
- # f0 = torch.FloatTensor(f0).to(self.device)
66
- y = self.vocoder(c, f0).view(-1)
67
- else:
68
- y = self.vocoder(c).view(-1)
69
- # [T]
70
- return y[None]
71
-
72
- def preprocess_word_level_input(self, inp):
73
- # Pypinyin can't solve polyphonic words
74
- text_raw = inp['text'].replace('最长', '最常').replace('长睫毛', '常睫毛') \
75
- .replace('那么长', '那么常').replace('多长', '多常') \
76
- .replace('很长', '很常') # We hope someone could provide a better g2p module for us by opening pull requests.
77
-
78
- # lyric
79
- pinyins = lazy_pinyin(text_raw, strict=False)
80
- ph_per_word_lst = [self.pinyin2phs[pinyin.strip()] for pinyin in pinyins if pinyin.strip() in self.pinyin2phs]
81
-
82
- # Note
83
- note_per_word_lst = [x.strip() for x in inp['notes'].split('|') if x.strip() != '']
84
- mididur_per_word_lst = [x.strip() for x in inp['notes_duration'].split('|') if x.strip() != '']
85
-
86
- if len(note_per_word_lst) == len(ph_per_word_lst) == len(mididur_per_word_lst):
87
- print('Pass word-notes check.')
88
- else:
89
- print('The number of words does\'t match the number of notes\' windows. ',
90
- 'You should split the note(s) for each word by | mark.')
91
- print(ph_per_word_lst, note_per_word_lst, mididur_per_word_lst)
92
- print(len(ph_per_word_lst), len(note_per_word_lst), len(mididur_per_word_lst))
93
- return None
94
-
95
- note_lst = []
96
- ph_lst = []
97
- midi_dur_lst = []
98
- is_slur = []
99
- for idx, ph_per_word in enumerate(ph_per_word_lst):
100
- # for phs in one word:
101
- # single ph like ['ai'] or multiple phs like ['n', 'i']
102
- ph_in_this_word = ph_per_word.split()
103
-
104
- # for notes in one word:
105
- # single note like ['D4'] or multiple notes like ['D4', 'E4'] which means a 'slur' here.
106
- note_in_this_word = note_per_word_lst[idx].split()
107
- midi_dur_in_this_word = mididur_per_word_lst[idx].split()
108
- # process for the model input
109
- # Step 1.
110
- # Deal with note of 'not slur' case or the first note of 'slur' case
111
- # j ie
112
- # F#4/Gb4 F#4/Gb4
113
- # 0 0
114
- for ph in ph_in_this_word:
115
- ph_lst.append(ph)
116
- note_lst.append(note_in_this_word[0])
117
- midi_dur_lst.append(midi_dur_in_this_word[0])
118
- is_slur.append(0)
119
- # step 2.
120
- # Deal with the 2nd, 3rd... notes of 'slur' case
121
- # j ie ie
122
- # F#4/Gb4 F#4/Gb4 C#4/Db4
123
- # 0 0 1
124
- if len(note_in_this_word) > 1: # is_slur = True, we should repeat the YUNMU to match the 2nd, 3rd... notes.
125
- for idx in range(1, len(note_in_this_word)):
126
- ph_lst.append(ph_in_this_word[-1])
127
- note_lst.append(note_in_this_word[idx])
128
- midi_dur_lst.append(midi_dur_in_this_word[idx])
129
- is_slur.append(1)
130
- ph_seq = ' '.join(ph_lst)
131
-
132
- if len(ph_lst) == len(note_lst) == len(midi_dur_lst):
133
- print(len(ph_lst), len(note_lst), len(midi_dur_lst))
134
- print('Pass word-notes check.')
135
- else:
136
- print('The number of words does\'t match the number of notes\' windows. ',
137
- 'You should split the note(s) for each word by | mark.')
138
- return None
139
- return ph_seq, note_lst, midi_dur_lst, is_slur
140
-
141
- def preprocess_phoneme_level_input(self, inp):
142
- ph_seq = inp['ph_seq']
143
- note_lst = inp['note_seq'].split()
144
- midi_dur_lst = inp['note_dur_seq'].split()
145
- is_slur = [float(x) for x in inp['is_slur_seq'].split()]
146
- print(len(note_lst), len(ph_seq.split()), len(midi_dur_lst))
147
- if len(note_lst) == len(ph_seq.split()) == len(midi_dur_lst):
148
- print('Pass word-notes check.')
149
- else:
150
- print('The number of words does\'t match the number of notes\' windows. ',
151
- 'You should split the note(s) for each word by | mark.')
152
- return None
153
- return ph_seq, note_lst, midi_dur_lst, is_slur
154
-
155
- def preprocess_input(self, inp, input_type='word'):
156
- """
157
-
158
- :param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)}
159
- :return:
160
- """
161
-
162
- item_name = inp.get('item_name', '<ITEM_NAME>')
163
- spk_name = inp.get('spk_name', 'opencpop')
164
-
165
- # single spk
166
- spk_id = self.spk_map[spk_name]
167
-
168
- # get ph seq, note lst, midi dur lst, is slur lst.
169
- if input_type == 'word':
170
- ret = self.preprocess_word_level_input(inp)
171
- elif input_type == 'phoneme': # like transcriptions.txt in Opencpop dataset.
172
- ret = self.preprocess_phoneme_level_input(inp)
173
- else:
174
- print('Invalid input type.')
175
- return None
176
-
177
- if ret:
178
- ph_seq, note_lst, midi_dur_lst, is_slur = ret
179
- else:
180
- print('==========> Preprocess_word_level or phone_level input wrong.')
181
- return None
182
-
183
- # convert note lst to midi id; convert note dur lst to midi duration
184
- try:
185
- midis = [librosa.note_to_midi(x.split("/")[0]) if x != 'rest' else 0
186
- for x in note_lst]
187
- midi_dur_lst = [float(x) for x in midi_dur_lst]
188
- except Exception as e:
189
- print(e)
190
- print('Invalid Input Type.')
191
- return None
192
-
193
- ph_token = self.ph_encoder.encode(ph_seq)
194
- item = {'item_name': item_name, 'text': inp['text'], 'ph': ph_seq, 'spk_id': spk_id,
195
- 'ph_token': ph_token, 'pitch_midi': np.asarray(midis), 'midi_dur': np.asarray(midi_dur_lst),
196
- 'is_slur': np.asarray(is_slur), }
197
- item['ph_len'] = len(item['ph_token'])
198
- return item
199
-
200
- def input_to_batch(self, item):
201
- item_names = [item['item_name']]
202
- text = [item['text']]
203
- ph = [item['ph']]
204
- txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device)
205
- txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device)
206
- spk_ids = torch.LongTensor(item['spk_id'])[None, :].to(self.device)
207
-
208
- pitch_midi = torch.LongTensor(item['pitch_midi'])[None, :hparams['max_frames']].to(self.device)
209
- midi_dur = torch.FloatTensor(item['midi_dur'])[None, :hparams['max_frames']].to(self.device)
210
- is_slur = torch.LongTensor(item['is_slur'])[None, :hparams['max_frames']].to(self.device)
211
-
212
- batch = {
213
- 'item_name': item_names,
214
- 'text': text,
215
- 'ph': ph,
216
- 'txt_tokens': txt_tokens,
217
- 'txt_lengths': txt_lengths,
218
- 'spk_ids': spk_ids,
219
- 'pitch_midi': pitch_midi,
220
- 'midi_dur': midi_dur,
221
- 'is_slur': is_slur
222
- }
223
- return batch
224
-
225
- def postprocess_output(self, output):
226
- return output
227
-
228
- def infer_once(self, inp):
229
- inp = self.preprocess_input(inp, input_type=inp['input_type'] if inp.get('input_type') else 'word')
230
- output = self.forward_model(inp)
231
- output = self.postprocess_output(output)
232
- return output
233
-
234
- @classmethod
235
- def example_run(cls, inp):
236
- from utils.audio import save_wav
237
- set_hparams(print_hparams=False)
238
- infer_ins = cls(hparams)
239
- out = infer_ins.infer_once(inp)
240
- os.makedirs('infer_out', exist_ok=True)
241
- save_wav(out, f'infer_out/example_out.wav', hparams['audio_sample_rate'])
242
-
243
-
244
- # if __name__ == '__main__':
245
- # debug
246
- # a = BaseSVSInfer(hparams)
247
- # a.preprocess_input({'text': '你 说 你 不 SP 懂 为 何 在 这 时 牵 手 AP',
248
- # 'notes': 'D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | rest | D#4/Eb4 | D4 | D4 | D4 | D#4/Eb4 | F4 | D#4/Eb4 | D4 | rest',
249
- # 'notes_duration': '0.113740 | 0.329060 | 0.287950 | 0.133480 | 0.150900 | 0.484730 | 0.242010 | 0.180820 | 0.343570 | 0.152050 | 0.266720 | 0.280310 | 0.633300 | 0.444590'
250
- # })
251
-
252
- # b = {
253
- # 'text': '小酒窝长睫毛AP是你最美的记号',
254
- # 'notes': 'C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4',
255
- # 'notes_duration': '0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340'
256
- # }
257
- # c = {
258
- # 'text': '小酒窝长睫毛AP是你最美的记号',
259
- # 'ph_seq': 'x iao j iu w o ch ang ang j ie ie m ao AP sh i n i z ui m ei d e j i h ao',
260
- # 'note_seq': 'C#4/Db4 C#4/Db4 F#4/Gb4 F#4/Gb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 F#4/Gb4 F#4/Gb4 F#4/Gb4 C#4/Db4 C#4/Db4 C#4/Db4 rest C#4/Db4 C#4/Db4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 F4 F4 C#4/Db4 C#4/Db4',
261
- # 'note_dur_seq': '0.407140 0.407140 0.376190 0.376190 0.242180 0.242180 0.509550 0.509550 0.183420 0.315400 0.315400 0.235020 0.361660 0.361660 0.223070 0.377270 0.377270 0.340550 0.340550 0.299620 0.299620 0.344510 0.344510 0.283770 0.283770 0.323390 0.323390 0.360340 0.360340',
262
- # 'is_slur_seq': '0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
263
- # } # input like Opencpop dataset.
264
- # a.preprocess_input(b)
265
- # a.preprocess_input(c, input_type='phoneme')
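
Reviewer note: the slur handling in preprocess_word_level_input above can be checked in isolation. Below is a minimal standalone sketch of just that step; the function name and I/O convention are illustrative, not part of the repo.

def expand_slurs(ph_per_word, notes_per_word):
    phs, notes, is_slur = [], [], []
    for word_phs, word_notes in zip(ph_per_word, notes_per_word):
        for ph in word_phs:              # every phoneme takes the word's first note
            phs.append(ph)
            notes.append(word_notes[0])
            is_slur.append(0)
        for note in word_notes[1:]:      # extra notes repeat the final vowel (YUNMU)
            phs.append(word_phs[-1])
            notes.append(note)
            is_slur.append(1)
    return phs, notes, is_slur

print(expand_slurs([["j", "ie"]], [["F#4/Gb4", "C#4/Db4"]]))
# (['j', 'ie', 'ie'], ['F#4/Gb4', 'F#4/Gb4', 'C#4/Db4'], [0, 0, 1])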
 
spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/decision_maker/brainstorming.py DELETED
@@ -1,67 +0,0 @@
- from __future__ import annotations
- import asyncio
- from colorama import Fore
-
- from typing import TYPE_CHECKING, List
-
- from . import decision_maker_registry
- from .base import BaseDecisionMaker
- from agentverse.logging import logger
-
- from agentverse.message import Message
-
- if TYPE_CHECKING:
-     from agentverse.agents.base import BaseAgent
-     from agentverse.message import CriticMessage
-
-
- @decision_maker_registry.register("brainstorming")
- class BrainstormingDecisionMaker(BaseDecisionMaker):
-     """
-     Much like the horizontal decision maker, but with some twists:
-     (1) The solver acts as a summarizer, summarizing the discussion of this turn
-     (2) After summarizing, all agents' memories are cleared and replaced with
-     the summary (to avoid exceeding the model's maximum context length too fast)
-     """
-
-     name: str = "brainstorming"
-
-     async def astep(
-         self,
-         agents: List[BaseAgent],
-         task_description: str,
-         previous_plan: str = "No solution yet.",
-         advice: str = "No advice yet.",
-         *args,
-         **kwargs,
-     ) -> List[str]:
-         if advice != "No advice yet.":
-             self.broadcast_messages(
-                 agents, [Message(content=advice, sender="Evaluator")]
-             )
-         for agent in agents[1:]:
-             review: CriticMessage = await agent.astep(
-                 previous_plan, advice, task_description
-             )
-             if review.content != "":
-                 self.broadcast_messages(agents, [review])
-
-             logger.info("", "Reviews:", Fore.YELLOW)
-             logger.info(
-                 "",
-                 f"[{review.sender}]: {review.content}",
-                 Fore.YELLOW,
-             )
-
-         result = agents[0].step(previous_plan, advice, task_description)
-         for agent in agents:
-             agent.memory.reset()
-         self.broadcast_messages(
-             agents,
-             [
-                 Message(
-                     content=result.content, sender="Summary From Previous Discussion"
-                 )
-             ],
-         )
-         return [result]
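
Reviewer note: the summarize-then-reset flow above is easy to sketch without the agentverse framework. The stub classes below are illustrative only, not part of the repo.

class StubAgent:
    def __init__(self, name):
        self.name = name
        self.memory = []

    def step(self, plan):
        return f"{self.name}: comment on {plan!r}"

def brainstorm(agents, plan):
    reviews = [a.step(plan) for a in agents[1:]]   # critics (agents[1:]) review the plan
    summary = " | ".join(reviews)                  # agents[0] would summarize the reviews
    for a in agents:
        a.memory = [summary]                       # clear every memory, re-seed with the summary
    return summary

print(brainstorm([StubAgent("solver"), StubAgent("critic")], "plan v1"))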
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/AddChildMethods.js DELETED
@@ -1,112 +0,0 @@
- import AddChild from '../basesizer/utils/AddChild.js';
- import GetBoundsConfig from '../utils/GetBoundsConfig.js';
- import ALIGNMODE from '../utils/AlignConst.js';
-
- const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
- const GetValue = Phaser.Utils.Objects.GetValue;
- const ALIGN_CENTER = Phaser.Display.Align.CENTER;
-
-
- var GetEmptyCellIndex = function (columnIndex, rowIndex, cells, columnCount, rowCount) {
-     if ((typeof (columnIndex) === 'number') || (typeof (rowIndex) === 'number')) {
-         if (columnIndex === undefined) {
-             var idx;
-             for (var i = 0; i < columnCount; i++) {
-                 idx = (rowIndex * columnCount) + i;
-                 if (!cells[idx]) {
-                     return idx;
-                 }
-             }
-         } else if (rowIndex === undefined) {
-             var idx;
-             for (var i = 0; i < rowCount; i++) {
-                 idx = (i * columnCount) + columnIndex;
-                 if (!cells[idx]) {
-                     return idx;
-                 }
-             }
-         } else {
-             var idx = (rowIndex * columnCount) + columnIndex;
-             if (!cells[idx]) {
-                 return idx;
-             }
-         }
-
-     } else if (rowIndex === true) {
-         var idx;
-         for (var i = 0; i < columnCount; i++) {
-             for (var j = 0; j < rowCount; j++) {
-                 idx = (j * columnCount) + i;
-                 if (!cells[idx]) {
-                     return idx;
-                 }
-             }
-         }
-     } else {
-         for (var i = 0, cnt = cells.length; i < cnt; i++) {
-             if (!cells[i]) {
-                 return i;
-             }
-         }
-     }
-     return null;
- }
-
- var Add = function (gameObject, columnIndex, rowIndex, align, paddingConfig, expand, childKey) {
-     AddChild.call(this, gameObject);
-     if (IsPlainObject(columnIndex)) {
-         var config = columnIndex;
-         columnIndex = GetValue(config, 'column', undefined);
-         rowIndex = GetValue(config, 'row', undefined);
-         align = GetValue(config, 'align', ALIGN_CENTER);
-         paddingConfig = GetValue(config, 'padding', 0);
-         expand = GetValue(config, 'expand', false);
-         childKey = GetValue(config, 'key', undefined);
-     }
-
-     // Get insert index
-     var itemIndex = GetEmptyCellIndex(columnIndex, rowIndex, this.sizerChildren, this.columnCount, this.rowCount);
-     if (itemIndex === null) {
-         // Specific index mode
-         if ((typeof (columnIndex) === 'number') && (typeof (rowIndex) === 'number')) {
-             return this;
-         }
-
-         if ((rowIndex === true) || (typeof (rowIndex) === 'number')) {
-             this.addEmptyColumn();
-         } else {
-             this.addEmptyRow();
-         }
-
-         // Get insert index again
-         itemIndex = GetEmptyCellIndex(columnIndex, rowIndex, this.sizerChildren, this.columnCount, this.rowCount);
-     }
-
-     if (typeof (align) === 'string') {
-         align = ALIGNMODE[align];
-     }
-     if (align === undefined) {
-         align = ALIGN_CENTER;
-     }
-     if (paddingConfig === undefined) {
-         paddingConfig = 0;
-     }
-     if (expand === undefined) {
-         expand = true;
-     }
-
-     var config = this.getSizerConfig(gameObject);
-     config.align = align;
-     config.padding = GetBoundsConfig(paddingConfig);
-     config.expand = expand;
-     this.sizerChildren[itemIndex] = gameObject;
-
-     if (childKey !== undefined) {
-         this.addChildrenMap(childKey, gameObject)
-     }
-     return this;
- }
-
- export default {
-     add: Add
- }
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/lineprogresscanvas/Factory.js DELETED
@@ -1,13 +0,0 @@
- import LineProgressCanvas from './LineProgressCanvas.js';
- import ObjectFactory from '../ObjectFactory.js';
- import SetValue from '../../../plugins/utils/object/SetValue.js';
-
- ObjectFactory.register('lineProgressCanvas', function (x, y, width, height, barColor, value, config) {
-     var gameObject = new LineProgressCanvas(this.scene, x, y, width, height, barColor, value, config);
-     this.scene.add.existing(gameObject);
-     return gameObject;
- });
-
- SetValue(window, 'RexPlugins.UI.LineProgressCanvas', LineProgressCanvas);
-
- export default LineProgressCanvas;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/ParseEaseConfig.js DELETED
@@ -1,18 +0,0 @@
- import GetOrientationMode from '../../utils/GetOrientationMode.js';
- var ParseEaseConfig = function (menu, easeConfig) {
-     if (typeof (easeConfig) === 'number') {
-         easeConfig = {
-             duration: easeConfig
-         };
-     }
-
-     if (easeConfig.hasOwnProperty('orientation') && (easeConfig.orientation !== undefined)) {
-         easeConfig.sameOrientation = GetOrientationMode(easeConfig.orientation) === menu.orientation;
-     } else {
-         easeConfig.sameOrientation = true;
-     }
-     easeConfig.destroy = false;
-     return easeConfig;
- }
-
- export default ParseEaseConfig;
 
spaces/Akshat-1812/Dog-Vision/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Dog Vision
- emoji: 📉
- colorFrom: indigo
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.1.4
- app_file: app.py
- pinned: false
- license: unknown
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AlexWang/lama/saicinpainting/training/visualizers/noop.py DELETED
@@ -1,9 +0,0 @@
- from saicinpainting.training.visualizers.base import BaseVisualizer
-
-
- class NoopVisualizer(BaseVisualizer):
-     def __init__(self, *args, **kwargs):
-         pass
-
-     def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None):
-         pass
 
spaces/Alpaca233/ChatGPT-PPT-Generate/app.py DELETED
@@ -1,245 +0,0 @@
- import glob
- import os
- import random
- import re
- import string
-
- import gradio as gr
-
- import openai
- from icrawler import ImageDownloader
- from icrawler.builtin import GoogleImageCrawler, BingImageCrawler
- from uuid import uuid4
- from pptx import Presentation
-
- bad_coding_practice = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in
-                               range(16))
-
-
- def refresh_bad_coding_practice():
-     global bad_coding_practice
-     bad_coding_practice = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits)
-                                   for _ in range(16))
-     return
-
-
- class PrefixNameDownloader(ImageDownloader):
-
-     def get_filename(self, task, default_ext):
-         filename = super(PrefixNameDownloader, self).get_filename(
-             task, default_ext)
-         print(bad_coding_practice)
-         return 'prefix_' + bad_coding_practice + filename
-
-
- def generate_ppt(file, topic, slide_length, api_key):
-     print(file.name)
-
-     root = Presentation(file.name)
-
-     openai.api_key = api_key
-
-     message = f"""
-     Create content for a slideshow presentation.
-     The content's topic is {topic}.
-     The slideshow is {slide_length} slides long.
-     The content is written in the language of the content I give you above.
-
-
-     You are allowed to use the following slide types:
-
-     Slide types:
-     Title Slide - (Title, Subtitle)
-     Content Slide - (Title, Content)
-     Image Slide - (Title, Content, Image)
-     Thanks Slide - (Title)
-
-     Put this tag before the Title Slide: [L_TS]
-     Put this tag before the Content Slide: [L_CS]
-     Put this tag before the Image Slide: [L_IS]
-     Put this tag before the Thanks Slide: [L_THS]
-
-     Put "[SLIDEBREAK]" after each slide
-
-     For example:
-     [L_TS]
-     [TITLE]Mental Health[/TITLE]
-
-     [SLIDEBREAK]
-
-     [L_CS]
-     [TITLE]Mental Health Definition[/TITLE]
-     [CONTENT]
-     1. Definition: A person’s condition with regard to their psychological and emotional well-being
-     2. Can impact one's physical health
-     3. Stigmatized too often.
-     [/CONTENT]
-
-     [SLIDEBREAK]
-
-     Put this tag before the Title: [TITLE]
-     Put this tag after the Title: [/TITLE]
-     Put this tag before the Subtitle: [SUBTITLE]
-     Put this tag after the Subtitle: [/SUBTITLE]
-     Put this tag before the Content: [CONTENT]
-     Put this tag after the Content: [/CONTENT]
-     Put this tag before the Image: [IMAGE]
-     Put this tag after the Image: [/IMAGE]
-
-     Elaborate on the Content, provide as much information as possible.
-     You put a [/CONTENT] at the end of the Content.
-     Do not reply as if you are talking about the slideshow itself. (ex. "Include pictures here about...")
-     Do not include any special characters (?, !, ., :, ) in the Title.
-     Do not include any additional information in your response and stick to the format."""
-
-     response = openai.ChatCompletion.create(
-         model="gpt-3.5-turbo",
-         messages=[
-             {"role": "user", "content": message}
-         ]
-     )
-
-     # """ Ref for slide types:
-     # 0 -> title and subtitle
-     # 1 -> title and content
-     # 2 -> section header
-     # 3 -> two content
-     # 4 -> Comparison
-     # 5 -> Title only
-     # 6 -> Blank
-     # 7 -> Content with caption
-     # 8 -> Pic with caption
-     # """
-
-     def delete_all_slides():
-         for i in range(len(root.slides) - 1, -1, -1):
-             r_id = root.slides._sldIdLst[i].rId
-             root.part.drop_rel(r_id)
-             del root.slides._sldIdLst[i]
-
-     def create_title_slide(title, subtitle):
-         layout = root.slide_layouts[0]
-         slide = root.slides.add_slide(layout)
-         slide.shapes.title.text = title
-         slide.placeholders[1].text = subtitle
-
-     def create_section_header_slide(title):
-         layout = root.slide_layouts[2]
-         slide = root.slides.add_slide(layout)
-         slide.shapes.title.text = title
-
-     def create_title_and_content_slide(title, content):
-         layout = root.slide_layouts[1]
-         slide = root.slides.add_slide(layout)
-         slide.shapes.title.text = title
-         slide.placeholders[1].text = content
-
-     def create_title_and_content_and_image_slide(title, content, image_query):
-         layout = root.slide_layouts[8]
-         slide = root.slides.add_slide(layout)
-         slide.shapes.title.text = title
-         slide.placeholders[2].text = content
-         refresh_bad_coding_practice()
-         bing_crawler = GoogleImageCrawler(downloader_cls=PrefixNameDownloader, storage={'root_dir': os.getcwd()})
-         bing_crawler.crawl(keyword=image_query, max_num=1)
-         dir_path = os.path.dirname(os.path.realpath(__file__))
-         file_name = glob.glob(f"prefix_{bad_coding_practice}*")
-         print(file_name)
-         img_path = os.path.join(dir_path, file_name[0])
-         slide.shapes.add_picture(img_path, slide.placeholders[1].left, slide.placeholders[1].top,
-                                  slide.placeholders[1].width, slide.placeholders[1].height)
-
-     def find_text_in_between_tags(text, start_tag, end_tag):
-         start_pos = text.find(start_tag)
-         end_pos = text.find(end_tag)
-         result = []
-         while start_pos > -1 and end_pos > -1:
-             text_between_tags = text[start_pos + len(start_tag):end_pos]
-             result.append(text_between_tags)
-             start_pos = text.find(start_tag, end_pos + len(end_tag))
-             end_pos = text.find(end_tag, start_pos)
-         res1 = "".join(result)
-         res2 = re.sub(r"\[IMAGE\].*?\[/IMAGE\]", '', res1)
-         if len(result) > 0:
-             return res2
-         else:
-             return ""
-
-     def search_for_slide_type(text):
-         tags = ["[L_TS]", "[L_CS]", "[L_IS]", "[L_THS]"]
-         found_text = next((s for s in tags if s in text), None)
-         return found_text
-
-     def parse_response(reply):
-         list_of_slides = reply.split("[SLIDEBREAK]")
-         for slide in list_of_slides:
-             slide_type = search_for_slide_type(slide)
-             if slide_type == "[L_TS]":
-                 create_title_slide(find_text_in_between_tags(str(slide), "[TITLE]", "[/TITLE]"),
-                                    find_text_in_between_tags(str(slide), "[SUBTITLE]", "[/SUBTITLE]"))
-             elif slide_type == "[L_CS]":
-                 create_title_and_content_slide("".join(find_text_in_between_tags(str(slide), "[TITLE]", "[/TITLE]")),
-                                                "".join(find_text_in_between_tags(str(slide), "[CONTENT]",
-                                                                                  "[/CONTENT]")))
-             elif slide_type == "[L_IS]":
-                 create_title_and_content_and_image_slide("".join(find_text_in_between_tags(str(slide), "[TITLE]",
-                                                                                            "[/TITLE]")),
-                                                          "".join(find_text_in_between_tags(str(slide), "[CONTENT]",
-                                                                                            "[/CONTENT]")),
-                                                          "".join(find_text_in_between_tags(str(slide), "[IMAGE]",
-                                                                                            "[/IMAGE]")))
-             elif slide_type == "[L_THS]":
-                 create_section_header_slide("".join(find_text_in_between_tags(str(slide), "[TITLE]", "[/TITLE]")))
-
-     def find_title():
-         return root.slides[0].shapes.title.text
-
-     delete_all_slides()
-
-     print(response)
-
-     parse_response(response['choices'][0]['message']['content'])
-
-     name_ = str(uuid4()).replace('-', '')
-
-     root.save(f"./{name_}.pptx")
-
-     print("done")
-
-     dir_path = "./"
-     prefix = "prefix_"
-
-     for file_name in os.listdir(dir_path):
-         if file_name.startswith(prefix):
-             file_path = os.path.join(dir_path, file_name)
-             if os.path.isfile(file_path):
-                 os.remove(file_path)
-
-     return f"./{name_}.pptx"
-
-
- with gr.Blocks(title="ChatGPT PPT Outline Generator") as demo:
-     gr.Markdown("""<h1><center>ChatGPT PPT Outline Generator</center></h1>""")
-     with gr.Row():
-         with gr.Column():
-             openai_token = gr.Textbox(label="OpenAI API Key")
-             topic = gr.Textbox(label="PPT topic or content")
-             length = gr.Slider(minimum=1, maximum=20, value=6, label="Number of slides to generate", step=1)
-             theme = gr.File(value="./theme.pptx", file_types=['pptx', 'ppt'], label="PPT template")
-             output_file = gr.File(interactive=False)
-
-             topic.submit(
-                 fn=generate_ppt,
-                 inputs=[theme, topic, length, openai_token],
-                 outputs=[output_file]
-             )
-
-             submit = gr.Button("Generate")
-             submit.click(
-                 fn=generate_ppt,
-                 inputs=[theme, topic, length, openai_token],
-                 outputs=[output_file]
-             )
-
- if __name__ == "__main__":
-     demo.launch()
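
Reviewer note: the bracket-tag protocol above can be exercised on its own. For a single tag pair, a regex one-liner behaves like find_text_in_between_tags; the snippet is illustrative only.

import re

reply = "[L_TS]\n[TITLE]Mental Health[/TITLE]\n[SLIDEBREAK]"
title = "".join(re.findall(r"\[TITLE\](.*?)\[/TITLE\]", reply, re.S))
print(title)  # Mental Health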
 
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_mbf.py DELETED
@@ -1,26 +0,0 @@
- from easydict import EasyDict as edict
-
- # make training faster
- # our RAM is 256G
- # mount -t tmpfs -o size=140G tmpfs /train_tmp
-
- config = edict()
- config.loss = "cosface"
- config.network = "mbf"
- config.resume = False
- config.output = None
- config.embedding_size = 512
- config.sample_rate = 0.1
- config.fp16 = True
- config.momentum = 0.9
- config.weight_decay = 2e-4
- config.batch_size = 128
- config.lr = 0.1  # batch size is 512
-
- config.rec = "/train_tmp/glint360k"
- config.num_classes = 360232
- config.num_image = 17091657
- config.num_epoch = 20
- config.warmup_epoch = -1
- config.decay_epoch = [8, 12, 15, 18]
- config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
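
Reviewer note: EasyDict is what lets the training loop read these settings as attributes. A quick self-contained illustration, using values from the config above:

from easydict import EasyDict as edict

config = edict({"loss": "cosface", "batch_size": 128, "lr": 0.1})
assert config.loss == config["loss"] == "cosface"  # attribute and key access agree
print(config.lr)  # 0.1, tuned for a total batch size of 512 per the comment above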
 
spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py DELETED
@@ -1,167 +0,0 @@
- _base_ = '../_base_/default_runtime.py'
-
- # model settings
- model = dict(
-     type='RetinaNet',
-     pretrained='open-mmlab://detectron2/resnet101_caffe',
-     backbone=dict(
-         type='ResNet',
-         depth=101,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=False),
-         norm_eval=True,
-         style='caffe'),
-     neck=dict(
-         type='FPN',
-         in_channels=[256, 512, 1024, 2048],
-         out_channels=256,
-         start_level=1,
-         add_extra_convs=True,
-         num_outs=5),
-     bbox_head=dict(
-         type='GARetinaHead',
-         num_classes=80,
-         in_channels=256,
-         stacked_convs=4,
-         feat_channels=256,
-         approx_anchor_generator=dict(
-             type='AnchorGenerator',
-             octave_base_scale=4,
-             scales_per_octave=3,
-             ratios=[0.5, 1.0, 2.0],
-             strides=[8, 16, 32, 64, 128]),
-         square_anchor_generator=dict(
-             type='AnchorGenerator',
-             ratios=[1.0],
-             scales=[4],
-             strides=[8, 16, 32, 64, 128]),
-         anchor_coder=dict(
-             type='DeltaXYWHBBoxCoder',
-             target_means=[.0, .0, .0, .0],
-             target_stds=[1.0, 1.0, 1.0, 1.0]),
-         bbox_coder=dict(
-             type='DeltaXYWHBBoxCoder',
-             target_means=[.0, .0, .0, .0],
-             target_stds=[1.0, 1.0, 1.0, 1.0]),
-         loc_filter_thr=0.01,
-         loss_loc=dict(
-             type='FocalLoss',
-             use_sigmoid=True,
-             gamma=2.0,
-             alpha=0.25,
-             loss_weight=1.0),
-         loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
-         loss_cls=dict(
-             type='FocalLoss',
-             use_sigmoid=True,
-             gamma=2.0,
-             alpha=0.25,
-             loss_weight=1.0),
-         loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)))
- # training and testing settings
- train_cfg = dict(
-     ga_assigner=dict(
-         type='ApproxMaxIoUAssigner',
-         pos_iou_thr=0.5,
-         neg_iou_thr=0.4,
-         min_pos_iou=0.4,
-         ignore_iof_thr=-1),
-     ga_sampler=dict(
-         type='RandomSampler',
-         num=256,
-         pos_fraction=0.5,
-         neg_pos_ub=-1,
-         add_gt_as_proposals=False),
-     assigner=dict(
-         type='MaxIoUAssigner',
-         pos_iou_thr=0.5,
-         neg_iou_thr=0.5,
-         min_pos_iou=0.0,
-         ignore_iof_thr=-1),
-     allowed_border=-1,
-     pos_weight=-1,
-     center_ratio=0.2,
-     ignore_ratio=0.5,
-     debug=False)
- test_cfg = dict(
-     nms_pre=1000,
-     min_bbox_size=0,
-     score_thr=0.05,
-     nms=dict(type='nms', iou_threshold=0.5),
-     max_per_img=100)
- # dataset settings
- dataset_type = 'CocoDataset'
- data_root = 'data/coco/'
- img_norm_cfg = dict(
-     mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(
-         type='Resize',
-         img_scale=[(1333, 480), (1333, 960)],
-         keep_ratio=True,
-         multiscale_mode='range'),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(1333, 800),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     samples_per_gpu=2,
-     workers_per_gpu=2,
-     train=dict(
-         type=dataset_type,
-         ann_file=data_root + 'annotations/instances_train2017.json',
-         img_prefix=data_root + 'train2017/',
-         pipeline=train_pipeline),
-     val=dict(
-         type=dataset_type,
-         ann_file=data_root + 'annotations/instances_val2017.json',
-         img_prefix=data_root + 'val2017/',
-         pipeline=test_pipeline),
-     test=dict(
-         type=dataset_type,
-         ann_file=data_root + 'annotations/instances_val2017.json',
-         img_prefix=data_root + 'val2017/',
-         pipeline=test_pipeline))
- evaluation = dict(interval=1, metric='bbox')
- # optimizer
- optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
- optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
- # learning policy
- lr_config = dict(
-     policy='step',
-     warmup='linear',
-     warmup_iters=500,
-     warmup_ratio=1.0 / 3,
-     step=[16, 22])
- checkpoint_config = dict(interval=1)
- # yapf:disable
- log_config = dict(
-     interval=50,
-     hooks=[
-         dict(type='TextLoggerHook'),
-         # dict(type='TensorboardLoggerHook')
-     ])
- # yapf:enable
- # runtime settings
- runner = dict(type='EpochBasedRunner', max_epochs=24)
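
Reviewer note: for reference, a hedged inference sketch against this config, assuming an MMDetection 2.x install and a matching checkpoint; both paths below are placeholders.

from mmdet.apis import init_detector, inference_detector

config_file = 'configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py'
checkpoint_file = 'ga_retinanet_r101_caffe_fpn_mstrain_2x.pth'  # hypothetical path
model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # per-class bbox arrays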
 
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/deeplabv3_r50-d8.py DELETED
@@ -1,44 +0,0 @@
- # model settings
- norm_cfg = dict(type='SyncBN', requires_grad=True)
- model = dict(
-     type='EncoderDecoder',
-     pretrained='open-mmlab://resnet50_v1c',
-     backbone=dict(
-         type='ResNetV1c',
-         depth=50,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         dilations=(1, 1, 2, 4),
-         strides=(1, 2, 1, 1),
-         norm_cfg=norm_cfg,
-         norm_eval=False,
-         style='pytorch',
-         contract_dilation=True),
-     decode_head=dict(
-         type='ASPPHead',
-         in_channels=2048,
-         in_index=3,
-         channels=512,
-         dilations=(1, 12, 24, 36),
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
-     auxiliary_head=dict(
-         type='FCNHead',
-         in_channels=1024,
-         in_index=2,
-         channels=256,
-         num_convs=1,
-         concat_input=False,
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-     # model training and testing settings
-     train_cfg=dict(),
-     test_cfg=dict(mode='whole'))
 
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py DELETED
@@ -1,7 +0,0 @@
- _base_ = [
-     '../_base_/models/deeplabv3plus_r50-d8.py',
-     '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
-     '../_base_/schedules/schedule_20k.py'
- ]
- model = dict(
-     decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
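
Reviewer note: the _base_ list is merged at load time by mmcv. A quick way to inspect the composed result, assuming mmcv is installed and the config tree is laid out as above:

from mmcv import Config

cfg = Config.fromfile(
    'configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py')
print(cfg.model.decode_head.num_classes)  # 21: this file's override replaces the base value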
 
spaces/Andy1621/uniformer_image_segmentation/configs/fp16/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes.py DELETED
@@ -1,5 +0,0 @@
- _base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py'
- # fp16 settings
- optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.)
- # fp16 placeholder
- fp16 = dict()
 
spaces/Apex-X/GODROOP/roop/processors/__init__.py DELETED
File without changes
spaces/Apex-X/ROOPOK/roop/metadata.py DELETED
@@ -1,2 +0,0 @@
- name = 'roop'
- version = '1.3.2'
 
spaces/ArcanAlt/arcanDream/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: ArcanDream
- emoji: 💻
- colorFrom: green
- colorTo: green
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/nap.py DELETED
@@ -1,43 +0,0 @@
- # Copyright 2016 Étienne Bersac
- # Copyright 2016 Julien Danjou
- # Copyright 2016 Joshua Harlow
- # Copyright 2013-2014 Ray Holder
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import time
- import typing
-
- if typing.TYPE_CHECKING:
-     import threading
-
-
- def sleep(seconds: float) -> None:
-     """
-     Sleep strategy that delays execution for a given number of seconds.
-
-     This is the default strategy, and may be mocked out for unit testing.
-     """
-     time.sleep(seconds)
-
-
- class sleep_using_event:
-     """Sleep strategy that waits on an event to be set."""
-
-     def __init__(self, event: "threading.Event") -> None:
-         self.event = event
-
-     def __call__(self, timeout: typing.Optional[float]) -> None:
-         # NOTE(harlowja): this may *not* actually wait for timeout
-         # seconds if the event is set (ie this may eject out early).
-         self.event.wait(timeout=timeout)
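
Reviewer note: a self-contained sketch of the event-based nap, showing how another thread can cut a retry sleep short; the import path is for upstream tenacity and the vendored path may differ.

import threading
from tenacity.nap import sleep_using_event  # vendored path may differ

stop = threading.Event()
napper = sleep_using_event(stop)

threading.Timer(0.1, stop.set).start()  # set the event after ~100 ms
napper(timeout=10.0)                    # returns early, after ~0.1 s instead of 10 s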
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/more_itertools/__init__.py DELETED
@@ -1,4 +0,0 @@
- from .more import *  # noqa
- from .recipes import *  # noqa
-
- __version__ = '8.12.0'
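
Reviewer note: the two star imports are what make every helper importable from the package root; for example, chunked is defined in more.py but surfaced here:

from more_itertools import chunked

print(list(chunked(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]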
 
spaces/Audio-AGI/AudioSep/utils.py DELETED
@@ -1,384 +0,0 @@
- import os
- import datetime
- import json
- import logging
- import librosa
- import pickle
- from typing import Dict
- import numpy as np
- import torch
- import torch.nn as nn
- import yaml
- from models.audiosep import AudioSep, get_model_class
-
-
- def ignore_warnings():
-     import warnings
-     # Ignore UserWarning from torch.meshgrid
-     warnings.filterwarnings('ignore', category=UserWarning, module='torch.functional')
-
-     # Refined regex pattern to capture variations in the warning message
-     pattern = r"Some weights of the model checkpoint at roberta-base were not used when initializing RobertaModel: \['lm_head\..*'\].*"
-     warnings.filterwarnings('ignore', message=pattern)
-
-
- def create_logging(log_dir, filemode):
-     os.makedirs(log_dir, exist_ok=True)
-     i1 = 0
-
-     while os.path.isfile(os.path.join(log_dir, "{:04d}.log".format(i1))):
-         i1 += 1
-
-     log_path = os.path.join(log_dir, "{:04d}.log".format(i1))
-     logging.basicConfig(
-         level=logging.DEBUG,
-         format="%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s",
-         datefmt="%a, %d %b %Y %H:%M:%S",
-         filename=log_path,
-         filemode=filemode,
-     )
-
-     # Print to console
-     console = logging.StreamHandler()
-     console.setLevel(logging.INFO)
-     formatter = logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s")
-     console.setFormatter(formatter)
-     logging.getLogger("").addHandler(console)
-
-     return logging
-
-
- def float32_to_int16(x: float) -> int:
-     x = np.clip(x, a_min=-1, a_max=1)
-     return (x * 32767.0).astype(np.int16)
-
-
- def int16_to_float32(x: int) -> float:
-     return (x / 32767.0).astype(np.float32)
-
-
- def parse_yaml(config_yaml: str) -> Dict:
-     r"""Parse yaml file.
-
-     Args:
-         config_yaml (str): config yaml path
-
-     Returns:
-         yaml_dict (Dict): parsed yaml file
-     """
-     with open(config_yaml, "r") as fr:
-         return yaml.load(fr, Loader=yaml.FullLoader)
-
-
- def get_audioset632_id_to_lb(ontology_path: str) -> Dict:
-     r"""Get AudioSet 632 classes ID to label mapping."""
-     audioset632_id_to_lb = {}
-
-     with open(ontology_path) as f:
-         data_list = json.load(f)
-
-     for e in data_list:
-         audioset632_id_to_lb[e["id"]] = e["name"]
-
-     return audioset632_id_to_lb
-
-
- def load_pretrained_panns(
-     model_type: str,
-     checkpoint_path: str,
-     freeze: bool
- ) -> nn.Module:
-     r"""Load pretrained audio neural networks (PANNs).
-
-     Args:
-         model_type: str, e.g., "Cnn14"
-         checkpoint_path, str, e.g., "Cnn14_mAP=0.431.pth"
-         freeze: bool
-
-     Returns:
-         model: nn.Module
-     """
-     # NOTE: Cnn14 and Cnn14_DecisionLevelMax are not imported in this module;
-     # they must be supplied by the PANNs codebase for this function to run.
-     if model_type == "Cnn14":
-         Model = Cnn14
-
-     elif model_type == "Cnn14_DecisionLevelMax":
-         Model = Cnn14_DecisionLevelMax
-
-     else:
-         raise NotImplementedError
-
-     model = Model(sample_rate=32000, window_size=1024, hop_size=320,
-                   mel_bins=64, fmin=50, fmax=14000, classes_num=527)
-
-     if checkpoint_path:
-         checkpoint = torch.load(checkpoint_path, map_location="cpu")
-         model.load_state_dict(checkpoint["model"])
-
-     if freeze:
-         for param in model.parameters():
-             param.requires_grad = False
-
-     return model
-
-
- def energy(x):
-     return torch.mean(x ** 2)
-
-
- def magnitude_to_db(x):
-     eps = 1e-10
-     return 20. * np.log10(max(x, eps))
-
-
- def db_to_magnitude(x):
-     return 10. ** (x / 20)
-
-
- def ids_to_hots(ids, classes_num, device):
-     hots = torch.zeros(classes_num).to(device)
-     for id in ids:
-         hots[id] = 1
-     return hots
-
-
- def calculate_sdr(
-     ref: np.ndarray,
-     est: np.ndarray,
-     eps=1e-10
- ) -> float:
-     r"""Calculate SDR between reference and estimation.
-
-     Args:
-         ref (np.ndarray), reference signal
-         est (np.ndarray), estimated signal
-     """
-     reference = ref
-     noise = est - reference
-
-     numerator = np.clip(a=np.mean(reference ** 2), a_min=eps, a_max=None)
-
-     denominator = np.clip(a=np.mean(noise ** 2), a_min=eps, a_max=None)
-
-     sdr = 10. * np.log10(numerator / denominator)
-
-     return sdr
-
-
- def calculate_sisdr(ref, est):
-     r"""Calculate scale-invariant SDR between reference and estimation.
-
-     Args:
-         ref (np.ndarray), reference signal
-         est (np.ndarray), estimated signal
-     """
-     eps = np.finfo(ref.dtype).eps
-
-     reference = ref.copy()
-     estimate = est.copy()
-
-     reference = reference.reshape(reference.size, 1)
-     estimate = estimate.reshape(estimate.size, 1)
-
-     Rss = np.dot(reference.T, reference)
-     # get the scaling factor for clean sources
-     a = (eps + np.dot(reference.T, estimate)) / (Rss + eps)
-
-     e_true = a * reference
-     e_res = estimate - e_true
-
-     Sss = (e_true ** 2).sum()
-     Snn = (e_res ** 2).sum()
-
-     sisdr = 10 * np.log10((eps + Sss) / (eps + Snn))
-
-     return sisdr
-
-
- class StatisticsContainer(object):
-     def __init__(self, statistics_path):
-         self.statistics_path = statistics_path
-
-         self.backup_statistics_path = "{}_{}.pkl".format(
-             os.path.splitext(self.statistics_path)[0],
-             datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
-         )
-
-         self.statistics_dict = {"balanced_train": [], "test": []}
-
-     def append(self, steps, statistics, split, flush=True):
-         statistics["steps"] = steps
-         self.statistics_dict[split].append(statistics)
-
-         if flush:
-             self.flush()
-
-     def flush(self):
-         pickle.dump(self.statistics_dict, open(self.statistics_path, "wb"))
-         pickle.dump(self.statistics_dict, open(self.backup_statistics_path, "wb"))
-         logging.info("    Dump statistics to {}".format(self.statistics_path))
-         logging.info("    Dump statistics to {}".format(self.backup_statistics_path))
-
-
- def get_mean_sdr_from_dict(sdris_dict):
-     mean_sdr = np.nanmean(list(sdris_dict.values()))
-     return mean_sdr
-
-
- def remove_silence(audio: np.ndarray, sample_rate: int) -> np.ndarray:
-     r"""Remove silent frames."""
-     window_size = int(sample_rate * 0.1)
-     threshold = 0.02
-
-     frames = librosa.util.frame(x=audio, frame_length=window_size, hop_length=window_size).T
-     # shape: (frames_num, window_size)
-
-     new_frames = get_active_frames(frames, threshold)
-     # shape: (new_frames_num, window_size)
-
-     new_audio = new_frames.flatten()
-     # shape: (new_audio_samples,)
-
-     return new_audio
-
-
- def get_active_frames(frames: np.ndarray, threshold: float) -> np.ndarray:
-     r"""Get active frames."""
-     energy = np.max(np.abs(frames), axis=-1)
-     # shape: (frames_num,)
-
-     active_indexes = np.where(energy > threshold)[0]
-     # shape: (new_frames_num,)
-
-     new_frames = frames[active_indexes]
-     # shape: (new_frames_num,)
-
-     return new_frames
-
-
- def repeat_to_length(audio: np.ndarray, segment_samples: int) -> np.ndarray:
-     r"""Repeat audio to length."""
-     repeats_num = (segment_samples // audio.shape[-1]) + 1
-     audio = np.tile(audio, repeats_num)[0 : segment_samples]
-
-     return audio
-
-
- def calculate_segmentwise_sdr(ref, est, hop_samples, return_sdr_list=False):
-     min_len = min(ref.shape[-1], est.shape[-1])
-     pointer = 0
-     sdrs = []
-     while pointer + hop_samples < min_len:
-         sdr = calculate_sdr(
-             ref=ref[:, pointer : pointer + hop_samples],
-             est=est[:, pointer : pointer + hop_samples],
-         )
-         sdrs.append(sdr)
-         pointer += hop_samples
-
-     sdr = np.nanmedian(sdrs)
-
-     if return_sdr_list:
-         return sdr, sdrs
-     else:
-         return sdr
-
-
- def loudness(data, input_loudness, target_loudness):
-     """Loudness normalize a signal.
-
-     Normalize an input signal to a user loudness in dB LKFS.
-
-     Params
-     -------
-     data : torch.Tensor
-         Input multichannel audio data.
-     input_loudness : float
-         Loudness of the input in dB LUFS.
-     target_loudness : float
-         Target loudness of the output in dB LUFS.
-
-     Returns
-     -------
-     output : torch.Tensor
-         Loudness normalized output data.
-     """
-     # calculate the gain needed to scale to the desired loudness level
-     delta_loudness = target_loudness - input_loudness
-     gain = torch.pow(10.0, delta_loudness / 20.0)
-
-     output = gain * data
-
-     # check for potentially clipped samples
-     # if torch.max(torch.abs(output)) >= 1.0:
-     #     warnings.warn("Possible clipped samples in output.")
-
-     return output
-
-
- def load_ss_model(
-     configs: Dict,
-     checkpoint_path: str,
-     query_encoder: nn.Module
- ) -> nn.Module:
-     r"""Load trained universal source separation model.
-
-     Args:
-         configs (Dict)
-         checkpoint_path (str): path of the checkpoint to load
-         device (str): e.g., "cpu" | "cuda"
-
-     Returns:
-         pl_model: pl.LightningModule
-     """
-     ss_model_type = configs["model"]["model_type"]
-     input_channels = configs["model"]["input_channels"]
-     output_channels = configs["model"]["output_channels"]
-     condition_size = configs["model"]["condition_size"]
-
-     # Initialize separation model
-     SsModel = get_model_class(model_type=ss_model_type)
-
-     ss_model = SsModel(
-         input_channels=input_channels,
-         output_channels=output_channels,
-         condition_size=condition_size,
-     )
-
-     # Load PyTorch Lightning model
-     pl_model = AudioSep.load_from_checkpoint(
-         checkpoint_path=checkpoint_path,
-         strict=False,
-         ss_model=ss_model,
-         waveform_mixer=None,
-         query_encoder=query_encoder,
-         loss_function=None,
-         optimizer_type=None,
-         learning_rate=None,
-         lr_lambda_func=None,
-         map_location=torch.device('cpu'),
-     )
-
-     return pl_model
-
-
- # NOTE: duplicate of the parse_yaml defined earlier in this file.
- def parse_yaml(config_yaml: str) -> Dict:
-     r"""Parse yaml file.
-
-     Args:
-         config_yaml (str): config yaml path
-
-     Returns:
-         yaml_dict (Dict): parsed yaml file
-     """
-     with open(config_yaml, "r") as fr:
-         return yaml.load(fr, Loader=yaml.FullLoader)
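
Reviewer note: a quick numeric sanity check for calculate_sisdr above, highlighting the scale invariance; numpy only, and it assumes utils.py is importable.

import numpy as np

rng = np.random.default_rng(0)
ref = rng.standard_normal(16000).astype(np.float32)
est = ref + 0.01 * rng.standard_normal(16000).astype(np.float32)  # roughly 40 dB SNR

print(calculate_sisdr(ref, est))        # roughly 40 dB
print(calculate_sisdr(ref, 3.0 * est))  # same value: rescaling the estimate is free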
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_model_zoo.py DELETED
@@ -1,50 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import logging
- import unittest
-
- from detectron2 import model_zoo
- from detectron2.config import instantiate
- from detectron2.modeling import FPN, GeneralizedRCNN
-
- logger = logging.getLogger(__name__)
-
-
- class TestModelZoo(unittest.TestCase):
-     def test_get_returns_model(self):
-         model = model_zoo.get("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml", trained=False)
-         self.assertIsInstance(model, GeneralizedRCNN)
-         self.assertIsInstance(model.backbone, FPN)
-
-     def test_get_invalid_model(self):
-         self.assertRaises(RuntimeError, model_zoo.get, "Invalid/config.yaml")
-
-     def test_get_url(self):
-         url = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml")
-         self.assertEqual(
-             url,
-             "https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/model_final_01ca85.pkl",  # noqa
-         )
-         url2 = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.py")
-         self.assertEqual(url, url2)
-
-     def _build_lazy_model(self, name):
-         cfg = model_zoo.get_config("common/models/" + name)
-         instantiate(cfg.model)
-
-     def test_mask_rcnn_fpn(self):
-         self._build_lazy_model("mask_rcnn_fpn.py")
-
-     def test_mask_rcnn_c4(self):
-         self._build_lazy_model("mask_rcnn_c4.py")
-
-     def test_panoptic_fpn(self):
-         self._build_lazy_model("panoptic_fpn.py")
-
-     def test_schedule(self):
-         cfg = model_zoo.get_config("common/coco_schedule.py")
-         for _, v in cfg.items():
-             instantiate(v)
-
-
- if __name__ == "__main__":
-     unittest.main()
 
spaces/Bart92/RVC_HF/infer_batch_rvc.py DELETED
@@ -1,215 +0,0 @@
1
- """
2
- v1
3
- runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "E:\codes\py39\RVC-beta\output" "E:\codes\py39\test-20230416b\weights\mi-test.pth" 0.66 cuda:0 True 3 0 1 0.33
4
- v2
5
- runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\test-20230416b\logs\mi-test-v2\aadded_IVF677_Flat_nprobe_1_v2.index" harvest "E:\codes\py39\RVC-beta\output_v2" "E:\codes\py39\test-20230416b\weights\mi-test-v2.pth" 0.66 cuda:0 True 3 0 1 0.33
6
- """
7
- import os, sys, pdb, torch
8
-
9
- now_dir = os.getcwd()
10
- sys.path.append(now_dir)
11
- import sys
12
- import torch
13
- import tqdm as tq
14
- from multiprocessing import cpu_count
15
-
16
-
17
- class Config:
18
- def __init__(self, device, is_half):
19
- self.device = device
20
- self.is_half = is_half
21
- self.n_cpu = 0
22
- self.gpu_name = None
23
- self.gpu_mem = None
24
- self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
25
-
26
- def device_config(self) -> tuple:
27
- if torch.cuda.is_available():
28
- i_device = int(self.device.split(":")[-1])
29
- self.gpu_name = torch.cuda.get_device_name(i_device)
30
- if (
31
- ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
32
- or "P40" in self.gpu_name.upper()
33
- or "1060" in self.gpu_name
34
- or "1070" in self.gpu_name
35
- or "1080" in self.gpu_name
36
- ):
37
- print("16系/10系显卡和P40强制单精度")
38
- self.is_half = False
39
- for config_file in ["32k.json", "40k.json", "48k.json"]:
40
- with open(f"configs/{config_file}", "r") as f:
41
- strr = f.read().replace("true", "false")
42
- with open(f"configs/{config_file}", "w") as f:
43
- f.write(strr)
44
- with open("infer/modules/train/preprocess.py", "r") as f:
45
- strr = f.read().replace("3.7", "3.0")
46
- with open("infer/modules/train/preprocess.py", "w") as f:
47
- f.write(strr)
48
- else:
49
- self.gpu_name = None
50
- self.gpu_mem = int(
51
- torch.cuda.get_device_properties(i_device).total_memory
52
- / 1024
53
- / 1024
54
- / 1024
55
- + 0.4
56
- )
57
- if self.gpu_mem <= 4:
58
- with open("infer/modules/train/preprocess.py", "r") as f:
59
- strr = f.read().replace("3.7", "3.0")
60
- with open("infer/modules/train/preprocess.py", "w") as f:
61
- f.write(strr)
62
- elif torch.backends.mps.is_available():
63
- print("没有发现支持的N卡, 使用MPS进行推理")
64
- self.device = "mps"
65
- else:
66
- print("没有发现支持的N卡, 使用CPU进行推理")
67
- self.device = "cpu"
68
- self.is_half = True
69
-
70
- if self.n_cpu == 0:
71
- self.n_cpu = cpu_count()
72
-
73
- if self.is_half:
74
- # 6G显存配置
75
- x_pad = 3
76
- x_query = 10
77
-             x_center = 60
-             x_max = 65
-         else:
-             # configuration for 5 GB of GPU memory
-             x_pad = 1
-             x_query = 6
-             x_center = 38
-             x_max = 41
-
-         if self.gpu_mem is not None and self.gpu_mem <= 4:
-             x_pad = 1
-             x_query = 5
-             x_center = 30
-             x_max = 32
-
-         return x_pad, x_query, x_center, x_max
-
-
- f0up_key = sys.argv[1]
- input_path = sys.argv[2]
- index_path = sys.argv[3]
- f0method = sys.argv[4]  # harvest or pm
- opt_path = sys.argv[5]
- model_path = sys.argv[6]
- index_rate = float(sys.argv[7])
- device = sys.argv[8]
- is_half = sys.argv[9].lower() != "false"
- filter_radius = int(sys.argv[10])
- resample_sr = int(sys.argv[11])
- rms_mix_rate = float(sys.argv[12])
- protect = float(sys.argv[13])
- print(sys.argv)
- config = Config(device, is_half)
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- from infer.modules.vc.modules import VC
- from lib.infer_pack.models import (
-     SynthesizerTrnMs256NSFsid,
-     SynthesizerTrnMs256NSFsid_nono,
-     SynthesizerTrnMs768NSFsid,
-     SynthesizerTrnMs768NSFsid_nono,
- )
- from infer.lib.audio import load_audio
- from fairseq import checkpoint_utils
- from scipy.io import wavfile
-
- hubert_model = None
-
-
- def load_hubert():
-     global hubert_model
-     models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
-         ["hubert_base.pt"],
-         suffix="",
-     )
-     hubert_model = models[0]
-     hubert_model = hubert_model.to(device)
-     if is_half:
-         hubert_model = hubert_model.half()
-     else:
-         hubert_model = hubert_model.float()
-     hubert_model.eval()
-
-
- def vc_single(sid, input_audio, f0_up_key, f0_file, f0_method, file_index, index_rate):
-     global tgt_sr, net_g, vc, hubert_model, version
-     if input_audio is None:
-         return "You need to upload an audio", None
-     f0_up_key = int(f0_up_key)
-     audio = load_audio(input_audio, 16000)
-     times = [0, 0, 0]
-     if hubert_model is None:
-         load_hubert()
-     if_f0 = cpt.get("f0", 1)
-     # audio_opt = vc.pipeline(hubert_model, net_g, sid, audio, times, f0_up_key, f0_method, file_index, file_big_npy, index_rate, if_f0, f0_file=f0_file)
-     audio_opt = vc.pipeline(
-         hubert_model,
-         net_g,
-         sid,
-         audio,
-         input_audio,
-         times,
-         f0_up_key,
-         f0_method,
-         file_index,
-         index_rate,
-         if_f0,
-         filter_radius,
-         tgt_sr,
-         resample_sr,
-         rms_mix_rate,
-         version,
-         protect,
-         f0_file=f0_file,
-     )
-     print(times)
-     return audio_opt
-
-
- def get_vc(model_path):
-     global n_spk, tgt_sr, net_g, vc, cpt, device, is_half, version
-     print("loading pth %s" % model_path)
-     cpt = torch.load(model_path, map_location="cpu")
-     tgt_sr = cpt["config"][-1]
-     cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
-     if_f0 = cpt.get("f0", 1)
-     version = cpt.get("version", "v1")
-     if version == "v1":
-         if if_f0 == 1:
-             net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
-         else:
-             net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-     elif version == "v2":
-         if if_f0 == 1:
-             net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
-         else:
-             net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
-     del net_g.enc_q
-     print(net_g.load_state_dict(cpt["weight"], strict=False))  # without this line, stale parameters are not fully cleared
-     net_g.eval().to(device)
-     if is_half:
-         net_g = net_g.half()
-     else:
-         net_g = net_g.float()
-     vc = VC(tgt_sr, config)
-     n_spk = cpt["config"][-3]
-     # return {"visible": True, "maximum": n_spk, "__type__": "update"}
-
-
- get_vc(model_path)
- audios = os.listdir(input_path)
- for file in tq.tqdm(audios):
-     if file.endswith(".wav"):
-         file_path = input_path + "/" + file
-         wav_opt = vc_single(
-             0, file_path, f0up_key, None, f0method, index_path, index_rate
-         )
-         out_path = opt_path + "/" + file
-         wavfile.write(out_path, tgt_sr, wav_opt)
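
The script above reads thirteen positional arguments from sys.argv, which is easy to get wrong from the shell. A minimal invocation sketch, with the argument order mirrored from the parsing above; the file and directory names here are hypothetical placeholders, not files shipped with the repo:

    import subprocess

    # Order: f0up_key, input_path, index_path, f0method, opt_path, model_path,
    # index_rate, device, is_half, filter_radius, resample_sr, rms_mix_rate, protect
    subprocess.run([
        "python", "infer_batch_rvc.py",
        "0",                  # f0up_key: pitch shift in semitones
        "inputs/",            # input_path: directory of .wav files
        "logs/added.index",   # index_path (hypothetical)
        "harvest",            # f0method: harvest or pm
        "outputs/",           # opt_path: output directory
        "weights/model.pth",  # model_path (hypothetical)
        "0.66",               # index_rate
        "cuda:0",             # device
        "true",               # is_half (anything but "false" enables fp16)
        "3",                  # filter_radius
        "0",                  # resample_sr
        "1.0",                # rms_mix_rate
        "0.33",               # protect
    ], check=True)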
 
spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/dataset.py DELETED
@@ -1,183 +0,0 @@
- import os
- import random
-
- import numpy as np
- import torch
- import torch.utils.data
- from tqdm import tqdm
-
- from . import spec_utils
-
-
- class VocalRemoverValidationSet(torch.utils.data.Dataset):
-     def __init__(self, patch_list):
-         self.patch_list = patch_list
-
-     def __len__(self):
-         return len(self.patch_list)
-
-     def __getitem__(self, idx):
-         path = self.patch_list[idx]
-         data = np.load(path)
-
-         X, y = data["X"], data["y"]
-
-         X_mag = np.abs(X)
-         y_mag = np.abs(y)
-
-         return X_mag, y_mag
-
-
- def make_pair(mix_dir, inst_dir):
-     input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"]
-
-     X_list = sorted(
-         [
-             os.path.join(mix_dir, fname)
-             for fname in os.listdir(mix_dir)
-             if os.path.splitext(fname)[1] in input_exts
-         ]
-     )
-     y_list = sorted(
-         [
-             os.path.join(inst_dir, fname)
-             for fname in os.listdir(inst_dir)
-             if os.path.splitext(fname)[1] in input_exts
-         ]
-     )
-
-     filelist = list(zip(X_list, y_list))
-
-     return filelist
-
-
- def train_val_split(dataset_dir, split_mode, val_rate, val_filelist):
-     if split_mode == "random":
-         filelist = make_pair(
-             os.path.join(dataset_dir, "mixtures"),
-             os.path.join(dataset_dir, "instruments"),
-         )
-
-         random.shuffle(filelist)
-
-         if len(val_filelist) == 0:
-             val_size = int(len(filelist) * val_rate)
-             train_filelist = filelist[:-val_size]
-             val_filelist = filelist[-val_size:]
-         else:
-             train_filelist = [
-                 pair for pair in filelist if list(pair) not in val_filelist
-             ]
-     elif split_mode == "subdirs":
-         if len(val_filelist) != 0:
-             raise ValueError(
-                 "The `val_filelist` option is not available in `subdirs` mode"
-             )
-
-         train_filelist = make_pair(
-             os.path.join(dataset_dir, "training/mixtures"),
-             os.path.join(dataset_dir, "training/instruments"),
-         )
-
-         val_filelist = make_pair(
-             os.path.join(dataset_dir, "validation/mixtures"),
-             os.path.join(dataset_dir, "validation/instruments"),
-         )
-
-     return train_filelist, val_filelist
-
-
- def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha):
-     perm = np.random.permutation(len(X))
-     for i, idx in enumerate(tqdm(perm)):
-         if np.random.uniform() < reduction_rate:
-             y[idx] = spec_utils.reduce_vocal_aggressively(
-                 X[idx], y[idx], reduction_mask
-             )
-
-         if np.random.uniform() < 0.5:
-             # swap channel
-             X[idx] = X[idx, ::-1]
-             y[idx] = y[idx, ::-1]
-         if np.random.uniform() < 0.02:
-             # mono
-             X[idx] = X[idx].mean(axis=0, keepdims=True)
-             y[idx] = y[idx].mean(axis=0, keepdims=True)
-         if np.random.uniform() < 0.02:
-             # inst
-             X[idx] = y[idx]
-
-         if np.random.uniform() < mixup_rate and i < len(perm) - 1:
-             lam = np.random.beta(mixup_alpha, mixup_alpha)
-             X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]]
-             y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]]
-
-     return X, y
-
-
- def make_padding(width, cropsize, offset):
-     left = offset
-     roi_size = cropsize - left * 2
-     if roi_size == 0:
-         roi_size = cropsize
-     right = roi_size - (width % roi_size) + left
-
-     return left, right, roi_size
-
-
- def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset):
-     len_dataset = patches * len(filelist)
-
-     X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
-     y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
-
-     for i, (X_path, y_path) in enumerate(tqdm(filelist)):
-         X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
-         coef = np.max([np.abs(X).max(), np.abs(y).max()])
-         X, y = X / coef, y / coef
-
-         l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
-         X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
-         y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")
-
-         starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches)
-         ends = starts + cropsize
-         for j in range(patches):
-             idx = i * patches + j
-             X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]]
-             y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]]
-
-     return X_dataset, y_dataset
-
-
- def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset):
-     patch_list = []
-     patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format(
-         cropsize, sr, hop_length, n_fft, offset
-     )
-     os.makedirs(patch_dir, exist_ok=True)
-
-     for i, (X_path, y_path) in enumerate(tqdm(filelist)):
-         basename = os.path.splitext(os.path.basename(X_path))[0]
-
-         X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
-         coef = np.max([np.abs(X).max(), np.abs(y).max()])
-         X, y = X / coef, y / coef
-
-         l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
-         X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
-         y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")
-
-         len_dataset = int(np.ceil(X.shape[2] / roi_size))
-         for j in range(len_dataset):
-             outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j))
-             start = j * roi_size
-             if not os.path.exists(outpath):
-                 np.savez(
-                     outpath,
-                     X=X_pad[:, :, start : start + cropsize],
-                     y=y_pad[:, :, start : start + cropsize],
-                 )
-             patch_list.append(outpath)
-
-     return VocalRemoverValidationSet(patch_list)
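
For context, a sketch of how these helpers compose into a training pipeline. The directory layout follows the "mixtures"/"instruments" convention the code reads; the root path and hyperparameter values are illustrative assumptions, not values from this repo:

    # Split a dataset laid out as <root>/mixtures and <root>/instruments,
    # then materialize random training patches and cached validation patches.
    train_files, val_files = train_val_split(
        dataset_dir="dataset",   # hypothetical root directory
        split_mode="random",
        val_rate=0.1,
        val_filelist=[],
    )
    X_train, y_train = make_training_set(
        train_files, cropsize=256, patches=16,
        sr=44100, hop_length=1024, n_fft=2048, offset=64,
    )
    val_set = make_validation_set(
        val_files, cropsize=256, sr=44100,
        hop_length=1024, n_fft=2048, offset=64,
    )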
 
spaces/Belshia/shia/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Shia
- emoji: 🌍
- colorFrom: purple
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- license: openrail
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/_version.py DELETED
@@ -1,5 +0,0 @@
- # coding: utf-8
- # file generated by setuptools_scm
- # don't change, don't track in version control
- version = '2.8.2'
- version_tuple = (2, 8, 2)
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py DELETED
@@ -1,537 +0,0 @@
- from __future__ import absolute_import
-
- import collections
- import functools
- import logging
-
- from ._collections import RecentlyUsedContainer
- from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
- from .exceptions import (
-     LocationValueError,
-     MaxRetryError,
-     ProxySchemeUnknown,
-     ProxySchemeUnsupported,
-     URLSchemeUnknown,
- )
- from .packages import six
- from .packages.six.moves.urllib.parse import urljoin
- from .request import RequestMethods
- from .util.proxy import connection_requires_http_tunnel
- from .util.retry import Retry
- from .util.url import parse_url
-
- __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
-
-
- log = logging.getLogger(__name__)
-
- SSL_KEYWORDS = (
-     "key_file",
-     "cert_file",
-     "cert_reqs",
-     "ca_certs",
-     "ssl_version",
-     "ca_cert_dir",
-     "ssl_context",
-     "key_password",
-     "server_hostname",
- )
-
- # All known keyword arguments that could be provided to the pool manager, its
- # pools, or the underlying connections. This is used to construct a pool key.
- _key_fields = (
-     "key_scheme",  # str
-     "key_host",  # str
-     "key_port",  # int
-     "key_timeout",  # int or float or Timeout
-     "key_retries",  # int or Retry
-     "key_strict",  # bool
-     "key_block",  # bool
-     "key_source_address",  # str
-     "key_key_file",  # str
-     "key_key_password",  # str
-     "key_cert_file",  # str
-     "key_cert_reqs",  # str
-     "key_ca_certs",  # str
-     "key_ssl_version",  # str
-     "key_ca_cert_dir",  # str
-     "key_ssl_context",  # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
-     "key_maxsize",  # int
-     "key_headers",  # dict
-     "key__proxy",  # parsed proxy url
-     "key__proxy_headers",  # dict
-     "key__proxy_config",  # class
-     "key_socket_options",  # list of (level (int), optname (int), value (int or str)) tuples
-     "key__socks_options",  # dict
-     "key_assert_hostname",  # bool or string
-     "key_assert_fingerprint",  # str
-     "key_server_hostname",  # str
- )
-
- #: The namedtuple class used to construct keys for the connection pool.
- #: All custom key schemes should include the fields in this key at a minimum.
- PoolKey = collections.namedtuple("PoolKey", _key_fields)
-
- _proxy_config_fields = ("ssl_context", "use_forwarding_for_https")
- ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields)
-
-
- def _default_key_normalizer(key_class, request_context):
-     """
-     Create a pool key out of a request context dictionary.
-
-     According to RFC 3986, both the scheme and host are case-insensitive.
-     Therefore, this function normalizes both before constructing the pool
-     key for an HTTPS request. If you wish to change this behaviour, provide
-     alternate callables to ``key_fn_by_scheme``.
-
-     :param key_class:
-         The class to use when constructing the key. This should be a namedtuple
-         with the ``scheme`` and ``host`` keys at a minimum.
-     :type key_class: namedtuple
-     :param request_context:
-         A dictionary-like object that contain the context for a request.
-     :type request_context: dict
-
-     :return: A namedtuple that can be used as a connection pool key.
-     :rtype: PoolKey
-     """
-     # Since we mutate the dictionary, make a copy first
-     context = request_context.copy()
-     context["scheme"] = context["scheme"].lower()
-     context["host"] = context["host"].lower()
-
-     # These are both dictionaries and need to be transformed into frozensets
-     for key in ("headers", "_proxy_headers", "_socks_options"):
-         if key in context and context[key] is not None:
-             context[key] = frozenset(context[key].items())
-
-     # The socket_options key may be a list and needs to be transformed into a
-     # tuple.
-     socket_opts = context.get("socket_options")
-     if socket_opts is not None:
-         context["socket_options"] = tuple(socket_opts)
-
-     # Map the kwargs to the names in the namedtuple - this is necessary since
-     # namedtuples can't have fields starting with '_'.
-     for key in list(context.keys()):
-         context["key_" + key] = context.pop(key)
-
-     # Default to ``None`` for keys missing from the context
-     for field in key_class._fields:
-         if field not in context:
-             context[field] = None
-
-     return key_class(**context)
-
-
- #: A dictionary that maps a scheme to a callable that creates a pool key.
- #: This can be used to alter the way pool keys are constructed, if desired.
- #: Each PoolManager makes a copy of this dictionary so they can be configured
- #: globally here, or individually on the instance.
- key_fn_by_scheme = {
-     "http": functools.partial(_default_key_normalizer, PoolKey),
-     "https": functools.partial(_default_key_normalizer, PoolKey),
- }
-
- pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
-
-
- class PoolManager(RequestMethods):
-     """
-     Allows for arbitrary requests while transparently keeping track of
-     necessary connection pools for you.
-
-     :param num_pools:
-         Number of connection pools to cache before discarding the least
-         recently used pool.
-
-     :param headers:
-         Headers to include with all requests, unless other headers are given
-         explicitly.
-
-     :param \\**connection_pool_kw:
-         Additional parameters are used to create fresh
-         :class:`urllib3.connectionpool.ConnectionPool` instances.
-
-     Example::
-
-         >>> manager = PoolManager(num_pools=2)
-         >>> r = manager.request('GET', 'http://google.com/')
-         >>> r = manager.request('GET', 'http://google.com/mail')
-         >>> r = manager.request('GET', 'http://yahoo.com/')
-         >>> len(manager.pools)
-         2
-
-     """
-
-     proxy = None
-     proxy_config = None
-
-     def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
-         RequestMethods.__init__(self, headers)
-         self.connection_pool_kw = connection_pool_kw
-         self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close())
-
-         # Locally set the pool classes and keys so other PoolManagers can
-         # override them.
-         self.pool_classes_by_scheme = pool_classes_by_scheme
-         self.key_fn_by_scheme = key_fn_by_scheme.copy()
-
-     def __enter__(self):
-         return self
-
-     def __exit__(self, exc_type, exc_val, exc_tb):
-         self.clear()
-         # Return False to re-raise any potential exceptions
-         return False
-
-     def _new_pool(self, scheme, host, port, request_context=None):
-         """
-         Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
-         any additional pool keyword arguments.
-
-         If ``request_context`` is provided, it is provided as keyword arguments
-         to the pool class used. This method is used to actually create the
-         connection pools handed out by :meth:`connection_from_url` and
-         companion methods. It is intended to be overridden for customization.
-         """
-         pool_cls = self.pool_classes_by_scheme[scheme]
-         if request_context is None:
-             request_context = self.connection_pool_kw.copy()
-
-         # Although the context has everything necessary to create the pool,
-         # this function has historically only used the scheme, host, and port
-         # in the positional args. When an API change is acceptable these can
-         # be removed.
-         for key in ("scheme", "host", "port"):
-             request_context.pop(key, None)
-
-         if scheme == "http":
-             for kw in SSL_KEYWORDS:
-                 request_context.pop(kw, None)
-
-         return pool_cls(host, port, **request_context)
-
-     def clear(self):
-         """
-         Empty our store of pools and direct them all to close.
-
-         This will not affect in-flight connections, but they will not be
-         re-used after completion.
-         """
-         self.pools.clear()
-
-     def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
-         """
-         Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
-
-         If ``port`` isn't given, it will be derived from the ``scheme`` using
-         ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
-         provided, it is merged with the instance's ``connection_pool_kw``
-         variable and used to create the new connection pool, if one is
-         needed.
-         """
-
-         if not host:
-             raise LocationValueError("No host specified.")
-
-         request_context = self._merge_pool_kwargs(pool_kwargs)
-         request_context["scheme"] = scheme or "http"
-         if not port:
-             port = port_by_scheme.get(request_context["scheme"].lower(), 80)
-         request_context["port"] = port
-         request_context["host"] = host
-
-         return self.connection_from_context(request_context)
-
-     def connection_from_context(self, request_context):
-         """
-         Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
-
-         ``request_context`` must at least contain the ``scheme`` key and its
-         value must be a key in ``key_fn_by_scheme`` instance variable.
-         """
-         scheme = request_context["scheme"].lower()
-         pool_key_constructor = self.key_fn_by_scheme.get(scheme)
-         if not pool_key_constructor:
-             raise URLSchemeUnknown(scheme)
-         pool_key = pool_key_constructor(request_context)
-
-         return self.connection_from_pool_key(pool_key, request_context=request_context)
-
-     def connection_from_pool_key(self, pool_key, request_context=None):
-         """
-         Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
-
-         ``pool_key`` should be a namedtuple that only contains immutable
-         objects. At a minimum it must have the ``scheme``, ``host``, and
-         ``port`` fields.
-         """
-         with self.pools.lock:
-             # If the scheme, host, or port doesn't match existing open
-             # connections, open a new ConnectionPool.
-             pool = self.pools.get(pool_key)
-             if pool:
-                 return pool
-
-             # Make a fresh ConnectionPool of the desired type
-             scheme = request_context["scheme"]
-             host = request_context["host"]
-             port = request_context["port"]
-             pool = self._new_pool(scheme, host, port, request_context=request_context)
-             self.pools[pool_key] = pool
-
-         return pool
-
-     def connection_from_url(self, url, pool_kwargs=None):
-         """
-         Similar to :func:`urllib3.connectionpool.connection_from_url`.
-
-         If ``pool_kwargs`` is not provided and a new pool needs to be
-         constructed, ``self.connection_pool_kw`` is used to initialize
-         the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
-         is provided, it is used instead. Note that if a new pool does not
-         need to be created for the request, the provided ``pool_kwargs`` are
-         not used.
-         """
-         u = parse_url(url)
-         return self.connection_from_host(
-             u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
-         )
-
-     def _merge_pool_kwargs(self, override):
-         """
-         Merge a dictionary of override values for self.connection_pool_kw.
-
-         This does not modify self.connection_pool_kw and returns a new dict.
-         Any keys in the override dictionary with a value of ``None`` are
-         removed from the merged dictionary.
-         """
-         base_pool_kwargs = self.connection_pool_kw.copy()
-         if override:
-             for key, value in override.items():
-                 if value is None:
-                     try:
-                         del base_pool_kwargs[key]
-                     except KeyError:
-                         pass
-                 else:
-                     base_pool_kwargs[key] = value
-         return base_pool_kwargs
-
-     def _proxy_requires_url_absolute_form(self, parsed_url):
-         """
-         Indicates if the proxy requires the complete destination URL in the
-         request. Normally this is only needed when not using an HTTP CONNECT
-         tunnel.
-         """
-         if self.proxy is None:
-             return False
-
-         return not connection_requires_http_tunnel(
-             self.proxy, self.proxy_config, parsed_url.scheme
-         )
-
-     def _validate_proxy_scheme_url_selection(self, url_scheme):
-         """
-         Validates that were not attempting to do TLS in TLS connections on
-         Python2 or with unsupported SSL implementations.
-         """
-         if self.proxy is None or url_scheme != "https":
-             return
-
-         if self.proxy.scheme != "https":
-             return
-
-         if six.PY2 and not self.proxy_config.use_forwarding_for_https:
-             raise ProxySchemeUnsupported(
-                 "Contacting HTTPS destinations through HTTPS proxies "
-                 "'via CONNECT tunnels' is not supported in Python 2"
-             )
-
-     def urlopen(self, method, url, redirect=True, **kw):
-         """
-         Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
-         with custom cross-host redirect logic and only sends the request-uri
-         portion of the ``url``.
-
-         The given ``url`` parameter must be absolute, such that an appropriate
-         :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
-         """
-         u = parse_url(url)
-         self._validate_proxy_scheme_url_selection(u.scheme)
-
-         conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
-
-         kw["assert_same_host"] = False
-         kw["redirect"] = False
-
-         if "headers" not in kw:
-             kw["headers"] = self.headers.copy()
-
-         if self._proxy_requires_url_absolute_form(u):
-             response = conn.urlopen(method, url, **kw)
-         else:
-             response = conn.urlopen(method, u.request_uri, **kw)
-
-         redirect_location = redirect and response.get_redirect_location()
-         if not redirect_location:
-             return response
-
-         # Support relative URLs for redirecting.
-         redirect_location = urljoin(url, redirect_location)
-
-         # RFC 7231, Section 6.4.4
-         if response.status == 303:
-             method = "GET"
-
-         retries = kw.get("retries")
-         if not isinstance(retries, Retry):
-             retries = Retry.from_int(retries, redirect=redirect)
-
-         # Strip headers marked as unsafe to forward to the redirected location.
-         # Check remove_headers_on_redirect to avoid a potential network call within
-         # conn.is_same_host() which may use socket.gethostbyname() in the future.
-         if retries.remove_headers_on_redirect and not conn.is_same_host(
-             redirect_location
-         ):
-             headers = list(six.iterkeys(kw["headers"]))
-             for header in headers:
-                 if header.lower() in retries.remove_headers_on_redirect:
-                     kw["headers"].pop(header, None)
-
-         try:
-             retries = retries.increment(method, url, response=response, _pool=conn)
-         except MaxRetryError:
-             if retries.raise_on_redirect:
-                 response.drain_conn()
-                 raise
-             return response
-
-         kw["retries"] = retries
-         kw["redirect"] = redirect
-
-         log.info("Redirecting %s -> %s", url, redirect_location)
-
-         response.drain_conn()
-         return self.urlopen(method, redirect_location, **kw)
-
-
- class ProxyManager(PoolManager):
-     """
-     Behaves just like :class:`PoolManager`, but sends all requests through
-     the defined proxy, using the CONNECT method for HTTPS URLs.
-
-     :param proxy_url:
-         The URL of the proxy to be used.
-
-     :param proxy_headers:
-         A dictionary containing headers that will be sent to the proxy. In case
-         of HTTP they are being sent with each request, while in the
-         HTTPS/CONNECT case they are sent only once. Could be used for proxy
-         authentication.
-
-     :param proxy_ssl_context:
-         The proxy SSL context is used to establish the TLS connection to the
-         proxy when using HTTPS proxies.
-
-     :param use_forwarding_for_https:
-         (Defaults to False) If set to True will forward requests to the HTTPS
-         proxy to be made on behalf of the client instead of creating a TLS
-         tunnel via the CONNECT method. **Enabling this flag means that request
-         and response headers and content will be visible from the HTTPS proxy**
-         whereas tunneling keeps request and response headers and content
-         private. IP address, target hostname, SNI, and port are always visible
-         to an HTTPS proxy even when this flag is disabled.
-
-     Example:
-         >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
-         >>> r1 = proxy.request('GET', 'http://google.com/')
-         >>> r2 = proxy.request('GET', 'http://httpbin.org/')
-         >>> len(proxy.pools)
-         1
-         >>> r3 = proxy.request('GET', 'https://httpbin.org/')
-         >>> r4 = proxy.request('GET', 'https://twitter.com/')
-         >>> len(proxy.pools)
-         3
-
-     """
-
-     def __init__(
-         self,
-         proxy_url,
-         num_pools=10,
-         headers=None,
-         proxy_headers=None,
-         proxy_ssl_context=None,
-         use_forwarding_for_https=False,
-         **connection_pool_kw
-     ):
-
-         if isinstance(proxy_url, HTTPConnectionPool):
-             proxy_url = "%s://%s:%i" % (
-                 proxy_url.scheme,
-                 proxy_url.host,
-                 proxy_url.port,
-             )
-         proxy = parse_url(proxy_url)
-
-         if proxy.scheme not in ("http", "https"):
-             raise ProxySchemeUnknown(proxy.scheme)
-
-         if not proxy.port:
-             port = port_by_scheme.get(proxy.scheme, 80)
-             proxy = proxy._replace(port=port)
-
-         self.proxy = proxy
-         self.proxy_headers = proxy_headers or {}
-         self.proxy_ssl_context = proxy_ssl_context
-         self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https)
-
-         connection_pool_kw["_proxy"] = self.proxy
-         connection_pool_kw["_proxy_headers"] = self.proxy_headers
-         connection_pool_kw["_proxy_config"] = self.proxy_config
-
-         super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw)
-
-     def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
-         if scheme == "https":
-             return super(ProxyManager, self).connection_from_host(
-                 host, port, scheme, pool_kwargs=pool_kwargs
-             )
-
-         return super(ProxyManager, self).connection_from_host(
-             self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs
-         )
-
-     def _set_proxy_headers(self, url, headers=None):
-         """
-         Sets headers needed by proxies: specifically, the Accept and Host
-         headers. Only sets headers not provided by the user.
-         """
-         headers_ = {"Accept": "*/*"}
-
-         netloc = parse_url(url).netloc
-         if netloc:
-             headers_["Host"] = netloc
-
-         if headers:
-             headers_.update(headers)
-         return headers_
-
-     def urlopen(self, method, url, redirect=True, **kw):
-         "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
-         u = parse_url(url)
-         if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
-             # For connections using HTTP CONNECT, httplib sets the necessary
-             # headers on the CONNECT to the proxy. If we're not using CONNECT,
-             # we'll definitely need to set 'Host' at the very least.
-             headers = kw.get("headers", self.headers)
-             kw["headers"] = self._set_proxy_headers(url, headers)
-
-         return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
-
-
- def proxy_from_url(url, **kw):
-     return ProxyManager(proxy_url=url, **kw)
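
The docstrings above already show basic requests; what is less obvious is that a PoolManager reuses a pool only when the normalized PoolKey matches, with scheme and host lowercased by _default_key_normalizer. A small sketch of that behaviour (the host names are examples):

    import urllib3

    manager = urllib3.PoolManager(num_pools=10)
    manager.request("GET", "http://example.com/")
    manager.request("GET", "http://EXAMPLE.COM/other")  # host is case-normalized
    print(len(manager.pools))  # 1: both requests map to the same PoolKey

    manager.request("GET", "https://example.com/")      # different scheme -> new pool
    print(len(manager.pools))  # 2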
 
spaces/Boadiwaa/Recipes/openai/api_resources/experimental/completion_config.py DELETED
@@ -1,11 +0,0 @@
- from openai.api_resources.abstract import (
-     CreateableAPIResource,
-     DeletableAPIResource,
-     ListableAPIResource,
- )
-
-
- class CompletionConfig(
-     CreateableAPIResource, ListableAPIResource, DeletableAPIResource
- ):
-     OBJECT_NAME = "experimental.completion_configs"
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/unique_by_key.h DELETED
@@ -1,67 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/system/tbb/detail/execution_policy.h>
- #include <thrust/pair.h>
-
- namespace thrust
- {
- namespace system
- {
- namespace tbb
- {
- namespace detail
- {
-
-
- template<typename DerivedPolicy,
-          typename ForwardIterator1,
-          typename ForwardIterator2,
-          typename BinaryPredicate>
-   thrust::pair<ForwardIterator1,ForwardIterator2>
-     unique_by_key(execution_policy<DerivedPolicy> &exec,
-                   ForwardIterator1 keys_first,
-                   ForwardIterator1 keys_last,
-                   ForwardIterator2 values_first,
-                   BinaryPredicate binary_pred);
-
-
- template<typename DerivedPolicy,
-          typename InputIterator1,
-          typename InputIterator2,
-          typename OutputIterator1,
-          typename OutputIterator2,
-          typename BinaryPredicate>
-   thrust::pair<OutputIterator1,OutputIterator2>
-     unique_by_key_copy(execution_policy<DerivedPolicy> &exec,
-                        InputIterator1 keys_first,
-                        InputIterator1 keys_last,
-                        InputIterator2 values_first,
-                        OutputIterator1 keys_output,
-                        OutputIterator2 values_output,
-                        BinaryPredicate binary_pred);
-
-
- } // end namespace detail
- } // end namespace tbb
- } // end namespace system
- } // end namespace thrust
-
- #include <thrust/system/tbb/detail/unique_by_key.inl>
-
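
These declarations follow thrust's unique_by_key contract: consecutive keys that compare equal under the predicate are collapsed into a single entry, keeping the first key and value of each run. A rough Python analog of that contract, for illustration only, not the TBB implementation itself:

    def unique_by_key(keys, values, binary_pred=lambda a, b: a == b):
        """Collapse consecutive equal keys, keeping the first (key, value) per run."""
        out_keys, out_values = [], []
        for k, v in zip(keys, values):
            if out_keys and binary_pred(out_keys[-1], k):
                continue  # same run as the previous key: drop it
            out_keys.append(k)
            out_values.append(v)
        return out_keys, out_values

    print(unique_by_key([1, 1, 2, 2, 3], ["a", "b", "c", "d", "e"]))
    # ([1, 2, 3], ['a', 'c', 'e'])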
 
spaces/CVPR/unicl-zero-shot-img-recog/model/text_encoder/build.py DELETED
@@ -1,31 +0,0 @@
- import os
-
- from transformers import CLIPTokenizer
- from transformers import AutoTokenizer
-
- from .registry import lang_encoders
- from .registry import is_lang_encoder
-
-
- def build_lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
-     model_name = config_encoder['NAME']
-
-     if not is_lang_encoder(model_name):
-         raise ValueError(f'Unknown model: {model_name}')
-
-     return lang_encoders(model_name)(config_encoder, tokenizer, verbose, **kwargs)
-
-
- def build_tokenizer(config_encoder):
-     tokenizer = None
-     os.environ['TOKENIZERS_PARALLELISM'] = 'true'
-     if config_encoder['TOKENIZER'] == 'clip':
-         pretrained_tokenizer = config_encoder.get(
-             'PRETRAINED_TOKENIZER', 'openai/clip-vit-base-patch32'
-         )
-         tokenizer = CLIPTokenizer.from_pretrained(pretrained_tokenizer)
-         tokenizer.add_special_tokens({'cls_token': tokenizer.eos_token})
-     else:
-         tokenizer = AutoTokenizer.from_pretrained(config_encoder['TOKENIZER'])
-
-     return tokenizer
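
A sketch of the config dictionaries build_tokenizer expects, inferred from the keys it reads ('TOKENIZER' and, on the clip branch, the optional 'PRETRAINED_TOKENIZER'); the exact schema used elsewhere in the repo may carry more keys:

    clip_cfg = {"TOKENIZER": "clip"}              # falls back to openai/clip-vit-base-patch32
    tokenizer = build_tokenizer(clip_cfg)         # CLIPTokenizer with cls_token = eos_token

    hf_cfg = {"TOKENIZER": "bert-base-uncased"}   # any AutoTokenizer-compatible model name
    tokenizer = build_tokenizer(hf_cfg)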
 
spaces/CognitiveLabs/GPT-auto-webscraping/app.py DELETED
@@ -1,107 +0,0 @@
- from AssistantService import GPTAssistant
- from openai.error import AuthenticationError
- import streamlit as st
- from langsmith.run_helpers import traceable
- import configparser
- import os
-
- config = configparser.ConfigParser()
- config.read('config.ini')
- if 'DEFAULT' in config:
-     assistant_api_key = config['DEFAULT'].get('API-KEY', '')
-
- os.environ["LANGCHAIN_TRACING_V2"] = "true"
- os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
- os.environ["LANGCHAIN_API_KEY"] = st.secrets["LANGCHAIN_API_KEY"]
- os.environ["LANGCHAIN_PROJECT"] = st.secrets["LANGCHAIN_PROJECT"]
-
- @traceable(run_type="tool")
- def start_session(session_started):
-     st.session_state['session_started'] = session_started
-     return session_started
-
- # change session_started to True
- if 'session_started' not in st.session_state:
-     start_session(True)
-
- st.write("This app helps you to extract data from HTML code using web scraping. It uses *GPT-3.5-turbo-16k* to generate the code for you. \n *Contribute to this project on [GitHub](https://github.com/CognitiveLabs/GPT-auto-webscraping)*")
-
- with st.expander(label="Check out the video demo"):
-     yt_video = st.video("https://www.youtube.com/watch?v=_zeCun4OlCc")
-
- info_text = """
- **Quick start** \n
- Fill the input with <HTML code>.
- - Choose a repeating element on the page, like a product on a list.
- - Inspect the HTML code and copy the element.
- - After generating the "output format" and the code, paste the complete HTML code of the page in the last input to test it
- """
- st.write(info_text)
- st.image("https://j.gifs.com/gpqvPl.gif", width=600)
-
-
- if assistant_api_key == '':
-     assistant_api_key = st.secrets["API_KEY"]
-     if assistant_api_key:
-         gpt_assistant = GPTAssistant(assistant_api_key)
- else:
-     gpt_assistant = GPTAssistant(assistant_api_key)
-
- # get the html content
- html_content = st.text_input("Paste the HTML tags of the item you want to extract:", max_chars=10000, help="example: <li>Product 1 </li>, watch the video above")
- # check if html_content is an url, and show error if it is
- if html_content:
-     if html_content.startswith("http"):
-         st.write("Please paste the HTML piece code, not the URL")
-         html_content = None
-
- extract_button = st.button("Generate output format & code")
-
-
- if html_content and extract_button:
-     try:
-         st.write("1/2: Generating the output format...")
-         output = gpt_assistant.chain_response_format(html_content)
-         st.session_state['output_format'] = output
-     except NameError:
-         st.write("Complete the API key field")
-     except AuthenticationError:
-         st.write("Invalid API key")
-
- if 'output_format' in st.session_state:
-     output_format = st.code(st.session_state['output_format'], language="json")
-
-     try:
-         st.write("2/2: Generating the code...")
-         python_code = gpt_assistant.chain_code_generator(st.session_state['output_format'], html_content)
-         st.session_state['code_generated'] = python_code
-         st.session_state['code_generated_exec'] = python_code + "\nresult = extract_info(html_data)"
-
-     except NameError:
-         st.write("Complete the API key field")
-     except AuthenticationError:
-         st.write("Invalid API key")
-
- @traceable(run_type="tool")
- def test_the_code(code, full_content):
-     exec(code, globals())
-     if result:
-         st.write("data extracted successfully")
-         # show data in table
-         st.table(result)
-     else:
-         st.write("error extracting data")
-
-     return result or "error"
-
-
- if 'code_generated' in st.session_state:
-     python_function_label = st.write("Here is your python function:")
-     code_generated = st.code(st.session_state['code_generated'], language="python")
-     full_content = st.text_input("Paste your complete HTML here:")
-     test_code = st.button("Test the code")
-     if full_content and test_code:
-         html_data = full_content
-         result = None
-         test_the_code(st.session_state['code_generated_exec'], full_content=full_content)
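
The test step above concatenates the generated function with a trailing call, result = extract_info(html_data), and exec's the whole string. A distilled sketch of that contract, where the generated function body is a stand-in for whatever the model returns (a namespace dict is used here instead of globals() as a slightly safer variant):

    generated_code = '''
    def extract_info(html):
        # stand-in for model-generated parsing logic
        return [{"title": "Product 1"}]
    '''
    html_data = "<li>Product 1</li>"
    namespace = {"html_data": html_data}
    exec(generated_code + "\nresult = extract_info(html_data)", namespace)
    print(namespace["result"])  # [{'title': 'Product 1'}]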
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/tf.py DELETED
@@ -1,269 +0,0 @@
- from __future__ import print_function
-
-
- try:
-     import tensorflow as tf
-     from tensorflow.python.ops import nn
-     relu = nn.relu
-     slim = tf.contrib.slim
-     sigmoid = nn.sigmoid
-     softmax = nn.softmax
- except:
-     print("tensorflow is not installed, util.tf can not be used.")
-
- def is_gpu_available(cuda_only=True):
-     """
-     code from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/platform/test.py
-     Returns whether TensorFlow can access a GPU.
-     Args:
-         cuda_only: limit the search to CUDA gpus.
-     Returns:
-         True iff a gpu device of the requested kind is available.
-     """
-     from tensorflow.python.client import device_lib as _device_lib
-
-     if cuda_only:
-         return any((x.device_type == 'GPU')
-                    for x in _device_lib.list_local_devices())
-     else:
-         return any((x.device_type == 'GPU' or x.device_type == 'SYCL')
-                    for x in _device_lib.list_local_devices())
-
-
- def get_available_gpus(num_gpus=None):
-     """
-     Modified from http://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow
-     However, the original code will occupy all available gpu memory.
-     The modified code needs a parameter, num_gpus. It does nothing but return the device handler names.
-     It works well for single-machine training, but may or may not work on a cluster.
-     """
-     if num_gpus is None:
-         from tensorflow.python.client import device_lib as _device_lib
-         local_device_protos = _device_lib.list_local_devices()
-         return [x.name for x in local_device_protos if x.device_type == 'GPU']
-     else:
-         return ['/gpu:%d' % (idx) for idx in range(num_gpus)]
-
- def get_latest_ckpt(path):
-     # tf.train.latest_checkpoint
-     import util
-     path = util.io.get_absolute_path(path)
-     if util.io.is_dir(path):
-         ckpt = tf.train.get_checkpoint_state(path)
-         if ckpt is not None:
-             ckpt_path = ckpt.model_checkpoint_path
-         else:
-             ckpt_path = None
-     else:
-         ckpt_path = path
-     return ckpt_path
-
- def get_all_ckpts(path):
-     ckpt = tf.train.get_checkpoint_state(path)
-     all_ckpts = ckpt.all_model_checkpoint_paths
-     ckpts = [str(c) for c in all_ckpts]
-     return ckpts
-
- def get_iter(ckpt):
-     import util
-     iter_ = int(util.str.find_all(ckpt, '.ckpt-\d+')[0].split('-')[-1])
-     return iter_
-
- def get_init_fn(checkpoint_path, train_dir, ignore_missing_vars=False,
-                 checkpoint_exclude_scopes=None, model_name=None, checkpoint_model_scope=None):
-     """
-     code from github/SSD-tensorflow/tf_utils.py
-     Returns a function run by the chief worker to warm-start the training.
-     Note that the init_fn is only run when initializing the model during the very
-     first global step.
-
-     checkpoint_path: the checkpoint to be restored
-     train_dir: the directory where checkpoints are stored during training.
-     ignore_missing_vars: if False and there are variables in the model but not in the checkpoint, an error will be raised.
-     checkpoint_model_scope and model_name: if the root scope of the checkpoints and of the model in session differ
-         (but the sub-scopes are all the same), specify them explicitly.
-     checkpoint_exclude_scopes: variables to be excluded when restoring from checkpoint_path.
-     Returns:
-         An init function run by the supervisor.
-     """
-     import util
-     if util.str.is_none_or_empty(checkpoint_path):
-         return None
-     # Warn the user if a checkpoint exists in the train_dir. Then ignore.
-     if tf.train.latest_checkpoint(train_dir):
-         tf.logging.info(
-             'Ignoring --checkpoint_path because a checkpoint already exists in %s'
-             % train_dir)
-         return None
-
-     exclusions = []
-     if checkpoint_exclude_scopes:
-         exclusions = [scope.strip()
-                       for scope in checkpoint_exclude_scopes.split(',')]
-
-     # TODO(sguada) variables.filter_variables()
-     variables_to_restore = []
-     for var in slim.get_model_variables():
-         excluded = False
-         for exclusion in exclusions:
-             if var.op.name.startswith(exclusion):
-                 excluded = True
-                 break
-         if not excluded:
-             variables_to_restore.append(var)
-     # Change model scope if necessary.
-     if checkpoint_model_scope is not None:
-         variables_to_restore = {checkpoint_model_scope + '/' + var.op.name: var for var in variables_to_restore}
-     tf.logging.info('variables_to_restore: %r' % (variables_to_restore))
-     checkpoint_path = get_latest_ckpt(checkpoint_path)
-     tf.logging.info('Fine-tuning from %s. Ignoring missing vars: %s' % (checkpoint_path, ignore_missing_vars))
-     print('checkpoint_path', checkpoint_path)
-     return slim.assign_from_checkpoint_fn(
-         checkpoint_path,
-         variables_to_restore,
-         ignore_missing_vars=ignore_missing_vars)
-
-
- def get_variables_to_train(flags=None):
-     """code from github/SSD-tensorflow/tf_utils.py
-     Returns a list of variables to train.
-
-     Returns:
-         A list of variables to train by the optimizer.
-     """
-     if flags is None or flags.trainable_scopes is None:
-         return tf.trainable_variables()
-     else:
-         scopes = [scope.strip() for scope in flags.trainable_scopes.split(',')]
-
-     variables_to_train = []
-     for scope in scopes:
-         variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
-         variables_to_train.extend(variables)
-     return variables_to_train
-
- def Print(tensor, data, msg='', file=None, mode='w'):
-     from tensorflow.python.ops import control_flow_ops
-     import util
-     def np_print(*args):
-         if util.str.contains(msg, '%'):
-             message = msg % tuple(args)
-         else:
-             message = msg + ' %' * len(args) % tuple(args)
-         if file is not None:
-             file_path = util.io.get_absolute_path(file)
-             print('writing message to file(%s):' % (file_path), message)
-             with open(file_path, mode) as f:
-                 print(message, file=f)
-         else:
-             print(message)
-     return control_flow_ops.with_dependencies([tf.py_func(np_print, data, [])], tensor)
-
- def get_variable_names_in_checkpoint(path, return_shapes=False, return_reader=False):
-     """
-     Args:
-         path: the path to the training directory containing checkpoints,
-             or the path to a checkpoint
-     Return:
-         a list of variable names in the checkpoint
-     """
-     import util
-     ckpt = get_latest_ckpt(path)
-     ckpt_reader = tf.train.NewCheckpointReader(ckpt)
-     ckpt_vars = ckpt_reader.get_variable_to_shape_map()
-     names = [var for var in ckpt_vars]
-     if return_shapes:
-         return names, ckpt_vars
-     def get(name):
-         return ckpt_reader.get_tensor(name)
-     if return_reader:
-         return names, get
-     return names
-
-
- def min_area_rect(xs, ys):
-     import util
-     rects = tf.py_func(util.img.min_area_rect, [xs, ys], xs.dtype)
-     rects.set_shape([None, 5])
-     return rects
-
-
- def gpu_config(config=None, allow_growth=None, gpu_memory_fraction=None):
-     if config is None:
-         config = tf.ConfigProto()
-
-     if allow_growth is not None:
-         config.gpu_options.allow_growth = allow_growth
-
-     if gpu_memory_fraction is not None:
-         config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
-
-     return config
-
- def wait_for_checkpoint(path):
-     from tensorflow.contrib.training.python.training import evaluation
-     return evaluation.checkpoints_iterator(path)
-
- def focal_loss(labels, logits, gamma=2.0, alpha=0.75, normalize=True):
-     labels = tf.where(labels > 0, tf.ones_like(labels), tf.zeros_like(labels))
-     labels = tf.cast(labels, tf.float32)
-     probs = tf.sigmoid(logits)
-     CE = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
-
-     alpha_t = tf.ones_like(logits) * alpha
-     alpha_t = tf.where(labels > 0, alpha_t, 1.0 - alpha_t)
-     probs_t = tf.where(labels > 0, probs, 1.0 - probs)
-
-     focal_matrix = alpha_t * tf.pow((1.0 - probs_t), gamma)
-     fl = focal_matrix * CE
-
-     fl = tf.reduce_sum(fl)
-     if normalize:
-         # n_pos = tf.reduce_sum(labels)
-         # fl = fl / tf.cast(n_pos, tf.float32)
-         total_weights = tf.stop_gradient(tf.reduce_sum(focal_matrix))
-         fl = fl / total_weights
-     return fl
-
-
- def focal_loss_layer_initializer(sigma=0.01, pi=0.01):
-     import numpy as np
-     b0 = - np.log((1 - pi) / pi)
-     return tf.random_normal_initializer(stddev=sigma), \
-         tf.constant_initializer(b0)
-
-
- def sum_gradients(clone_grads, do_summary=False):
-     averaged_grads = []
-     for grad_and_vars in zip(*clone_grads):
-         grads = []
-         var = grad_and_vars[0][1]
-         try:
-             for g, v in grad_and_vars:
-                 assert v == var
-                 grads.append(g)
-             grad = tf.add_n(grads, name=v.op.name + '_summed_gradients')
-         except:
-             import pdb
-             pdb.set_trace()
-
-         averaged_grads.append((grad, v))
-
-         if do_summary:
-             tf.summary.histogram("variables_and_gradients_" + grad.op.name, grad)
-             tf.summary.histogram("variables_and_gradients_" + v.op.name, v)
-             tf.summary.scalar("variables_and_gradients_" + grad.op.name +
-                               '_mean/var_mean', tf.reduce_mean(grad) / tf.reduce_mean(var))
-             tf.summary.scalar("variables_and_gradients_" + v.op.name + '_mean', tf.reduce_mean(var))
-     return averaged_grads
-
- def get_update_op():
-     """
-     Extremely important for BatchNorm
-     """
-     update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
-     if update_ops:
-         return tf.group(*update_ops)
-     return None
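
The focal_loss above computes, per pixel, FL = alpha_t * (1 - p_t)^gamma * CE, then normalizes the summed loss by the total focal weight. A tiny numeric check of the per-pixel factor, with illustrative values:

    import numpy as np

    gamma, alpha = 2.0, 0.75
    logit, label = 2.0, 1.0                  # one confident positive pixel
    p = 1.0 / (1.0 + np.exp(-logit))         # sigmoid(logit) ~ 0.881
    p_t = p if label > 0 else 1.0 - p
    alpha_t = alpha if label > 0 else 1.0 - alpha
    ce = -np.log(p_t)                        # sigmoid cross-entropy for this pixel
    fl = alpha_t * (1.0 - p_t) ** gamma * ce
    print(round(fl, 5))  # ~0.00135: easy, well-classified pixels are strongly down-weighted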
 
spaces/DHEIVER/ImageClassifierCataract/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: ImageClassifierCataract
- emoji: 📊
- colorFrom: yellow
- colorTo: green
- sdk: gradio
- sdk_version: 3.44.4
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/http.py DELETED
@@ -1,70 +0,0 @@
- import http.server
- import sys
- from typing import Mapping, Tuple
-
- from . import __version__
- from .http_exceptions import HttpProcessingError as HttpProcessingError
- from .http_parser import (
-     HeadersParser as HeadersParser,
-     HttpParser as HttpParser,
-     HttpRequestParser as HttpRequestParser,
-     HttpResponseParser as HttpResponseParser,
-     RawRequestMessage as RawRequestMessage,
-     RawResponseMessage as RawResponseMessage,
- )
- from .http_websocket import (
-     WS_CLOSED_MESSAGE as WS_CLOSED_MESSAGE,
-     WS_CLOSING_MESSAGE as WS_CLOSING_MESSAGE,
-     WS_KEY as WS_KEY,
-     WebSocketError as WebSocketError,
-     WebSocketReader as WebSocketReader,
-     WebSocketWriter as WebSocketWriter,
-     WSCloseCode as WSCloseCode,
-     WSMessage as WSMessage,
-     WSMsgType as WSMsgType,
-     ws_ext_gen as ws_ext_gen,
-     ws_ext_parse as ws_ext_parse,
- )
- from .http_writer import (
-     HttpVersion as HttpVersion,
-     HttpVersion10 as HttpVersion10,
-     HttpVersion11 as HttpVersion11,
-     StreamWriter as StreamWriter,
- )
-
- __all__ = (
-     "HttpProcessingError",
-     "RESPONSES",
-     "SERVER_SOFTWARE",
-     # .http_writer
-     "StreamWriter",
-     "HttpVersion",
-     "HttpVersion10",
-     "HttpVersion11",
-     # .http_parser
-     "HeadersParser",
-     "HttpParser",
-     "HttpRequestParser",
-     "HttpResponseParser",
-     "RawRequestMessage",
-     "RawResponseMessage",
-     # .http_websocket
-     "WS_CLOSED_MESSAGE",
-     "WS_CLOSING_MESSAGE",
-     "WS_KEY",
-     "WebSocketReader",
-     "WebSocketWriter",
-     "ws_ext_gen",
-     "ws_ext_parse",
-     "WSMessage",
-     "WebSocketError",
-     "WSMsgType",
-     "WSCloseCode",
- )
-
-
- SERVER_SOFTWARE: str = "Python/{0[0]}.{0[1]} aiohttp/{1}".format(
-     sys.version_info, __version__
- )
-
- RESPONSES: Mapping[int, Tuple[str, str]] = http.server.BaseHTTPRequestHandler.responses
 
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/modules/patch_feature_extractor.py DELETED
@@ -1,57 +0,0 @@
- import numpy as np
- import torch
- import torch.nn as nn
- from einops.layers.torch import Rearrange
-
-
- class PatchFeatureExtractor(nn.Module):
-     x_mean = torch.FloatTensor(np.array([0.485, 0.456, 0.406])[None, :, None, None])
-     x_std = torch.FloatTensor(np.array([0.229, 0.224, 0.225])[None, :, None, None])
-
-     def __init__(self, patch_num=256, input_shape=None):
-         super(PatchFeatureExtractor, self).__init__()
-
-         if input_shape is None:
-             input_shape = [3, 512, 1024]
-         self.patch_dim = 1024
-         self.patch_num = patch_num
-
-         img_channel = input_shape[0]
-         img_h = input_shape[1]
-         img_w = input_shape[2]
-
-         p_h, p_w = img_h, img_w // self.patch_num
-         p_dim = p_h * p_w * img_channel
-
-         self.patch_embedding = nn.Sequential(
-             Rearrange('b c h (p_n p_w) -> b p_n (h p_w c)', p_w=p_w),
-             nn.Linear(p_dim, self.patch_dim)
-         )
-
-         self.x_mean.requires_grad = False
-         self.x_std.requires_grad = False
-
-     def _prepare_x(self, x):
-         x = x.clone()
-         if self.x_mean.device != x.device:
-             self.x_mean = self.x_mean.to(x.device)
-             self.x_std = self.x_std.to(x.device)
-         x[:, :3] = (x[:, :3] - self.x_mean) / self.x_std
-
-         return x
-
-     def forward(self, x):
-         # x [b 3 512 1024]
-         x = self._prepare_x(x)  # [b 3 512 1024]
-         x = self.patch_embedding(x)  # [b 256(patch_num) 1024(d)]
-         x = x.permute(0, 2, 1)  # [b 1024(d) 256(patch_num)]
-         return x
-
-
- if __name__ == '__main__':
-     from PIL import Image
-     extractor = PatchFeatureExtractor()
-     img = np.array(Image.open("../../src/demo.png")).transpose((2, 0, 1))
-     input = torch.Tensor([img])  # 1 3 512 1024
-     feature = extractor(input)
-     print(feature.shape)  # 1, 1024, 256
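
The Rearrange step slices the panorama into full-height vertical strips and flattens each strip into one patch vector; with the default 3x512x1024 input the dimensions work out as follows:

    # With img_h = 512, img_w = 1024, patch_num = 256:
    p_h, p_w = 512, 1024 // 256   # each patch is full height, 4 pixels wide
    p_dim = p_h * p_w * 3         # 512 * 4 * 3 = 6144 values per patch
    # Rearrange: (b, 3, 512, 1024) -> (b, 256, 6144), then Linear(6144 -> 1024)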
 
spaces/DeepDrivePL/PaddleSeg-Matting/README.md DELETED
@@ -1,37 +0,0 @@
- ---
- title: PaddleSeg Matting
- emoji: 📊
- colorFrom: indigo
- colorTo: yellow
- sdk: gradio
- app_file: app.py
- pinned: false
- ---
-
- # Configuration
-
- `title`: _string_
- Display title for the Space
-
- `emoji`: _string_
- Space emoji (emoji-only character allowed)
-
- `colorFrom`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `colorTo`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `sdk`: _string_
- Can be either `gradio` or `streamlit`
-
- `sdk_version`: _string_
- Only applicable for `streamlit` SDK.
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
- `app_file`: _string_
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
- Path is relative to the root of the repository.
-
- `pinned`: _boolean_
- Whether the Space stays on top of your list.
 
spaces/Dinoking/Guccio-AI-Designer/netdissect/segmodel/models.py DELETED
@@ -1,558 +0,0 @@
- import torch
- import torch.nn as nn
- import torchvision
- from . import resnet, resnext
- try:
-     from lib.nn import SynchronizedBatchNorm2d
- except ImportError:
-     from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d
-
-
- class SegmentationModuleBase(nn.Module):
-     def __init__(self):
-         super(SegmentationModuleBase, self).__init__()
-
-     def pixel_acc(self, pred, label):
-         _, preds = torch.max(pred, dim=1)
-         valid = (label >= 0).long()
-         acc_sum = torch.sum(valid * (preds == label).long())
-         pixel_sum = torch.sum(valid)
-         acc = acc_sum.float() / (pixel_sum.float() + 1e-10)
-         return acc
-
-
- class SegmentationModule(SegmentationModuleBase):
-     def __init__(self, net_enc, net_dec, crit, deep_sup_scale=None):
-         super(SegmentationModule, self).__init__()
-         self.encoder = net_enc
-         self.decoder = net_dec
-         self.crit = crit
-         self.deep_sup_scale = deep_sup_scale
-
-     def forward(self, feed_dict, *, segSize=None):
-         if segSize is None:  # training
-             if self.deep_sup_scale is not None:  # use deep supervision technique
-                 (pred, pred_deepsup) = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True))
-             else:
-                 pred = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True))
-
-             loss = self.crit(pred, feed_dict['seg_label'])
-             if self.deep_sup_scale is not None:
-                 loss_deepsup = self.crit(pred_deepsup, feed_dict['seg_label'])
-                 loss = loss + loss_deepsup * self.deep_sup_scale
-
-             acc = self.pixel_acc(pred, feed_dict['seg_label'])
-             return loss, acc
-         else:  # inference
-             pred = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True), segSize=segSize)
-             return pred
-
-
- def conv3x3(in_planes, out_planes, stride=1, has_bias=False):
-     "3x3 convolution with padding"
-     return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
-                      padding=1, bias=has_bias)
-
-
- def conv3x3_bn_relu(in_planes, out_planes, stride=1):
-     return nn.Sequential(
-         conv3x3(in_planes, out_planes, stride),
-         SynchronizedBatchNorm2d(out_planes),
-         nn.ReLU(inplace=True),
-     )
-
-
- class ModelBuilder():
-     # custom weights initialization
-     def weights_init(self, m):
-         classname = m.__class__.__name__
-         if classname.find('Conv') != -1:
-             nn.init.kaiming_normal_(m.weight.data)
-         elif classname.find('BatchNorm') != -1:
-             m.weight.data.fill_(1.)
-             m.bias.data.fill_(1e-4)
-         # elif classname.find('Linear') != -1:
-         #     m.weight.data.normal_(0.0, 0.0001)
-
-     def build_encoder(self, arch='resnet50_dilated8', fc_dim=512, weights=''):
-         pretrained = True if len(weights) == 0 else False
-         if arch == 'resnet34':
-             raise NotImplementedError
-             orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained)
-             net_encoder = Resnet(orig_resnet)
-         elif arch == 'resnet34_dilated8':
-             raise NotImplementedError
-             orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained)
-             net_encoder = ResnetDilated(orig_resnet,
-                                         dilate_scale=8)
-         elif arch == 'resnet34_dilated16':
-             raise NotImplementedError
-             orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained)
-             net_encoder = ResnetDilated(orig_resnet,
-                                         dilate_scale=16)
-         elif arch == 'resnet50':
-             orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
-             net_encoder = Resnet(orig_resnet)
-         elif arch == 'resnet50_dilated8':
-             orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
-             net_encoder = ResnetDilated(orig_resnet,
-                                         dilate_scale=8)
-         elif arch == 'resnet50_dilated16':
-             orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
-             net_encoder = ResnetDilated(orig_resnet,
-                                         dilate_scale=16)
-         elif arch == 'resnet101':
-             orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
-             net_encoder = Resnet(orig_resnet)
-         elif arch == 'resnet101_dilated8':
-             orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
-             net_encoder = ResnetDilated(orig_resnet,
-                                         dilate_scale=8)
-         elif arch == 'resnet101_dilated16':
-             orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
-             net_encoder = ResnetDilated(orig_resnet,
-                                         dilate_scale=16)
-         elif arch == 'resnext101':
-             orig_resnext = resnext.__dict__['resnext101'](pretrained=pretrained)
-             net_encoder = Resnet(orig_resnext)  # we can still use class Resnet
-         else:
-             raise Exception('Architecture undefined!')
-
-         # net_encoder.apply(self.weights_init)
-         if len(weights) > 0:
-             # print('Loading weights for net_encoder')
-             net_encoder.load_state_dict(
-                 torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
-         return net_encoder
-
-     def build_decoder(self, arch='ppm_bilinear_deepsup',
-                       fc_dim=512, num_class=150,
-                       weights='', inference=False, use_softmax=False):
-         if arch == 'c1_bilinear_deepsup':
-             net_decoder = C1BilinearDeepSup(
-                 num_class=num_class,
-                 fc_dim=fc_dim,
-                 inference=inference,
-                 use_softmax=use_softmax)
-         elif arch == 'c1_bilinear':
-             net_decoder = C1Bilinear(
-                 num_class=num_class,
-                 fc_dim=fc_dim,
-                 inference=inference,
-                 use_softmax=use_softmax)
-         elif arch == 'ppm_bilinear':
-             net_decoder = PPMBilinear(
-                 num_class=num_class,
-                 fc_dim=fc_dim,
-                 inference=inference,
-                 use_softmax=use_softmax)
-         elif arch == 'ppm_bilinear_deepsup':
-             net_decoder = PPMBilinearDeepsup(
-                 num_class=num_class,
-                 fc_dim=fc_dim,
-                 inference=inference,
-                 use_softmax=use_softmax)
-         elif arch == 'upernet_lite':
-             net_decoder = UPerNet(
-                 num_class=num_class,
-                 fc_dim=fc_dim,
-                 inference=inference,
-                 use_softmax=use_softmax,
-                 fpn_dim=256)
-         elif arch == 'upernet':
-             net_decoder = UPerNet(
-                 num_class=num_class,
-                 fc_dim=fc_dim,
-                 inference=inference,
-                 use_softmax=use_softmax,
-                 fpn_dim=512)
-         elif arch == 'upernet_tmp':
-             net_decoder = UPerNetTmp(
-                 num_class=num_class,
-                 fc_dim=fc_dim,
-                 inference=inference,
-                 use_softmax=use_softmax,
-                 fpn_dim=512)
-         else:
-             raise Exception('Architecture undefined!')
-
-         net_decoder.apply(self.weights_init)
-         if len(weights) > 0:
-             # print('Loading weights for net_decoder')
-             net_decoder.load_state_dict(
-                 torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
-         return net_decoder
-
-
- class Resnet(nn.Module):
-     def __init__(self, orig_resnet):
-         super(Resnet, self).__init__()
-
-         # take pretrained resnet, except AvgPool and FC
-         self.conv1 = orig_resnet.conv1
-         self.bn1 = orig_resnet.bn1
-         self.relu1 = orig_resnet.relu1
-         self.conv2 = orig_resnet.conv2
-         self.bn2 = orig_resnet.bn2
-         self.relu2 = orig_resnet.relu2
-         self.conv3 = orig_resnet.conv3
-         self.bn3 = orig_resnet.bn3
-         self.relu3 = orig_resnet.relu3
-         self.maxpool = orig_resnet.maxpool
202
- self.layer1 = orig_resnet.layer1
203
- self.layer2 = orig_resnet.layer2
204
- self.layer3 = orig_resnet.layer3
205
- self.layer4 = orig_resnet.layer4
206
-
207
- def forward(self, x, return_feature_maps=False):
208
- conv_out = []
209
-
210
- x = self.relu1(self.bn1(self.conv1(x)))
211
- x = self.relu2(self.bn2(self.conv2(x)))
212
- x = self.relu3(self.bn3(self.conv3(x)))
213
- x = self.maxpool(x)
214
-
215
- x = self.layer1(x); conv_out.append(x);
216
- x = self.layer2(x); conv_out.append(x);
217
- x = self.layer3(x); conv_out.append(x);
218
- x = self.layer4(x); conv_out.append(x);
219
-
220
- if return_feature_maps:
221
- return conv_out
222
- return [x]
223
-
224
-
225
- class ResnetDilated(nn.Module):
226
- def __init__(self, orig_resnet, dilate_scale=8):
227
- super(ResnetDilated, self).__init__()
228
- from functools import partial
229
-
230
- if dilate_scale == 8:
231
- orig_resnet.layer3.apply(
232
- partial(self._nostride_dilate, dilate=2))
233
- orig_resnet.layer4.apply(
234
- partial(self._nostride_dilate, dilate=4))
235
- elif dilate_scale == 16:
236
- orig_resnet.layer4.apply(
237
- partial(self._nostride_dilate, dilate=2))
238
-
239
- # take pretrained resnet, except AvgPool and FC
240
- self.conv1 = orig_resnet.conv1
241
- self.bn1 = orig_resnet.bn1
242
- self.relu1 = orig_resnet.relu1
243
- self.conv2 = orig_resnet.conv2
244
- self.bn2 = orig_resnet.bn2
245
- self.relu2 = orig_resnet.relu2
246
- self.conv3 = orig_resnet.conv3
247
- self.bn3 = orig_resnet.bn3
248
- self.relu3 = orig_resnet.relu3
249
- self.maxpool = orig_resnet.maxpool
250
- self.layer1 = orig_resnet.layer1
251
- self.layer2 = orig_resnet.layer2
252
- self.layer3 = orig_resnet.layer3
253
- self.layer4 = orig_resnet.layer4
254
-
255
- def _nostride_dilate(self, m, dilate):
256
- classname = m.__class__.__name__
257
- if classname.find('Conv') != -1:
258
- # the convolution with stride
259
- if m.stride == (2, 2):
260
- m.stride = (1, 1)
261
- if m.kernel_size == (3, 3):
262
- m.dilation = (dilate//2, dilate//2)
263
- m.padding = (dilate//2, dilate//2)
264
- # other convoluions
265
- else:
266
- if m.kernel_size == (3, 3):
267
- m.dilation = (dilate, dilate)
268
- m.padding = (dilate, dilate)
269
-
270
- def forward(self, x, return_feature_maps=False):
271
- conv_out = []
272
-
273
- x = self.relu1(self.bn1(self.conv1(x)))
274
- x = self.relu2(self.bn2(self.conv2(x)))
275
- x = self.relu3(self.bn3(self.conv3(x)))
276
- x = self.maxpool(x)
277
-
278
- x = self.layer1(x); conv_out.append(x);
279
- x = self.layer2(x); conv_out.append(x);
280
- x = self.layer3(x); conv_out.append(x);
281
- x = self.layer4(x); conv_out.append(x);
282
-
283
- if return_feature_maps:
284
- return conv_out
285
- return [x]
286
-
287
-
288
- # last conv, bilinear upsample
289
- class C1BilinearDeepSup(nn.Module):
290
- def __init__(self, num_class=150, fc_dim=2048, inference=False, use_softmax=False):
291
- super(C1BilinearDeepSup, self).__init__()
292
- self.use_softmax = use_softmax
293
- self.inference = inference
294
-
295
- self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
296
- self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
297
-
298
- # last conv
299
- self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
300
- self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
301
-
302
- def forward(self, conv_out, segSize=None):
303
- conv5 = conv_out[-1]
304
-
305
- x = self.cbr(conv5)
306
- x = self.conv_last(x)
307
-
308
- if self.inference or self.use_softmax: # is True during inference
309
- x = nn.functional.interpolate(
310
- x, size=segSize, mode='bilinear', align_corners=False)
311
- if self.use_softmax:
312
- x = nn.functional.softmax(x, dim=1)
313
- return x
314
-
315
- # deep sup
316
- conv4 = conv_out[-2]
317
- _ = self.cbr_deepsup(conv4)
318
- _ = self.conv_last_deepsup(_)
319
-
320
- x = nn.functional.log_softmax(x, dim=1)
321
- _ = nn.functional.log_softmax(_, dim=1)
322
-
323
- return (x, _)
324
-
325
-
326
- # last conv, bilinear upsample
327
- class C1Bilinear(nn.Module):
328
- def __init__(self, num_class=150, fc_dim=2048, inference=False, use_softmax=False):
329
- super(C1Bilinear, self).__init__()
330
- self.use_softmax = use_softmax
331
- self.inference = inference
332
-
333
- self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
334
-
335
- # last conv
336
- self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
337
-
338
- def forward(self, conv_out, segSize=None):
339
- conv5 = conv_out[-1]
340
- x = self.cbr(conv5)
341
- x = self.conv_last(x)
342
-
343
- if self.inference or self.use_softmax: # is True during inference
344
- x = nn.functional.interpolate(
345
- x, size=segSize, mode='bilinear', align_corners=False)
346
- if self.use_softmax:
347
- x = nn.functional.softmax(x, dim=1)
348
- else:
349
- x = nn.functional.log_softmax(x, dim=1)
350
-
351
- return x
352
-
353
-
354
- # pyramid pooling, bilinear upsample
355
- class PPMBilinear(nn.Module):
356
- def __init__(self, num_class=150, fc_dim=4096,
357
- inference=False, use_softmax=False, pool_scales=(1, 2, 3, 6)):
358
- super(PPMBilinear, self).__init__()
359
- self.use_softmax = use_softmax
360
- self.inference = inference
361
-
362
- self.ppm = []
363
- for scale in pool_scales:
364
- self.ppm.append(nn.Sequential(
365
- nn.AdaptiveAvgPool2d(scale),
366
- nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
367
- SynchronizedBatchNorm2d(512),
368
- nn.ReLU(inplace=True)
369
- ))
370
- self.ppm = nn.ModuleList(self.ppm)
371
-
372
- self.conv_last = nn.Sequential(
373
- nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
374
- kernel_size=3, padding=1, bias=False),
375
- SynchronizedBatchNorm2d(512),
376
- nn.ReLU(inplace=True),
377
- nn.Dropout2d(0.1),
378
- nn.Conv2d(512, num_class, kernel_size=1)
379
- )
380
-
381
- def forward(self, conv_out, segSize=None):
382
- conv5 = conv_out[-1]
383
-
384
- input_size = conv5.size()
385
- ppm_out = [conv5]
386
- for pool_scale in self.ppm:
387
- ppm_out.append(nn.functional.interpolate(
388
- pool_scale(conv5),
389
- (input_size[2], input_size[3]),
390
- mode='bilinear', align_corners=False))
391
- ppm_out = torch.cat(ppm_out, 1)
392
-
393
- x = self.conv_last(ppm_out)
394
-
395
- if self.inference or self.use_softmax: # is True during inference
396
- x = nn.functional.interpolate(
397
- x, size=segSize, mode='bilinear', align_corners=False)
398
- if self.use_softmax:
399
- x = nn.functional.softmax(x, dim=1)
400
- else:
401
- x = nn.functional.log_softmax(x, dim=1)
402
- return x
403
-
404
-
405
- # pyramid pooling, bilinear upsample
406
- class PPMBilinearDeepsup(nn.Module):
407
- def __init__(self, num_class=150, fc_dim=4096,
408
- inference=False, use_softmax=False, pool_scales=(1, 2, 3, 6)):
409
- super(PPMBilinearDeepsup, self).__init__()
410
- self.use_softmax = use_softmax
411
- self.inference = inference
412
-
413
- self.ppm = []
414
- for scale in pool_scales:
415
- self.ppm.append(nn.Sequential(
416
- nn.AdaptiveAvgPool2d(scale),
417
- nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
418
- SynchronizedBatchNorm2d(512),
419
- nn.ReLU(inplace=True)
420
- ))
421
- self.ppm = nn.ModuleList(self.ppm)
422
- self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
423
-
424
- self.conv_last = nn.Sequential(
425
- nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
426
- kernel_size=3, padding=1, bias=False),
427
- SynchronizedBatchNorm2d(512),
428
- nn.ReLU(inplace=True),
429
- nn.Dropout2d(0.1),
430
- nn.Conv2d(512, num_class, kernel_size=1)
431
- )
432
- self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
433
- self.dropout_deepsup = nn.Dropout2d(0.1)
434
-
435
- def forward(self, conv_out, segSize=None):
436
- conv5 = conv_out[-1]
437
-
438
- input_size = conv5.size()
439
- ppm_out = [conv5]
440
- for pool_scale in self.ppm:
441
- ppm_out.append(nn.functional.interpolate(
442
- pool_scale(conv5),
443
- (input_size[2], input_size[3]),
444
- mode='bilinear', align_corners=False))
445
- ppm_out = torch.cat(ppm_out, 1)
446
-
447
- x = self.conv_last(ppm_out)
448
-
449
- if self.inference or self.use_softmax: # is True during inference
450
- x = nn.functional.interpolate(
451
- x, size=segSize, mode='bilinear', align_corners=False)
452
- if self.use_softmax:
453
- x = nn.functional.softmax(x, dim=1)
454
- return x
455
-
456
- # deep sup
457
- conv4 = conv_out[-2]
458
- _ = self.cbr_deepsup(conv4)
459
- _ = self.dropout_deepsup(_)
460
- _ = self.conv_last_deepsup(_)
461
-
462
- x = nn.functional.log_softmax(x, dim=1)
463
- _ = nn.functional.log_softmax(_, dim=1)
464
-
465
- return (x, _)
466
-
467
-
468
- # upernet
469
- class UPerNet(nn.Module):
470
- def __init__(self, num_class=150, fc_dim=4096,
471
- inference=False, use_softmax=False, pool_scales=(1, 2, 3, 6),
472
- fpn_inplanes=(256,512,1024,2048), fpn_dim=256):
473
- super(UPerNet, self).__init__()
474
- self.use_softmax = use_softmax
475
- self.inference = inference
476
-
477
- # PPM Module
478
- self.ppm_pooling = []
479
- self.ppm_conv = []
480
-
481
- for scale in pool_scales:
482
- self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
483
- self.ppm_conv.append(nn.Sequential(
484
- nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
485
- SynchronizedBatchNorm2d(512),
486
- nn.ReLU(inplace=True)
487
- ))
488
- self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
489
- self.ppm_conv = nn.ModuleList(self.ppm_conv)
490
- self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, fpn_dim, 1)
491
-
492
- # FPN Module
493
- self.fpn_in = []
494
- for fpn_inplane in fpn_inplanes[:-1]: # skip the top layer
495
- self.fpn_in.append(nn.Sequential(
496
- nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
497
- SynchronizedBatchNorm2d(fpn_dim),
498
- nn.ReLU(inplace=True)
499
- ))
500
- self.fpn_in = nn.ModuleList(self.fpn_in)
501
-
502
- self.fpn_out = []
503
- for i in range(len(fpn_inplanes) - 1): # skip the top layer
504
- self.fpn_out.append(nn.Sequential(
505
- conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
506
- ))
507
- self.fpn_out = nn.ModuleList(self.fpn_out)
508
-
509
- self.conv_last = nn.Sequential(
510
- conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
511
- nn.Conv2d(fpn_dim, num_class, kernel_size=1)
512
- )
513
-
514
- def forward(self, conv_out, segSize=None):
515
- conv5 = conv_out[-1]
516
-
517
- input_size = conv5.size()
518
- ppm_out = [conv5]
519
- for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
520
- ppm_out.append(pool_conv(nn.functional.interploate(
521
- pool_scale(conv5),
522
- (input_size[2], input_size[3]),
523
- mode='bilinear', align_corners=False)))
524
- ppm_out = torch.cat(ppm_out, 1)
525
- f = self.ppm_last_conv(ppm_out)
526
-
527
- fpn_feature_list = [f]
528
- for i in reversed(range(len(conv_out) - 1)):
529
- conv_x = conv_out[i]
530
- conv_x = self.fpn_in[i](conv_x) # lateral branch
531
-
532
- f = nn.functional.interpolate(
533
- f, size=conv_x.size()[2:], mode='bilinear', align_corners=False) # top-down branch
534
- f = conv_x + f
535
-
536
- fpn_feature_list.append(self.fpn_out[i](f))
537
-
538
- fpn_feature_list.reverse() # [P2 - P5]
539
- output_size = fpn_feature_list[0].size()[2:]
540
- fusion_list = [fpn_feature_list[0]]
541
- for i in range(1, len(fpn_feature_list)):
542
- fusion_list.append(nn.functional.interpolate(
543
- fpn_feature_list[i],
544
- output_size,
545
- mode='bilinear', align_corners=False))
546
- fusion_out = torch.cat(fusion_list, 1)
547
- x = self.conv_last(fusion_out)
548
-
549
- if self.inference or self.use_softmax: # is True during inference
550
- x = nn.functional.interpolate(
551
- x, size=segSize, mode='bilinear', align_corners=False)
552
- if self.use_softmax:
553
- x = nn.functional.softmax(x, dim=1)
554
- return x
555
-
556
- x = nn.functional.log_softmax(x, dim=1)
557
-
558
- return x
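The builder above wires an encoder and a decoder into SegmentationModule, so a short inference sketch helps show the intended flow. This is a sketch only: it assumes the file is importable inside its original package (so the relative `resnet` import resolves) and that pretrained ResNet-50 weights can be downloaded; the `nn.NLLLoss` criterion and the 384x384 input are illustrative assumptions, chosen because the decoders emit log-softmax during training.

# Minimal usage sketch (assumed setup, not from the original training scripts).
import torch
import torch.nn as nn

builder = ModelBuilder()
net_encoder = builder.build_encoder(arch='resnet50_dilated8', fc_dim=2048)
net_decoder = builder.build_decoder(arch='ppm_bilinear_deepsup',
                                    fc_dim=2048, num_class=150,
                                    inference=True, use_softmax=True)
crit = nn.NLLLoss(ignore_index=-1)  # assumption: pairs with the log_softmax training outputs
model = SegmentationModule(net_encoder, net_decoder, crit)
model.eval()

with torch.no_grad():
    img = torch.randn(1, 3, 384, 384)                 # dummy RGB batch
    scores = model({'img_data': img}, segSize=(384, 384))
    pred = torch.argmax(scores, dim=1)                # per-pixel class ids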
spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/__init__.py DELETED
@@ -1,9 +0,0 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

# empty
spaces/ECCV2022/PSG/OpenPSG/configs/motifs/panoptic_fpn_r101_fpn_1x_predcls_psg.py DELETED
@@ -1,28 +0,0 @@
_base_ = './panoptic_fpn_r50_fpn_1x_predcls_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

# Log config
project_name = 'openpsg'
expt_name = 'motifs_panoptic_fpn_r101_fpn_1x_predcls_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
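Configs in this style are consumed through mmcv's Config machinery, which resolves the `_base_` chain before training. A minimal sketch of how the merged config would be inspected, assuming mmcv is installed and the referenced r50 base file exists on disk:

# Sketch: load and inspect the merged config (assumes mmcv and the _base_ file).
from mmcv import Config

cfg = Config.fromfile('configs/motifs/panoptic_fpn_r101_fpn_1x_predcls_psg.py')
print(cfg.model.backbone.depth)  # 101, overriding the r50 base
print(cfg.work_dir)              # ./work_dirs/motifs_panoptic_fpn_r101_fpn_1x_predcls_psg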
spaces/ECE1786-AG/ArtIstic-GENREator/app.py DELETED
@@ -1,91 +0,0 @@
import torch
import gradio as gr
from transformers import pipeline, T5ForConditionalGeneration, T5Tokenizer
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler

# generate lyrics
lyrics_generator = pipeline("text-generation", "ECE1786-AG/lyrics-generator")

# summarize lyrics
model = T5ForConditionalGeneration.from_pretrained("Michau/t5-base-en-generate-headline")
tokenizer = T5Tokenizer.from_pretrained("Michau/t5-base-en-generate-headline")

# generate single cover
scheduler = EulerDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-2", subfolder="scheduler")
pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", scheduler=scheduler, revision="fp16", torch_dtype=torch.float16)
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = pipe.to(device)

def generate_lyrics(genre, prompt):
    complete_prompt = "<BOS> <{0}>\n{1}".format(genre, prompt)
    lyrics = lyrics_generator(complete_prompt, max_length=1024)
    lyrics = lyrics[0]['generated_text']
    lyrics = lyrics.split('\n', 1)[1]  # remove first line from the generated lyrics

    return lyrics

def summarize_lyrics(lyrics):
    text = "headline: " + lyrics
    encoding = tokenizer.encode_plus(text, return_tensors="pt")
    input_ids = encoding["input_ids"]
    attention_masks = encoding["attention_mask"]
    beam_outputs = model.generate(
        input_ids=input_ids,
        attention_mask=attention_masks,
        max_length=100,
        num_beams=5,
        early_stopping=True,
    )
    result = tokenizer.decode(beam_outputs[0])
    result = result.replace('<pad>', '')
    result = result.replace('</s>', '')

    return result

def generate_cover(prompt, style, effect):
    prompt = summarize_lyrics(prompt)  # call function summarize_lyrics to summarize lyrics
    prompt = prompt + ", " + effect + ", album cover, artistic, " + style
    print(prompt)
    image = pipe(prompt).images[0]
    return image

demo = gr.Blocks()
with demo:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 700px; margin: 0 auto;">
            <div style="display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;">
                <h1 style="font-weight: 900; margin-bottom: 7px;">ArtIstic GENREator</h1>
            </div>
            <p style="margin-bottom: 10px; font-size: 94%">Generate Inspirational Lyrics and Single Cover</p>
        </div>
        """
    )

    with gr.Row():

        # Left column (lyrics generation)
        with gr.Column():
            with gr.Accordion("Step 1. Generate Lyrics"):
                gr.Markdown("Enter the starting text and select genre to generate lyrics")
                with gr.Row():
                    input_start_text = gr.Textbox(placeholder='I am', label="Starting Text")
                    input_lyrics_type = gr.Radio(choices=['pop', 'rap', 'country', 'rock', 'r&b'], value='pop', label="Lyrics Genre")
                button_gen_lyrics = gr.Button("Generate Lyrics", variant="primary")
                output_generated_lyrics = gr.Textbox(label="Generated Lyrics", lines=8)

        # Right column (single cover generation)
        with gr.Column():
            with gr.Accordion("Step 2. Generate Single Cover"):
                gr.Markdown("Cover will be generated based on style, effect and generated lyrics")
                with gr.Row():
                    input_cover_style = gr.Dropdown(choices=['painted', 'abstract', 'minimalist', 'illustrated', 'photographic', 'vintage'], value='painted', label="Track Cover Style")
                    input_cover_effect = gr.Radio(choices=['black and white', 'highly detailed', 'blurred'], value='highly detailed', label="Track Cover Effect")
                button_gen_cover = gr.Button("Generate Cover", variant="primary")
                output_generated_cover = gr.Image(label="Generated Cover")

    # Bind functions to buttons
    button_gen_lyrics.click(fn=generate_lyrics, inputs=[input_lyrics_type, input_start_text], outputs=output_generated_lyrics)
    button_gen_cover.click(fn=generate_cover, inputs=[output_generated_lyrics, input_cover_style, input_cover_effect], outputs=output_generated_cover)

demo.launch(debug=True)
spaces/FridaZuley/RVC_HFKawaii/infer/lib/infer_pack/modules.py DELETED
@@ -1,521 +0,0 @@
import copy
import math

import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, weight_norm

from infer.lib.infer_pack import commons
from infer.lib.infer_pack.commons import get_padding, init_weights
from infer.lib.infer_pack.transforms import piecewise_rational_quadratic_transform

LRELU_SLOPE = 0.1


class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)


class ConvReluNorm(nn.Module):
    def __init__(
        self,
        in_channels,
        hidden_channels,
        out_channels,
        kernel_size,
        n_layers,
        p_dropout,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(
            nn.Conv1d(
                in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
            )
        )
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(
                nn.Conv1d(
                    hidden_channels,
                    hidden_channels,
                    kernel_size,
                    padding=kernel_size // 2,
                )
            )
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """

    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for i in range(n_layers):
            dilation = kernel_size**i
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(
                nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    groups=channels,
                    dilation=dilation,
                    padding=padding,
                )
            )
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for i in range(self.n_layers):
            y = self.convs_sep[i](x * x_mask)
            y = self.norms_1[i](y)
            y = F.gelu(y)
            y = self.convs_1x1[i](y)
            y = self.norms_2[i](y)
            y = F.gelu(y)
            y = self.drop(y)
            x = x + y
        return x * x_mask


class WN(torch.nn.Module):
    def __init__(
        self,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
        p_dropout=0,
    ):
        super(WN, self).__init__()
        assert kernel_size % 2 == 1
        self.hidden_channels = hidden_channels
        self.kernel_size = (kernel_size,)
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)

        if gin_channels != 0:
            cond_layer = torch.nn.Conv1d(
                gin_channels, 2 * hidden_channels * n_layers, 1
            )
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")

        for i in range(n_layers):
            dilation = dilation_rate**i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(
                hidden_channels,
                2 * hidden_channels,
                kernel_size,
                dilation=dilation,
                padding=padding,
            )
            in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
            self.in_layers.append(in_layer)

            # last one is not necessary
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])

        if g is not None:
            g = self.cond_layer(g)

        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)

            acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
            acts = self.drop(acts)

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                res_acts = res_skip_acts[:, : self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels :, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)


class ResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[2],
                        padding=get_padding(kernel_size, dilation[2]),
                    )
                ),
            ]
        )
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                ),
            ]
        )
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class ResBlock2(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
            ]
        )
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class Log(nn.Module):
    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
            logdet = torch.sum(-y, [1, 2])
            return y, logdet
        else:
            x = torch.exp(x) * x_mask
            return x


class Flip(nn.Module):
    def forward(self, x, *args, reverse=False, **kwargs):
        x = torch.flip(x, [1])
        if not reverse:
            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
            return x, logdet
        else:
            return x


class ElementwiseAffine(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = self.m + torch.exp(self.logs) * x
            y = y * x_mask
            logdet = torch.sum(self.logs * x_mask, [1, 2])
            return y, logdet
        else:
            x = (x - self.m) * torch.exp(-self.logs) * x_mask
            return x


class ResidualCouplingLayer(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        p_dropout=0,
        gin_channels=0,
        mean_only=False,
    ):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            p_dropout=p_dropout,
            gin_channels=gin_channels,
        )
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x

    def remove_weight_norm(self):
        self.enc.remove_weight_norm()


class ConvFlow(nn.Module):
    def __init__(
        self,
        in_channels,
        filter_channels,
        kernel_size,
        n_layers,
        num_bins=10,
        tail_bound=5.0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2

        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
        self.proj = nn.Conv1d(
            filter_channels, self.half_channels * (num_bins * 3 - 1), 1
        )
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask

        b, c, t = x0.shape
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, cx?, t] -> [b, c, t, ?]

        unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
            self.filter_channels
        )
        unnormalized_derivatives = h[..., 2 * self.num_bins :]

        x1, logabsdet = piecewise_rational_quadratic_transform(
            x1,
            unnormalized_widths,
            unnormalized_heights,
            unnormalized_derivatives,
            inverse=reverse,
            tails="linear",
            tail_bound=self.tail_bound,
        )

        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        if not reverse:
            return x, logdet
        else:
            return x
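The coupling blocks above are invertible by construction: running forward with reverse=False and then reverse=True should recover the input up to float error. A small round-trip sketch, assuming the infer.lib.infer_pack package is importable (all shapes are illustrative); the weight re-init is only there to make the test non-trivial, since self.post is zero-initialized:

# Sketch: verify that the residual coupling layer inverts itself.
import torch

layer = ResidualCouplingLayer(channels=4, hidden_channels=8, kernel_size=5,
                              dilation_rate=1, n_layers=2, mean_only=True)
torch.nn.init.normal_(layer.post.weight, std=0.1)  # undo the zero-init for a non-trivial check
layer.eval()

x = torch.randn(2, 4, 16)        # (batch, channels, time)
x_mask = torch.ones(2, 1, 16)    # no padding in this toy case

y, logdet = layer(x, x_mask, reverse=False)
x_rec = layer(y, x_mask, reverse=True)
print(torch.allclose(x, x_rec, atol=1e-5))  # True

The zero-init of self.post is itself a design point worth noting: each coupling layer starts out as (almost) the identity map, which keeps the flow stable early in training.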
spaces/FridaZuley/RVC_HFKawaii/infer/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py DELETED
@@ -1,98 +0,0 @@
import numpy as np
import parselmouth

from infer.lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor


class PMF0Predictor(F0Predictor):
    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.sampling_rate = sampling_rate

    def interpolate_f0(self, f0):
        """
        Interpolate the F0 contour across unvoiced frames.
        """

        data = np.reshape(f0, (f0.size, 1))

        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
        vuv_vector[data > 0.0] = 1.0
        vuv_vector[data <= 0.0] = 0.0

        ip_data = data

        frame_number = data.size
        last_value = 0.0
        for i in range(frame_number):
            if data[i] <= 0.0:
                j = i + 1
                for j in range(i + 1, frame_number):
                    if data[j] > 0.0:
                        break
                if j < frame_number - 1:
                    if last_value > 0.0:
                        step = (data[j] - data[i - 1]) / float(j - i)
                        for k in range(i, j):
                            ip_data[k] = data[i - 1] + step * (k - i + 1)
                    else:
                        for k in range(i, j):
                            ip_data[k] = data[j]
                else:
                    for k in range(i, frame_number):
                        ip_data[k] = last_value
            else:
                ip_data[i] = data[i]  # this copy may be unnecessary
                last_value = data[i]

        return ip_data[:, 0], vuv_vector[:, 0]

    def compute_f0(self, wav, p_len=None):
        x = wav
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        else:
            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(x, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        f0, uv = self.interpolate_f0(f0)
        return f0

    def compute_f0_uv(self, wav, p_len=None):
        x = wav
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        else:
            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(x, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        f0, uv = self.interpolate_f0(f0)
        return f0, uv
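A usage sketch for the predictor above, assuming parselmouth and numpy are installed and the parent F0Predictor module resolves. The test signal is a synthetic 220 Hz sine, so the recovered F0 should sit near 220 on voiced frames:

# Sketch: run the Praat-based predictor on a synthetic tone.
import numpy as np

sr = 44100
t = np.arange(sr) / sr                      # 1 second of audio
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)   # 220 Hz sine

predictor = PMF0Predictor(hop_length=512, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)
print(f0.shape, float(np.median(f0[uv > 0])))  # ~220.0 on voiced frames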
spaces/GT-RIPL/GPT-K/knowledge/retrieve.py DELETED
@@ -1,105 +0,0 @@
import h5py
import numpy as np
from tqdm import tqdm
import torch
from knowledge import TextDB


class ImageCropsIdx:
    def __init__(self, knowledge_idx, topk_w, topk_f, topk_n):
        topk = {"whole": topk_w, "five": topk_f, "nine": topk_n}
        self.topk = {k: v for k, v in topk.items() if v > 0}

        self.knowledge_idx, self.fdim, self.file_hash = self.load(knowledge_idx, self.topk)

    def load(self, knowledge_idx, topk):
        with h5py.File(knowledge_idx, "r") as f:
            fdim = f.attrs["fdim"]
            file_hash = f.attrs["file_hash"]

            knowledge_idx_ = {}
            for i in tqdm(range(len(f)), desc="Load sentence idx", dynamic_ncols=True, mininterval=1.0):
                knowledge_idx_[str(i)] = {"image_ids": f[f"{i}/image_ids"][:]}
                for k, v in topk.items():
                    knowledge_idx_[str(i)][k] = {
                        "index": f[f"{i}/{k}/index"][:, :, :v],
                        "score": f[f"{i}/{k}/score"][:, :, :v],
                        "query": f[f"{i}/{k}/query"][:]
                    }

        knowledge_idx = {}
        for i in knowledge_idx_.keys():
            for j, id in enumerate(knowledge_idx_[i]["image_ids"]):
                knowledge_idx[id] = {}
                for k in topk.keys():
                    knowledge_idx[id][k] = {
                        "index": knowledge_idx_[i][k]["index"][j],
                        "score": knowledge_idx_[i][k]["score"][j],
                        "query": knowledge_idx_[i][k]["query"][j],
                    }

        return knowledge_idx, fdim, file_hash

    def __getitem__(self, image_id):
        return self.knowledge_idx[image_id]


class KnowAugImageCrops:
    def __init__(self, knowledge_db: TextDB, knowledge_idx: ImageCropsIdx, return_txt=False):
        self.knowledge_db = knowledge_db
        self.knowledge_idx = knowledge_idx
        assert knowledge_db.file_hash == knowledge_idx.file_hash

        self.ncrop = {"whole": 1, "five": 5, "nine": 9}
        self.topk = knowledge_idx.topk
        self.fdim = knowledge_idx.fdim

        self.return_txt = return_txt

    def __call__(self, image_id):
        ret = {}
        for k in self.topk.keys():
            ki = self.knowledge_idx[image_id][k]["index"].flatten()
            ke, kt = self.knowledge_db[ki]
            kq = self.knowledge_idx[image_id][k]["query"]
            kp = np.tile(np.arange(self.ncrop[k])[:, None], (1, self.topk[k])).flatten()
            ks = self.knowledge_idx[image_id][k]["score"].flatten()

            ke = torch.FloatTensor(ke)
            kq = torch.FloatTensor(kq)
            kp = torch.LongTensor(kp)
            ks = torch.FloatTensor(ks)

            ret[k] = {"embed": ke, "query": kq, "pos": kp, "score": ks}
            if self.return_txt:
                ret[k]["text"] = kt

        return ret


class KnowAugImageCropsCombined:
    def __init__(
        self,
        knwl_aug_obj: KnowAugImageCrops,
        knwl_aug_attr: KnowAugImageCrops,
        knwl_aug_act: KnowAugImageCrops
    ):
        self.knwl_aug_obj = knwl_aug_obj
        self.knwl_aug_act = knwl_aug_act
        self.knwl_aug_attr = knwl_aug_attr
        self.fdim = knwl_aug_obj.fdim

    def __call__(self, image_id):
        knwl_obj = self.knwl_aug_obj(image_id)
        knwl_attr = self.knwl_aug_attr(image_id)
        knwl_act = self.knwl_aug_act(image_id)

        ret = {}
        for k in knwl_obj.keys():
            ret[k] = {
                "obj": knwl_obj[k],
                "attr": knwl_attr[k],
                "act": knwl_act[k]
            }

        return ret
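The HDF5 layout that ImageCropsIdx.load expects can be read off the indexing above: numbered top-level groups ('0', '1', ...) each holding image_ids plus per-crop-type index/score/query arrays, with fdim and file_hash stored as file attributes. A tiny writer sketch, assuming only the 'whole' crop type is used; the file name, hash, and shapes are all illustrative:

# Sketch: write a minimal index file that ImageCropsIdx can load.
import h5py
import numpy as np

n_img, ncrop, topk, fdim = 3, 1, 2, 8    # 'whole' has a single crop
with h5py.File('toy_idx.h5', 'w') as f:
    f.attrs['fdim'] = fdim
    f.attrs['file_hash'] = 'deadbeef'    # would have to match the TextDB it indexes
    g = f.create_group('0')
    g['image_ids'] = np.arange(n_img)
    w = g.create_group('whole')
    w['index'] = np.zeros((n_img, ncrop, topk), dtype=np.int64)
    w['score'] = np.ones((n_img, ncrop, topk), dtype=np.float32)
    w['query'] = np.random.randn(n_img, fdim).astype(np.float32)

idx = ImageCropsIdx('toy_idx.h5', topk_w=2, topk_f=0, topk_n=0)
print(idx[0]['whole']['index'].shape)    # (1, 2): (ncrop, topk)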
spaces/Gen-Sim/Gen-Sim/cliport/models/streams/two_stream_transport_lang_fusion.py DELETED
@@ -1,196 +0,0 @@
import torch
import numpy as np

import cliport.models as models
import cliport.models.core.fusion as fusion
from cliport.models.core.transport import Transport


class TwoStreamTransportLangFusion(Transport):
    """Two Stream Transport (a.k.a Place) module"""

    def __init__(self, stream_fcn, in_shape, n_rotations, crop_size, preprocess, cfg, device):
        self.fusion_type = cfg['train']['trans_stream_fusion_type']
        super().__init__(stream_fcn, in_shape, n_rotations, crop_size, preprocess, cfg, device)

    def _build_nets(self):
        stream_one_fcn, stream_two_fcn = self.stream_fcn
        stream_one_model = models.names[stream_one_fcn]
        stream_two_model = models.names[stream_two_fcn]

        self.key_stream_one = stream_one_model(self.in_shape, self.output_dim, self.cfg, self.device, self.preprocess)
        self.key_stream_two = stream_two_model(self.in_shape, self.output_dim, self.cfg, self.device, self.preprocess)
        self.query_stream_one = stream_one_model(self.kernel_shape, self.kernel_dim, self.cfg, self.device, self.preprocess)
        self.query_stream_two = stream_two_model(self.kernel_shape, self.kernel_dim, self.cfg, self.device, self.preprocess)
        self.fusion_key = fusion.names[self.fusion_type](input_dim=self.kernel_dim)
        self.fusion_query = fusion.names[self.fusion_type](input_dim=self.kernel_dim)

        print(f"Transport FCN - Stream One: {stream_one_fcn}, Stream Two: {stream_two_fcn}, Stream Fusion: {self.fusion_type}")

    def transport2(self, in_tensor, crop, l):
        logits = self.fusion_key(self.key_stream_one(in_tensor), self.key_stream_two(in_tensor, l))
        kernel = self.fusion_query(self.query_stream_one(crop), self.query_stream_two(crop, l))
        return logits, kernel

    def forward(self, inp_img, p, lang_goal, softmax=True):
        """Forward pass."""
        if len(inp_img.shape) < 4:
            inp_img = inp_img[None]

        if type(inp_img) is not torch.Tensor:
            in_data = inp_img  # .reshape(in_shape)
            in_tens = torch.from_numpy(in_data).to(dtype=torch.float, device=self.device)  # [B W H 6]
        else:
            in_data = inp_img
            in_tens = in_data

        in_tensor = torch.nn.functional.pad(in_tens, tuple(self.padding[[2, 1, 0]].reshape(-1)), mode='constant')
        if type(p[0]) is not torch.Tensor:
            p = torch.FloatTensor(p)[None]

        in_tensors = []
        crops = []

        # this for loop is fast.
        for i in range(len(in_tensor)):
            in_tensor_i = in_tensor[[i]]
            # Rotation pivot.
            pv = p[i] + self.pad_size

            # Crop before network (default for Transporters CoRL 2020).
            hcrop = self.pad_size
            in_tensor_i = in_tensor_i.permute(0, 3, 1, 2)

            crop = [in_tensor_i] * self.n_rotations
            crop = self.rotator(crop, pivot=pv.float())
            crop = torch.cat(crop, dim=0)
            crop = crop[:, :, int(pv[0] - hcrop):int(pv[0] + hcrop), int(pv[1] - hcrop):int(pv[1] + hcrop)]

            in_tensors.append(in_tensor_i)
            crops.append(crop)

        logits, kernels = self.transport(torch.cat(in_tensors, dim=0), torch.cat(crops, dim=0), lang_goal)  # crops.shape: (8, 36, 6, 64, 64)
        res = self.correlate(logits, kernels, softmax)
        return res


class TwoStreamTransportLangFusionLat(TwoStreamTransportLangFusion):
    """Two Stream Transport (a.k.a Place) module with lateral connections"""

    def __init__(self, stream_fcn, in_shape, n_rotations, crop_size, preprocess, cfg, device):
        self.fusion_type = cfg['train']['trans_stream_fusion_type']
        super().__init__(stream_fcn, in_shape, n_rotations, crop_size, preprocess, cfg, device)

    def transport(self, in_tensor, crop, l):
        key_out_one, key_lat_one = self.key_stream_one(in_tensor)
        key_out_two = self.key_stream_two(in_tensor, key_lat_one, l)
        logits = self.fusion_key(key_out_one, key_out_two)

        query_out_one, query_lat_one = self.query_stream_one(crop)
        query_out_two = self.query_stream_two(crop, query_lat_one, l)
        kernel = self.fusion_query(query_out_one, query_out_two)

        return logits, kernel


class TwoStreamTransportLangFusionLatReduce(TwoStreamTransportLangFusionLat):
    """Two Stream Transport (a.k.a Place) module with lateral connections"""

    def __init__(self, stream_fcn, in_shape, n_rotations, crop_size, preprocess, cfg, device):
        self.fusion_type = cfg['train']['trans_stream_fusion_type']
        super().__init__(stream_fcn, in_shape, n_rotations, crop_size, preprocess, cfg, device)

        del self.query_stream_one
        del self.query_stream_two

        stream_one_fcn = 'plain_resnet_reduce_lat'
        stream_one_model = models.names[stream_one_fcn]
        stream_two_fcn = 'clip_ling'
        stream_two_model = models.names[stream_two_fcn]

        self.query_stream_one = stream_one_model(self.kernel_shape, self.kernel_dim, self.cfg, self.device, self.preprocess)
        self.query_stream_two = stream_two_model(self.kernel_shape, self.kernel_dim, self.cfg, self.device, self.preprocess)

    def transport(self, in_tensor, crop, l):
        key_out_one, key_lat_one = self.key_stream_one(in_tensor)
        key_out_two = self.key_stream_two(in_tensor, key_lat_one, l)
        logits = self.fusion_key(key_out_one, key_out_two)

        query_out_one, query_lat_one = self.query_stream_one(crop)
        query_out_two = self.query_stream_two(crop, query_lat_one, l)
        kernel = self.fusion_query(query_out_one, query_out_two)

        return logits, kernel


class TwoStreamTransportLangFusionLatReduceOneStream(TwoStreamTransportLangFusionLatReduce):
    """Two Stream Transport (a.k.a Place) module with lateral connections"""

    def __init__(self, stream_fcn, in_shape, n_rotations, crop_size, preprocess, cfg, device):
        self.fusion_type = cfg['train']['trans_stream_fusion_type']
        super().__init__(stream_fcn, in_shape, n_rotations, crop_size, preprocess, cfg, device)

        del self.query_stream_one
        del self.query_stream_two

    def transport(self, in_tensor, crop, l):
        key_out_one, key_lat_one = self.key_stream_one(in_tensor)
        key_out_two = self.key_stream_two(in_tensor, key_lat_one, l)
        logits = self.fusion_key(key_out_one, key_out_two)

        query_out_one, query_lat_one = self.key_stream_one(crop)
        query_out_two = self.key_stream_two(crop, query_lat_one, l)
        kernel = self.fusion_query(query_out_one, query_out_two)

        return logits, kernel


class TwoStreamTransportLangFusionLatPretrained18(TwoStreamTransportLangFusionLat):
    """Two Stream Transport (a.k.a Place) module with lateral connections"""

    def __init__(self, stream_fcn, in_shape, n_rotations, crop_size, preprocess, cfg, device):
        self.fusion_type = cfg['train']['trans_stream_fusion_type']
        super().__init__(stream_fcn, in_shape, n_rotations, crop_size, preprocess, cfg, device)

        del self.query_stream_one
        del self.query_stream_two

        stream_one_fcn = 'pretrained_resnet18'
        stream_one_model = models.names[stream_one_fcn]
        stream_two_fcn = 'clip_ling'
        stream_two_model = models.names[stream_two_fcn]

        self.query_stream_one = stream_one_model(self.kernel_shape, self.kernel_dim, self.cfg, self.device, self.preprocess)
        self.query_stream_two = stream_two_model(self.kernel_shape, self.kernel_dim, self.cfg, self.device, self.preprocess)

    def transport(self, in_tensor, crop, l):
        key_out_one, key_lat_one = self.key_stream_one(in_tensor)
        key_out_two = self.key_stream_two(in_tensor, key_lat_one, l)
        logits = self.fusion_key(key_out_one, key_out_two)

        query_out_one, query_lat_one = self.query_stream_one(crop)
        query_out_two = self.query_stream_two(crop, query_lat_one, l)
        kernel = self.fusion_query(query_out_one, query_out_two)

        return logits, kernel
spaces/Gen-Sim/Gen-Sim/cliport/utils/__init__.py DELETED
File without changes
spaces/GeorgeOrville/bingo/postcss.config.js DELETED
@@ -1,6 +0,0 @@
module.exports = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
}