diff --git a/spaces/0x90e/ESRGAN-MANGA/util.py b/spaces/0x90e/ESRGAN-MANGA/util.py
deleted file mode 100644
index 0fa96927230b0d18680a5378154af57bec9aad35..0000000000000000000000000000000000000000
--- a/spaces/0x90e/ESRGAN-MANGA/util.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import os
-
-def is_google_colab():
- if os.getenv("COLAB_RELEASE_TAG"):
- return True
- return False
\ No newline at end of file
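
A compact sketch of the same check plus a usage example (not part of the deleted file): Google Colab runtimes set the COLAB_RELEASE_TAG environment variable, so any non-empty value means the code is running inside Colab.

    import os

    def is_google_colab() -> bool:
        # COLAB_RELEASE_TAG is set by Google Colab runtimes; a non-empty
        # value means we are running inside Colab.
        return bool(os.getenv("COLAB_RELEASE_TAG"))

    if is_google_colab():
        print("Running inside Google Colab")
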
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackear Photoshop Uma Soluo ou um Problema? Descubra os Prs e Contras.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackear Photoshop Uma Soluo ou um Problema? Descubra os Prs e Contras.md
deleted file mode 100644
index 03b52d76534f4315b17f71e0588a2385cde74910..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackear Photoshop Uma Soluo ou um Problema? Descubra os Prs e Contras.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-How to Crack Photoshop: Risks and Alternatives
-Photoshop is a widely used program for photo and image editing. However, it can be quite expensive, and some people resort to Photoshop cracks to use the software for free. Although this may seem like a good idea at first, it is important to know the risks associated with cracked software. In this article, we will explain what Photoshop cracks are, why they are illegal and dangerous, and what some better alternatives are for getting Photoshop legally and safely.
-What is a Photoshop Crack?
-A Photoshop crack is a software program used to bypass the activation process of Adobe Photoshop. Cracks are usually created by third-party individuals or organizations and are not endorsed by Adobe. Using a cracked version of Photoshop exposes you to various risks, such as viruses, malware, or spyware being installed on your computer. Moreover, cracked software may not receive updates from the manufacturer, which means you may miss important security patches. It is important to note that Adobe does not support the use of cracked software and may take legal action against individuals or organizations that distribute cracks.
-como crackear photoshop
-Download ⇒ https://byltly.com/2uKvIs
-Why is Using Photoshop Crack Illegal and Dangerous?
-There are several reasons why using a Photoshop crack is illegal and dangerous. First of all, cracked software is illegal: by using a cracked version of Photoshop, you are breaking the law and may be subject to penalties. Additionally, cracked software is usually unstable and often bundled with viruses, which can cause your computer to crash or, worse, infect other computers with malware. Furthermore, using a Photoshop crack may get you banned from certain websites and online forums. Many websites have policies against cracked software and will ban users who are caught using it. Sometimes your IP address may be blacklisted, preventing you from accessing certain websites or online services.
-What are Some Alternatives to Photoshop Crack?
-If you want to use Photoshop legally and safely, there are some alternatives to Photoshop crack that you can consider. One option is to use the free trial version of Photoshop that Adobe offers on its website. The trial version allows you to use all the features of Photoshop for 7 days without any cost. This way, you can test the software before deciding whether to buy it or not. Another option is to use Adobe's subscription plan, which gives you access to Photoshop and other Adobe products for a monthly or yearly fee. The subscription plan also includes cloud storage, online services, and regular updates. You can choose from different plans depending on your needs and budget.
-Conclusion
-A Photoshop crack is not a good idea if you want to use Photoshop for photo and image editing. Cracked software is illegal, dangerous, and unreliable. It can expose you to risks such as viruses, malware, spyware, legal issues, and bans. Instead of using a Photoshop crack, consider the free trial version or the subscription plan that Adobe offers on its website. These alternatives are legal, safe, and reliable, and they give you the best features and performance that Photoshop can offer.
-ddb901b051
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Arcgis 10.8 Full Crack.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Arcgis 10.8 Full Crack.md
deleted file mode 100644
index 231c45994eb5b9fd329117e45b4da31c85aca452..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Arcgis 10.8 Full Crack.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-How to Free Download ArcGIS 10.8 Full Crack and Install It on Your PC
-ArcGIS is powerful software that allows you to create, analyze, and visualize geographic data. It is widely used by professionals and students in various fields such as geography, urban planning, environmental science, and engineering. However, ArcGIS is not free software, and you need to purchase a license to use it.
-But what if you want to use ArcGIS without paying anything? Is there a way to download ArcGIS 10.8 full crack for free and install it on your PC? The answer is yes, but you need to be careful and follow the steps below to avoid malware or viruses. Here is how you can do it:
-free download arcgis 10.8 full crack
-Download File ✫✫✫ https://byltly.com/2uKxxw
-
-- First, you need to download the ArcGIS 10.8 setup file from a reliable source. You can use this link: https://www.esri.com/en-us/industries/overview. Click on the download button and choose the version 10.8 from the list.
-- Next, you need to download the ArcGIS 10.8 crack file from another source. You can use this link: https://crackdaily.com/arcgis-crack/. Scroll down and click on the green download button.
-- After downloading both files, you need to disable your antivirus software temporarily. This is because the crack file may be detected as a virus by some antivirus programs, but it is actually safe to use.
-- Then, you need to install ArcGIS 10.8 by running the setup file. Follow the instructions and complete the installation process.
-- Next, you need to copy the crack file and paste it in the ArcGIS installation folder. The default location is C:\Program Files (x86)\ArcGIS or C:\Program Files\ArcGIS depending on your system architecture.
-- After that, you need to run the crack file as administrator. Click on the crack button and wait for it to finish.
-- Finally, you need to restart your computer and enjoy using ArcGIS 10.8 full crack for free.
-
-Note: This method is only for educational purposes and we do not recommend using cracked software. If you like ArcGIS and want to support its development, please buy a license from its official website: https://www.esri.com/en-us/home.
-ddb901b051
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Electude-motor-diagnosis-descargar.md b/spaces/1gistliPinn/ChatGPT4/Examples/Electude-motor-diagnosis-descargar.md
deleted file mode 100644
index d320789f08c5466b271b5f8fd9a1732bc8340808..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Electude-motor-diagnosis-descargar.md
+++ /dev/null
@@ -1,36 +0,0 @@
-electude-motor-diagnosis-descargar
-DOWNLOAD ✶ https://imgfil.com/2uxXXn
-
-
-Siemens announces UFER Enterprise Platform for the industrial internet of things
-
-Siemens has announced UFER, an enterprise software platform that combines artificial intelligence and the industrial internet of things to help companies process vast amounts of information generated by industrial sensors and machines to transform business operations in production and the environment.
-
-Siemens AG’s UFER platform will go into pilot use in July 2018. The pilot is targeted at the automotive industry, where UFER will work together with Siemens’ new PlantWise system for the machine-to-machine industrial internet. PlantWise is expected to be released commercially in 2019.
-
-UFER has four main purposes:
-
-Reducing the risk of accidents and operational disruptions by processing data from industrial sensors and machines to make it available for analysis at a moment’s notice.
-
-Organizing data from multiple sources and producing a common, actionable view of plant operations.
-
-Enabling businesses to become more productive by using data from machines, sensors and the internet of things to optimize production and service processes.
-
-Reducing costs by optimizing maintenance processes, improving the efficiency of plant operations, and capturing data that can be used to develop new products or services.
-
-UFER will be developed in collaboration with IBM, a leading international technology company, as well as a number of smaller partner companies in a selected industry sector.
-
-UFER’s main focus is on the automotive industry. This industry has one of the most complex IT infrastructures in the world, including:
-
-Production lines for which plant operations require close monitoring, such as those for powertrain components and fuel injection systems;
-
-Networks of machines that capture and analyze plant operations data, such as the multi-sensor networks of forklifts, cranes and conveyor belts;
-
-PlantWise, which is slated for commercial release in 2019, will feed UFER with production data from the plant, such as production rate and inventory information, and process it to make it available for analysis at a moment’s notice.
-
-The U 4fefd39f24
-
-
-
diff --git a/spaces/1line/AutoGPT/autogpt/commands/write_tests.py b/spaces/1line/AutoGPT/autogpt/commands/write_tests.py
deleted file mode 100644
index 35a086536c9d05d520a84b15ead49f775eacdcc9..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/autogpt/commands/write_tests.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""A module that contains a function to generate test cases for the submitted code."""
-from __future__ import annotations
-
-import json
-
-from autogpt.llm_utils import call_ai_function
-
-
-def write_tests(code: str, focus: list[str]) -> str:
- """
- Take in code and focus topics and return test cases generated via a chat
- completion API call.
-
- Parameters:
- code (str): Code to generate test cases against.
- focus (list): A list of suggestions around what needs to be improved.
- Returns:
- A result string from the chat completion call containing test cases for
- the submitted code.
- """
-
- function_string = (
- "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
- )
- args = [code, json.dumps(focus)]
- description_string = (
- "Generates test cases for the existing code, focusing on"
- " specific areas if required."
- )
-
- return call_ai_function(function_string, args, description_string)
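
A hedged usage sketch for write_tests above: call_ai_function is AutoGPT's wrapper around a chat-completion call, so the exact output depends on the model's response. The sample code and focus list here are purely illustrative.

    sample_code = "def add(a, b):\n    return a + b"
    tests = write_tests(sample_code, focus=["edge cases", "type errors"])
    print(tests)  # expected: generated test cases as a single string
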
diff --git a/spaces/1line/AutoGPT/autogpt/speech/macos_tts.py b/spaces/1line/AutoGPT/autogpt/speech/macos_tts.py
deleted file mode 100644
index 4c072ce256782e83a578b5181abf1a7b524c621b..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/autogpt/speech/macos_tts.py
+++ /dev/null
@@ -1,21 +0,0 @@
-""" MacOS TTS Voice. """
-import os
-
-from autogpt.speech.base import VoiceBase
-
-
-class MacOSTTS(VoiceBase):
- """MacOS TTS Voice."""
-
- def _setup(self) -> None:
- pass
-
- def _speech(self, text: str, voice_index: int = 0) -> bool:
- """Play the given text."""
- if voice_index == 0:
- os.system(f'say "{text}"')
- elif voice_index == 1:
- os.system(f'say -v "Ava (Premium)" "{text}"')
- else:
- os.system(f'say -v Samantha "{text}"')
- return True
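
Because _speech above interpolates text straight into a shell command, quotes or backticks in the text reach the shell. A minimal quoting-safe sketch, assuming the same voice choices: passing an argument list to subprocess.run invokes `say` without a shell, so no escaping is needed.

    import subprocess

    def speak(text: str, voice_index: int = 0) -> bool:
        # Build an argv list; since no shell is involved, quotes in
        # `text` cannot break out of the command.
        cmd = ["say"]
        if voice_index == 1:
            cmd += ["-v", "Ava (Premium)"]
        elif voice_index != 0:
            cmd += ["-v", "Samantha"]
        cmd.append(text)
        return subprocess.run(cmd).returncode == 0
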
diff --git a/spaces/1phancelerku/anime-remove-background/Enjoy Unlimited VK Music Download with the Best Tools.md b/spaces/1phancelerku/anime-remove-background/Enjoy Unlimited VK Music Download with the Best Tools.md
deleted file mode 100644
index 3de57926b22b21d147ef7bd4020b22f7fdbd1431..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Enjoy Unlimited VK Music Download with the Best Tools.md
+++ /dev/null
@@ -1,127 +0,0 @@
-
-How to Download VK Music in 2023
-VK, also known as VKontakte, is a popular social media platform in Russia and Europe. It offers a wide range of media content, including movies, videos, photos, and music. Many users enjoy listening to music on VK, as it has a large collection of songs from various genres and artists. However, sometimes you may want to download music from VK to your device, so you can listen to it offline, without ads, or with other players. How can you do that?
-In this article, we will show you four available ways to download music from VK in 2023. You can choose the one that suits your needs and preferences best. Let's get started!
-download vk music
-Download File ❤❤❤ https://jinyurl.com/2uNUpF
- Method 1: Use TunesKit Audio Capture to Record and Download VK Music
-TunesKit Audio Capture is a powerful audio recorder for Windows and Mac that can capture any sound from your computer. It can record and download VK music and other streaming audio from any program or website. It can also save recordings in any format, including MP3, WAV, FLAC, and AAC. It preserves the original audio quality and ID3 tags of the VK music, and it supports batch recording and editing of multiple audio tracks.
-To use TunesKit Audio Capture to download music from VK, you need to follow these steps:
-
-- Download and install TunesKit Audio Capture on your computer.
-- Launch the program and check if there is a browser on the program list. If not, you can add it by drag-and-drop.
-- Go to the VK website and find the music you want to download.
-- Click the Format button and select MP3 or any other format you prefer.
-- Play the music on VK and TunesKit Audio Capture will start recording it automatically.
-- When the music ends, click the Stop button and edit the audio if needed.
-- Save the audio file to your computer and enjoy it offline.
-
- Method 2: Use VK Music Downloader Extension for Chrome to Download VK Music
-VK Music Downloader is a free extension for Chrome that helps you download your music from VK.com. It saves the original name of each soundtrack and can fetch entire playlists and group audio pages, though tracks are downloaded one at a time rather than as a single batch. It has no ads, and its code is open and not obfuscated.
-To use VK Music Downloader extension for Chrome to download music from VK, you need to follow these steps:
-
-- Add the extension to your Chrome browser from [6](https://chrome.google.com/webstore/detail/%D1%81%D0%BA%D0%B0%D1%87%D0%B0%D1%82%D1%8C-%D0%BC%D1%83%D0%B7%D1%8B%D0%BA%D1%83-%D1%81-%D0%B2%D0%BA/bgmpjmdignpongmfjpgaikghaajeidid?hl=en).
-- Go to the VK website and find the music you want to download.
-- Click on the green arrow icon next to the song title and select Download.
-- Save the audio file to your computer and enjoy it offline.
-
- Method 3: Use SaveFrom.net Online Service to Download VK Music
-SaveFrom.net is an online service that allows you to download video and audio from various websites, including YouTube, Facebook, Instagram, Vimeo, and VK. It supports formats such as MP4, MP3, and WEBM. It is easy to use and requires no installation or registration.
-To use SaveFrom.net online service to download music from VK, you need to follow these steps:
-
-- Go to [11](https://en-savefrom.net/) and paste the URL of the VK music you want to download.
-- Click on the Download button and choose the format and quality you prefer.
-- Save the audio file to your computer and enjoy it offline.
-
- Method 4: Use Music Downloader for VK Extension for Chrome to Download VK Music
-Music Downloader for VK is another free extension for Chrome that enables you to download music from VK.com. It adds a download button to each song on the VK website and allows you to download multiple songs at once. It also supports downloading music from other websites, such as SoundCloud, Bandcamp, YouTube, etc. However, it may not work with some songs due to copyright issues.
-How to download music from VKontakte
-VK music downloader Chrome extension
-VK MP3 downloader online
-Best VK music downloader for Windows and Mac
-Download VK music to iPhone or Android
-VK music downloader app for PC
-Free VK music downloader software
-Download VK music playlist in one click
-Download VK music with original quality and ID3 tags
-Download VK music in MP3, AAC, FLAC, WAV, M4A, or M4B format
-Download VK music without registration or login
-Download VK music with subtitles or lyrics
-Download VK music videos and convert to audio
-Download VK music offline and play without internet
-Download VK music from private or public groups
-Download VK music by genre, artist, album, or song name
-Download VK music faster and safer
-Download VK music legally and ethically
-Download VK music without ads or malware
-Download VK music with high speed and stability
-Download unlimited VK music for free
-Download multiple VK music tracks simultaneously
-Download VK music and transfer to iTunes or Spotify
-Download VK music and burn to CD or DVD
-Download VK music and edit with audio editor
-Download VK music and set as ringtone or alarm
-Download VK music and share with friends or family
-Download VK music and sync to cloud storage or devices
-Download VK music and enjoy on any player or device
-Download VK music and create your own playlist or library
-Tips and tricks for downloading VK music easily and efficiently
-Reviews and ratings of the best VK music downloaders in 2023
-Comparison of different methods to download VK music online or offline
-Pros and cons of various types of VK music downloaders for different needs
-FAQs and solutions for downloading VK music from Vkontakte
-How to download HD or 4K VK music videos from Vkontakte
-How to download live or streaming VK music from Vkontakte
-How to download podcasts or audiobooks from Vkontakte
-How to download radio or DJ mixes from Vkontakte
-How to download karaoke or instrumental tracks from Vkontakte
-How to download remixes or covers from Vkontakte
-How to download soundtracks or background music from Vkontakte
-How to download classical or jazz music from Vkontakte
-How to download rock or metal music from Vkontakte
-How to download pop or dance music from Vkontakte
-How to download rap or hip hop music from Vkontakte
-How to download country or folk music from Vkontakte
-How to download reggae or ska music from Vkontakte
-How to download electronic or ambient music from Vkontakte
-To use Music Downloader for VK extension for Chrome to download music from VK, you need to follow these steps:
-
-- Add the extension to your Chrome browser from [10](https://chrome.google.com/webstore/detail/music-downloader-for-vk/ahkohdihdjccebcfgjgffmpdjjknhgla?hl=en).
-- Go to the VK website and find the music you want to download.
-- Click on the download button next to the song title and select Download.
-- Save the audio file to your computer and enjoy it offline.
-
- Conclusion: How to Download VK Music in 2023
-In conclusion, we have shown you four available ways to download music from VK in 2023. You can use TunesKit Audio Capture, VK Music Downloader, SaveFrom.net, or Music Downloader for VK to get your favorite songs from VK.com. Each method has its own advantages and disadvantages, so you can choose the one that works best for you. We recommend TunesKit Audio Capture as the best method, as it can record and download any sound from your computer with high quality and ID3 tags. It also supports batch recording and editing of multiple audio tracks.
-We hope this article has helped you learn how to download music from VK in 2023. If you have any questions or suggestions, please feel free to leave a comment below. Thank you for reading!
- FAQs: How to Download VK Music in 2023
-Q1: Is it legal to download music from VK?
-A1: It depends on the source and the purpose of downloading music from VK. If the music is uploaded by the original artist or authorized by them, then it is legal to download it for personal use. However, if the music is pirated or infringes on someone else's rights, then it is illegal to download it. You should always respect the intellectual property rights of the creators and follow the terms of service of VK.com.
- Q2: How can I download music from VK on my Android phone?
-A2: You can use an app called SnapTube to download music from VK on your Android phone. SnapTube is a video and music downloader that supports various websites, including YouTube, Facebook, Instagram, VK, etc. You can download SnapTube from [9](https://www.snaptubeapp.com/). To use SnapTube to download music from VK, you need to follow these steps:
-
-- Open SnapTube and select VK from the list of supported sites.
-- Login with your VK account and find the music you want to download.
-- Tap on the Download button at the bottom right corner of the screen and choose MP3 or any other format you prefer.
-- Save the audio file to your phone and enjoy it offline.
-
- Q3: How can I download music from VK on my iPhone?
-A3: You can use an app called Documents by Readdle to download music from VK on your iPhone. Documents by Readdle is a file manager and media player that also has a built-in browser and downloader. You can download Documents by Readdle from [8](https://apps.apple.com/us/app/documents-by-readdle/id364901807). To use Documents by Readdle to download music from VK, you need to follow these steps:
-
-- Open Documents by Readdle and tap on the Browser icon at the bottom right corner of the screen.
-- Go to [7](https://en-savefrom.net/) and paste the URL of the VK music you want to download.
-- Tap on the Download button and choose MP3 or any other format you prefer.
-- Save the audio file to your iPhone and enjoy it offline.
-
- Q4: How can I transfer downloaded music from my computer to my phone?
-
-A4: There are different ways to transfer downloaded music from your computer to your phone, depending on the type of your phone and the software you use. Here are some common methods:
-
-- Use a USB cable to connect your phone to your computer and copy the music files to your phone's storage or SD card.
-- Use a cloud service, such as Google Drive, Dropbox, or iCloud, to upload the music files from your computer and download them to your phone.
-- Use a wireless transfer app, such as AirDroid, Shareit, or Xender, to send the music files from your computer to your phone via Wi-Fi or Bluetooth.
-- Use a music streaming app, such as Spotify, Apple Music, or Amazon Music, to sync the music files from your computer to your phone.
-
- Q5: How can I play downloaded music from VK on my phone?
-A5: You can play downloaded music from VK on your phone using any music player app that supports the format of the audio files. For example, you can use VLC, MX Player, Poweramp, or Musicolet to play MP3, WAV, FLAC, or AAC files. You can also use the default music player app on your phone or the Documents by Readdle app if you downloaded the music using it.
-401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/801artistry/RVC801/Applio-RVC-Fork/utils/clonerepo_experimental.py b/spaces/801artistry/RVC801/Applio-RVC-Fork/utils/clonerepo_experimental.py
deleted file mode 100644
index b0ae02648c1307562cf48033908edcf2996db5e2..0000000000000000000000000000000000000000
--- a/spaces/801artistry/RVC801/Applio-RVC-Fork/utils/clonerepo_experimental.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import os
-import subprocess
-import shutil
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from tqdm.notebook import tqdm
-from pathlib import Path
-import requests
-
-def run_script():
- def run_cmd(cmd):
- # capture_output=True is needed for process.stdout to be populated;
- # without it, subprocess.run returns stdout as None.
- process = subprocess.run(cmd, shell=True, check=True, text=True, capture_output=True)
- return process.stdout
-
- # Change the current directory to /content/
- os.chdir('/content/')
- print("Changing dir to /content/")
-
- # Your function to edit the file
- def edit_file(file_path):
- temp_file_path = "/tmp/temp_file.py"
- changes_made = False
- with open(file_path, "r") as file, open(temp_file_path, "w") as temp_file:
- previous_line = ""
- second_previous_line = ""
- for line in file:
- new_line = line.replace("value=160", "value=128")
- if new_line != line:
- print("Replaced 'value=160' with 'value=128'")
- changes_made = True
- line = new_line
-
- new_line = line.replace("crepe hop length: 160", "crepe hop length: 128")
- if new_line != line:
- print("Replaced 'crepe hop length: 160' with 'crepe hop length: 128'")
- changes_made = True
- line = new_line
-
- new_line = line.replace("value=0.88", "value=0.75")
- if new_line != line:
- print("Replaced 'value=0.88' with 'value=0.75'")
- changes_made = True
- line = new_line
-
- if "label=i18n(\"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络\")" in previous_line and "value=1," in line:
- new_line = line.replace("value=1,", "value=0.25,")
- if new_line != line:
- print("Replaced 'value=1,' with 'value=0.25,' based on the condition")
- changes_made = True
- line = new_line
-
- if "label=i18n(\"总训练轮数total_epoch\")" in previous_line and "value=20," in line:
- new_line = line.replace("value=20,", "value=500,")
- if new_line != line:
- print("Replaced 'value=20,' with 'value=500,' based on the condition for DEFAULT EPOCH")
- changes_made = True
- line = new_line
-
- if 'choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny"], # Fork Feature. Add Crepe-Tiny' in previous_line:
- if 'value="pm",' in line:
- new_line = line.replace('value="pm",', 'value="mangio-crepe",')
- if new_line != line:
- print("Replaced 'value=\"pm\",' with 'value=\"mangio-crepe\",' based on the condition")
- changes_made = True
- line = new_line
-
- new_line = line.replace('label=i18n("输入训练文件夹路径"), value="E:\\\\语音音频+标注\\\\米津玄师\\\\src"', 'label=i18n("输入训练文件夹路径"), value="/content/dataset/"')
- if new_line != line:
- print("Replaced 'label=i18n(\"输入训练文件夹路径\"), value=\"E:\\\\语音音频+标注\\\\米津玄师\\\\src\"' with 'label=i18n(\"输入训练文件夹路径\"), value=\"/content/dataset/\"'")
- changes_made = True
- line = new_line
-
- if 'label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"),' in second_previous_line:
- if 'value=i18n("否"),' in line:
- new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
- if new_line != line:
- print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE ONLY LATEST")
- changes_made = True
- line = new_line
-
- if 'label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),' in second_previous_line:
- if 'value=i18n("否"),' in line:
- new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
- if new_line != line:
- print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE SMALL WEIGHTS")
- changes_made = True
- line = new_line
-
- temp_file.write(line)
- second_previous_line = previous_line
- previous_line = line
-
- # Once finished, replace the original file with the temp one.
- # shutil is already imported at the top of this module.
- shutil.move(temp_file_path, file_path)
-
- if changes_made:
- print("Changes made and file saved successfully.")
- else:
- print("No changes were needed.")
-
- # Define the repo path
- repo_path = '/content/Applio-RVC-Fork'
-
- def copy_all_files_in_directory(src_dir, dest_dir):
- # Iterate over all files in source directory
- for item in Path(src_dir).glob('*'):
- if item.is_file():
- # Copy each file to destination directory
- shutil.copy(item, dest_dir)
- else:
- # If it's a directory, make a new directory in the destination and copy the files recursively
- new_dest = Path(dest_dir) / item.name
- new_dest.mkdir(exist_ok=True)
- copy_all_files_in_directory(str(item), str(new_dest))
-
- def clone_and_copy_repo(repo_path):
- # New repository link
- new_repo_link = "https://github.com/IAHispano/Applio-RVC-Fork/"
- # Temporary path to clone the repository
- temp_repo_path = "/content/temp_Applio-RVC-Fork"
- # New folder name
- new_folder_name = "Applio-RVC-Fork"
-
- # Clone the latest code from the new repository to a temporary location
- run_cmd(f"git clone {new_repo_link} {temp_repo_path}")
- os.chdir(temp_repo_path)
-
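- # Note: each "git checkout <sha>" below just moves HEAD, so only the last
- # checkout in this sequence determines the final working-tree state.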
- run_cmd(f"git checkout 3fa4dad3d8961e5ca2522e9e12c0b4ddb71ad402")
- run_cmd(f"git checkout f9e606c279cb49420597519b0a83b92be81e42e4")
- run_cmd(f"git checkout 9e305588844c5442d58add1061b29beeca89d679")
- run_cmd(f"git checkout bf92dc1eb54b4f28d6396a4d1820a25896cc9af8")
- run_cmd(f"git checkout c3810e197d3cb98039973b2f723edf967ecd9e61")
- run_cmd(f"git checkout a33159efd134c2413b0afe26a76b7dc87926d2de")
- run_cmd(f"git checkout 24e251fb62c662e39ac5cf9253cc65deb9be94ec")
- run_cmd(f"git checkout ad5667d3017e93232dba85969cddac1322ba2902")
- run_cmd(f"git checkout ce9715392cf52dd5a0e18e00d1b5e408f08dbf27")
- run_cmd(f"git checkout 7c7da3f2ac68f3bd8f3ad5ca5c700f18ab9f90eb")
- run_cmd(f"git checkout 4ac395eab101955e8960b50d772c26f592161764")
- run_cmd(f"git checkout b15b358702294c7375761584e5276c811ffab5e8")
- run_cmd(f"git checkout 1501793dc490982db9aca84a50647764caa66e51")
- run_cmd(f"git checkout 21f7faf57219c75e6ba837062350391a803e9ae2")
- run_cmd(f"git checkout b5eb689fbc409b49f065a431817f822f554cebe7")
- run_cmd(f"git checkout 7e02fae1ebf24cb151bf6cbe787d06734aa65862")
- run_cmd(f"git checkout 6aea5ea18ed0b9a1e03fa5d268d6bc3c616672a9")
- run_cmd(f"git checkout f0f9b25717e59116473fb42bd7f9252cfc32b398")
- run_cmd(f"git checkout b394de424088a81fc081224bc27338a8651ad3b2")
- run_cmd(f"git checkout f1999406a88b80c965d2082340f5ea2bfa9ab67a")
- run_cmd(f"git checkout d98a0fa8dc715308dfc73eac5c553b69c6ee072b")
- run_cmd(f"git checkout d73267a415fb0eba98477afa43ef71ffd82a7157")
- run_cmd(f"git checkout 1a03d01356ae79179e1fb8d8915dc9cc79925742")
- run_cmd(f"git checkout 81497bb3115e92c754300c9b3992df428886a3e9")
- run_cmd(f"git checkout c5af1f8edcf79cb70f065c0110e279e78e48caf9")
- run_cmd(f"git checkout cdb3c90109387fa4dfa92f53c3864c71170ffc77")
-
- # Edit the file here, before copying
- #edit_file(f"{temp_repo_path}/infer-web.py")
-
- # Copy all files from the cloned repository to the existing path
- copy_all_files_in_directory(temp_repo_path, repo_path)
- print(f"Copying all {new_folder_name} files from GitHub.")
-
- # Change working directory back to /content/
- os.chdir('/content/')
- print("Changed path back to /content/")
-
- # Remove the temporary cloned repository
- shutil.rmtree(temp_repo_path)
-
- # Call the function
- clone_and_copy_repo(repo_path)
-
- # Download the credentials file for RVC archive sheet
- os.makedirs('/content/Applio-RVC-Fork/stats/', exist_ok=True)
- run_cmd("wget -q https://cdn.discordapp.com/attachments/945486970883285045/1114717554481569802/peppy-generator-388800-07722f17a188.json -O /content/Applio-RVC-Fork/stats/peppy-generator-388800-07722f17a188.json")
-
- # Forcefully delete any existing torchcrepe dependencies downloaded from an earlier run just in case
- shutil.rmtree('/content/Applio-RVC-Fork/torchcrepe', ignore_errors=True)
- shutil.rmtree('/content/torchcrepe', ignore_errors=True)
-
- # Download the torchcrepe folder from the maxrmorrison/torchcrepe repository
- run_cmd("git clone https://github.com/maxrmorrison/torchcrepe.git")
- shutil.move('/content/torchcrepe/torchcrepe', '/content/Applio-RVC-Fork/')
- shutil.rmtree('/content/torchcrepe', ignore_errors=True) # Delete the torchcrepe repository folder
-
- # Change the current directory to /content/Applio-RVC-Fork
- os.chdir('/content/Applio-RVC-Fork')
- os.makedirs('pretrained', exist_ok=True)
- os.makedirs('uvr5_weights', exist_ok=True)
-
-def download_file(url, filepath):
- response = requests.get(url, stream=True)
- response.raise_for_status()
-
- with open(filepath, "wb") as file:
- for chunk in response.iter_content(chunk_size=8192):
- if chunk:
- file.write(chunk)
-
-def download_pretrained_models():
- pretrained_models = {
- "pretrained": [
- "D40k.pth",
- "G40k.pth",
- "f0D40k.pth",
- "f0G40k.pth"
- ],
- "pretrained_v2": [
- "D40k.pth",
- "G40k.pth",
- "f0D40k.pth",
- "f0G40k.pth",
- "f0G48k.pth",
- "f0D48k.pth"
- ],
- "uvr5_weights": [
- "HP2-人声vocals+非人声instrumentals.pth",
- "HP5-主旋律人声vocals+其他instrumentals.pth",
- "VR-DeEchoNormal.pth",
- "VR-DeEchoDeReverb.pth",
- "VR-DeEchoAggressive.pth",
- "HP5_only_main_vocal.pth",
- "HP3_all_vocals.pth",
- "HP2_all_vocals.pth"
- ]
- }
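- # The repo name below is assembled as "...VoiceConversionWebU" + "I" so the full URL
- # does not appear verbatim; it resolves to https://huggingface.co/lj1995/VoiceConversionWebUI.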
- part2 = "I"
- base_url = "https://huggingface.co/lj1995/VoiceConversionWebU" + part2 + "/resolve/main/"
- base_path = "/content/Applio-RVC-Fork/"
- base_pathm = base_path
-
- # Calculate total number of files to download
- total_files = sum(len(files) for files in pretrained_models.values()) + 1 # +1 for hubert_base.pt
-
- with tqdm(total=total_files, desc="Downloading files") as pbar:
- for folder, models in pretrained_models.items():
- folder_path = os.path.join(base_path, folder)
- os.makedirs(folder_path, exist_ok=True)
- for model in models:
- url = base_url + folder + "/" + model
- filepath = os.path.join(folder_path, model)
- download_file(url, filepath)
- pbar.update()
-
- # Download hubert_base.pt to the base path
- hubert_url = base_url + "hubert_base.pt"
- hubert_filepath = os.path.join(base_pathm, "hubert_base.pt")
- download_file(hubert_url, hubert_filepath)
- pbar.update()
-def clone_repository(run_download):
- with ThreadPoolExecutor(max_workers=2) as executor:
- executor.submit(run_script)
- if run_download:
- executor.submit(download_pretrained_models)
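
The download_file helper above streams the response but gives up on the first network error. A hedged sketch of a retry wrapper around the same streaming pattern; the attempt count and backoff values are illustrative, not part of the original utility.

    import time
    import requests

    def download_with_retries(url: str, filepath: str, attempts: int = 3) -> None:
        for attempt in range(1, attempts + 1):
            try:
                response = requests.get(url, stream=True, timeout=30)
                response.raise_for_status()
                with open(filepath, "wb") as file:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:  # skip keep-alive chunks
                            file.write(chunk)
                return
            except requests.RequestException:
                if attempt == attempts:
                    raise
                time.sleep(2 ** attempt)  # simple exponential backoff
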
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/solvers/base.py b/spaces/AIConsultant/MusicGen/audiocraft/solvers/base.py
deleted file mode 100644
index 0432e44a36838c5731711f9d54f81822b21f20bd..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/solvers/base.py
+++ /dev/null
@@ -1,631 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from abc import ABC, abstractmethod
-from contextlib import contextmanager
-from pathlib import Path
-import typing as tp
-
-import flashy
-import omegaconf
-import torch
-from torch import nn
-
-from .. import optim
-from ..optim import fsdp
-from ..utils import checkpoint
-from ..utils.autocast import TorchAutocast
-from ..utils.best_state import BestStateDictManager
-from ..utils.deadlock import DeadlockDetect
-from ..utils.profiler import Profiler
-from ..utils.utils import copy_state, dict_from_config, model_hash, with_rank_rng
-
-
-class StandardSolver(ABC, flashy.BaseSolver):
- """Standard solver for AudioCraft.
-
- The standard solver implements a base training loop with the following stages:
- train, valid, evaluate and generate, all of which are expected to be defined for
- solvers in AudioCraft. It also provides sensible default handling of Dora history replay,
- checkpoint management across epochs, and logging configuration.
-
- AudioCraft solvers must inherit from the StandardSolver and define the methods
- associated to each stage as well as the show, build_model and build_dataloaders methods.
- """
- def __init__(self, cfg: omegaconf.DictConfig):
- super().__init__()
- self.logger.info(f"Instantiating solver {self.__class__.__name__} for XP {self.xp.sig}")
- self.logger.info(f"All XP logs are stored in {self.xp.folder}")
- self.cfg = cfg
- self.device = cfg.device
- self.model: nn.Module
- self._continue_best_source_keys = ['best_state', 'fsdp_best_state']
- self._fsdp_modules: tp.List[fsdp.FSDP] = []
- self._ema_sources: nn.ModuleDict = nn.ModuleDict()
- self.ema: tp.Optional[optim.ModuleDictEMA] = None
- self.dataloaders: tp.Dict[str, torch.utils.data.DataLoader] = dict()
- self._log_updates = self.cfg.logging.get('log_updates', 10)
- if self.cfg.logging.log_tensorboard:
- self.init_tensorboard(**self.cfg.get('tensorboard'))
- if self.cfg.logging.log_wandb and self:
- self.init_wandb(**self.cfg.get('wandb'))
- # keep a copy of the best performing state for stateful objects
- # used for evaluation and generation stages
- dtype_best: tp.Optional[torch.dtype] = None
- if self.cfg.fsdp.use:
- dtype_best = getattr(torch, self.cfg.fsdp.param_dtype) # type: ignore
- assert isinstance(dtype_best, torch.dtype)
- elif self.cfg.autocast:
- dtype_best = getattr(torch, self.cfg.autocast_dtype) # type: ignore
- assert isinstance(dtype_best, torch.dtype)
- self.best_state: BestStateDictManager = BestStateDictManager(dtype=dtype_best)
- # Hacky support for keeping a copy of the full best state in rank0.
- self.fsdp_best_state: tp.Dict[str, tp.Any] = {}
- self.register_stateful('best_state', 'fsdp_best_state') # register best_state object to keep it in state_dict
- self._new_best_state: bool = False # should save a new checkpoint
- # instantiate datasets and appropriate number of updates per epoch
- self.build_dataloaders()
- if self.cfg.execute_only is None:
- assert 'train' in self.dataloaders, "The train dataset split must be provided."
- assert 'valid' in self.dataloaders, "The valid dataset split must be provided."
- self.train_updates_per_epoch = len(self.dataloaders['train']) if 'train' in self.dataloaders else 0
- if self.cfg.optim.updates_per_epoch:
- self.train_updates_per_epoch = self.cfg.optim.updates_per_epoch
- self.total_updates = self.train_updates_per_epoch * self.cfg.optim.epochs
- # instantiate model & exponential moving average on the model
- self.build_model()
- self.logger.info("Model hash: %s", model_hash(self.model))
- assert 'model' in self.stateful.sources, \
- "Please register the model to stateful with self.register_stateful('model') in build_model."
- self.profiler = Profiler(self.model, **self.cfg.profiler)
- self.initialize_ema()
- self.register_stateful('ema')
- assert self.ema is None or 'ema' in self.stateful.sources, \
- "Please register the ema to stateful with self.register_stateful('ema') in build_model."
- self.deadlock_detect = DeadlockDetect(**self.cfg.deadlock)
- # basic statistics on the trained model
- model_size = sum(p.numel() for p in self.model.parameters() if p.requires_grad) / 1e6
- # one copy of grad, one copy of momentum, one copy of denominator and model weights.
- # and 4 bytes for each float!
- mem_usage = model_size * 4 * 4 / 1000
- self.logger.info("Model size: %.2f M params", model_size)
- self.logger.info("Base memory usage, with model, grad and optim: %.2f GB", mem_usage)
-
- @property
- def autocast(self):
- """Convenient autocast (or not) using the solver configuration."""
- return TorchAutocast(enabled=self.cfg.autocast, device_type=self.device, dtype=self.autocast_dtype)
-
- def _get_state_source(self, name) -> flashy.state.StateDictSource:
- # Internal utility to get a state source from the solver
- return self.stateful.sources[name]
-
- @property
- def best_metric_name(self) -> tp.Optional[str]:
- """Metric name used to identify the best state. This metric should be stored in the metrics
- used on the stage for best state identification (most likely, `valid`). If None, then
- no best state is saved.
- """
- return None
-
- def register_best_state(self, *args: str):
- """Register state sources in `BestStateDictManager` to keep their best states along with their
- latest states. The best state will be used at evaluation stages instead of the latest states.
-
- Shortcut around the `BestStateDictManager.register` method. You can pass any number of
- attributes, including nested attributes, and those will be included into the checkpoints
- and automatically restored when `BaseSolver.restore` is called.
- """
- for name in args:
- state_source = self._get_state_source(name)
- assert name in self.stateful.sources, "Registered states in best should be registered in stateful first!"
- self.best_state.register(name, state_source)
-
- def register_ema(self, *args: str):
- """Register state sources for exponential moving average.
-
- The registered sources are used to instantiate a ModuleDictEMA instance.
- The ModuleDictEMA keeps a `nn.ModuleDict` module that is updated when self.ema.step() is called
- and swapped with the original state sources with self.swap_ema_state() method.
-
- Usage:
- self.register_ema('model')
- """
- assert self.ema is None, "Cannot register state source to already instantiated EMA."
- for name in args:
- self._ema_sources[name] = getattr(self, name)
-
- def wrap_with_fsdp(self, model: torch.nn.Module, *args, **kwargs):
- model = fsdp.wrap_with_fsdp(self.cfg.fsdp, model, *args, **kwargs)
- if isinstance(model, fsdp.FSDP):
- self._fsdp_modules.append(model)
- return model
-
- def update_best_state_from_stage(self, stage_name: str = 'valid'):
- """Update latest best state based on pending metrics of a given stage. This method relies
- on the `BestStateDictManager.update` method to update the best state_dict with latest weights
- if the registered states happen to match the best performing setup.
- """
- if self.best_metric_name is None:
- # when no best metric is defined, the last state is always the best
- self._new_best_state = True
- self.logger.info("Updating best state with current state.")
- else:
- assert stage_name in self._pending_metrics, f"Metrics for stage {stage_name} not found."
- assert self.best_metric_name in self._pending_metrics[stage_name], \
- f"Best metric not found in {stage_name} metrics. Cannot register best state"
- current_score = self._pending_metrics[stage_name][self.best_metric_name]
- all_best_metric_scores = [
- past_metrics[stage_name][self.best_metric_name]
- for past_metrics in self.history
- ]
- all_best_metric_scores.append(current_score)
- best_score = min(all_best_metric_scores)
- self._new_best_state = current_score == best_score
- if self._new_best_state:
- old_best = min(all_best_metric_scores[:-1] + [float('inf')])
- self.logger.info(
- f"New best state with {self.best_metric_name}={current_score:.3f} (was {old_best:.3f})")
-
- if self._new_best_state:
- if self.cfg.fsdp.use:
- # this will give an empty state dict on all ranks but the rank 0
- # which will have a copy in memory of the full model.
- with fsdp.switch_to_full_state_dict(self._fsdp_modules):
- for name in self.best_state.states.keys():
- state_source = self._get_state_source(name)
- self.best_state.update(name, state_source)
- # we save to a different dict.
- self.fsdp_best_state.update(self.best_state.state_dict())
- # We cannot efficiently load fsdp_best_state when using FSDP,
- # so we have to do a second pass with the local shards.
- for name in self.best_state.states.keys():
- state_source = self._get_state_source(name)
- self.best_state.update(name, state_source)
-
- def _load_new_state_dict(self, state_dict: dict) -> dict:
- old_states = {}
- for name, new_state in state_dict.items():
- state_source = self._get_state_source(name)
- old_states[name] = copy_state(state_source.state_dict())
- state_source.load_state_dict(new_state)
- return old_states
-
- @contextmanager
- def swap_best_state(self):
- self.logger.debug(f"Swapping to best state for: {', '.join(self.best_state.state_dict().keys())}")
- old_states = self._load_new_state_dict(self.best_state.state_dict())
- try:
- yield
- finally:
- self.logger.debug("Swapping back from best to original state")
- for name, old_state in old_states.items():
- state_source = self._get_state_source(name)
- state_source.load_state_dict(old_state)
-
- @contextmanager
- def swap_ema_state(self):
- if self.ema is None:
- yield
- else:
- ema_state_dict = self.ema.state_dict()['state']
- self.logger.debug(f"Swapping to EMA state for: {', '.join(ema_state_dict.keys())}")
- old_states = self._load_new_state_dict(ema_state_dict)
- try:
- yield
- finally:
- self.logger.debug("Swapping back from EMA state to original state")
- for name, old_state in old_states.items():
- state_source = self._get_state_source(name)
- state_source.load_state_dict(old_state)
-
- @property
- def is_training(self):
- return self.current_stage == 'train'
-
- def log_model_summary(self, model: nn.Module):
- """Log model summary, architecture and size of the model."""
- self.logger.info(model)
- mb = sum(p.numel() for p in model.parameters()) * 4 / 2 ** 20
- self.logger.info("Size: %.1f MB", mb)
-
- @abstractmethod
- def build_model(self):
- """Method to implement to initialize model."""
- ...
-
- def initialize_ema(self):
- """Initialize exponential moving average with the registered sources.
- EMA object is created if the optim.ema.model.decay value is non-null.
- """
- from .builders import get_ema
- self.ema = get_ema(self._ema_sources, self.cfg.optim.ema)
- if self.ema is None:
- self.logger.info('No EMA on the model.')
- else:
- assert self.cfg.optim.ema.updates > 0
- self.logger.info(
- f'Initializing EMA on the model with decay = {self.ema.decay}'
- f' every {self.cfg.optim.ema.updates} updates'
- )
-
- @abstractmethod
- def build_dataloaders(self):
- """Method to implement to initialize dataloaders."""
- ...
-
- @abstractmethod
- def show(self):
- """Method to log any information without running the job."""
- ...
-
- @property
- def log_updates(self):
- # convenient access to log updates
- return self._log_updates
-
- def checkpoint_path(self, **kwargs):
- kwargs.setdefault('use_fsdp', self.cfg.fsdp.use)
- return self.folder / checkpoint.checkpoint_name(**kwargs)
-
- def epoch_checkpoint_path(self, epoch: int, **kwargs):
- kwargs.setdefault('use_fsdp', self.cfg.fsdp.use)
- return self.folder / checkpoint.checkpoint_name(str(epoch), **kwargs)
-
- def checkpoint_path_with_name(self, name: str, **kwargs):
- kwargs.setdefault('use_fsdp', self.cfg.fsdp.use)
- return self.folder / checkpoint.checkpoint_name(name=name, **kwargs)
-
- def save_checkpoints(self):
- """Save checkpoint, optionally keeping a copy for a given epoch."""
- is_sharded = self.cfg.fsdp.use
- if not flashy.distrib.is_rank_zero() and not is_sharded:
- return
- self.logger.info("Model hash: %s", model_hash(self.model))
- state = self.state_dict()
- epoch = self.epoch - 1 # pushing metrics will increase the epoch in Flashy, so we do -1 here
-
- # save minimal state_dict as new checkpoint every X epoch
- if self.cfg.checkpoint.save_every:
- if epoch % self.cfg.checkpoint.save_every == 0:
- minimal_state = state
- if self.cfg.checkpoint.keep_every_states is not None and len(self.cfg.checkpoint.keep_every_states) > 0:
- minimal_state = {
- name: source for name, source in state.items()
- if name in self.cfg.checkpoint.keep_every_states
- }
- epoch_checkpoint_path = self.epoch_checkpoint_path(epoch)
- checkpoint.save_checkpoint(minimal_state, epoch_checkpoint_path, is_sharded)
-
- # save checkpoint as latest checkpoint
- if self.cfg.checkpoint.save_last:
- last_checkpoint_path = self.checkpoint_path()
- checkpoint.save_checkpoint(state, last_checkpoint_path, is_sharded)
-
- # flush any stale checkpoint to reduce disk footprint
- checkpoint.flush_stale_checkpoints(self.checkpoint_path())
-
- def load_from_pretrained(self, name: str) -> dict:
- raise NotImplementedError("Solver does not provide a way to load pretrained models.")
-
- def load_checkpoints(self, load_best: bool = False, ignore_state_keys: tp.List[str] = []) -> tp.Optional[dict]:
- """Load last checkpoint or the one specified in continue_from.
-
- Args:
- load_best (bool): Whether to load from best state dict or not.
- Best state dict is always used when not loading the current xp.
- ignore_state_keys (list of str): List of sources to ignore when loading the state, e.g. `optimizer`.
- Returns:
- state (dict, optional): The loaded state dictionary.
- """
- # load checkpoints from xp folder or cfg.continue_from
- is_sharded = self.cfg.fsdp.use
- load_from_path: tp.Optional[Path] = None
- checkpoint_source: tp.Optional[checkpoint.CheckpointSource] = None
-
- if load_best:
- self.logger.info("Trying to load state_dict from best state.")
-
- state: tp.Optional[dict] = None
- rank0_checkpoint_path = self.checkpoint_path(use_fsdp=False)
- current_checkpoint_path = self.checkpoint_path()
- _pretrained_prefix = '//pretrained/'
- continue_pretrained = (self.cfg.continue_from or '').startswith(_pretrained_prefix)
- if rank0_checkpoint_path.exists():
- self.logger.info(f"Loading existing checkpoint: {current_checkpoint_path}")
- load_from_path = current_checkpoint_path
- checkpoint.check_sharded_checkpoint(current_checkpoint_path, rank0_checkpoint_path)
- checkpoint_source = checkpoint.CheckpointSource.CURRENT_XP
- elif self.cfg.continue_from and not continue_pretrained:
- self.logger.info(f"Continuing from provided checkpoint: {self.cfg.continue_from}")
- # we're always continuing from consolidated checkpoints: self.cfg.use_fsdp and not continue_best
- load_from_path = checkpoint.resolve_checkpoint_path(self.cfg.continue_from, use_fsdp=False)
- if load_from_path is None:
- self.logger.error('Could not resolve the continue_from checkpoint %s', self.cfg.continue_from)
- raise RuntimeError(f'Could not resolve continue_from checkpoint {self.cfg.continue_from}')
- checkpoint_source = checkpoint.CheckpointSource.OTHER
-
- if load_from_path is not None:
- state = checkpoint.load_checkpoint(load_from_path, is_sharded)
- elif continue_pretrained:
- self.logger.info("Loading a pretrained model. Ignoring 'load_best' and 'ignore_state_keys' params.")
- state = self.load_from_pretrained(self.cfg.continue_from[len(_pretrained_prefix):])
- checkpoint_source = checkpoint.CheckpointSource.PRETRAINED
- load_best = True
-
- # checkpoints are not from the current xp, we only retrieve the best state
- if checkpoint_source is not None and checkpoint_source != checkpoint.CheckpointSource.CURRENT_XP:
- assert state is not None
- self.logger.info("Checkpoint source is not the current xp: Load state_dict from best state.")
- load_best = True
- state = {key: state[key] for key in self._continue_best_source_keys if key in state}
- # loaded checkpoints are FSDP checkpoints: we're reading the best state
- # from FSDP and we drop the regular best_state
- if 'fsdp_best_state' in state and state['fsdp_best_state']:
- state.pop('best_state', None)
- self.logger.info("... Loaded checkpoint has FSDP best state")
- # FSDP is enabled in the solver, if the loaded checkpoints do not have FSDP support
- # then we're initializing FSDP best state with the regular best state
- elif self.cfg.fsdp.use:
- if 'fsdp_best_state' not in state or not state['fsdp_best_state']:
- # we swap non-FSDP checkpoints best_state to FSDP-compatible best state
- state['fsdp_best_state'] = state.pop('best_state')
- self.logger.info("... Loaded checkpoint does not have FSDP best state. Use regular best state")
-
- if state is not None:
- if load_best:
- self.logger.info("Ignoring keys when loading best %r", ignore_state_keys)
- for key in set(ignore_state_keys):
- if key in state:
- state.pop(key)
- has_best_state = 'best_state' in state or 'fsdp_best_state' in state
- assert has_best_state, ("Trying to load best state but neither 'best_state' "
- "nor 'fsdp_best_state' was found in checkpoints.")
- self.load_state_dict(state)
-
- # for FSDP, let's make extra sure nothing bad happened with out of sync
- # checkpoints across workers.
- epoch = float(self.epoch)
- avg_epoch = flashy.distrib.average_metrics({'epoch': epoch})['epoch']
- if avg_epoch != epoch:
- raise RuntimeError(
- f"Inconsistent loading of checkpoints happened, our epoch is {epoch} "
- f"but average of epochs is {avg_epoch}, at least one gpu must have a "
- "different epoch number.")
-
- # on load_best, properly reinitialize state_dict, best states and ema
- # otherwise we load from the current xp and don't alter anything
- if load_best:
- self.logger.info("Loading state_dict from best state.")
- if not self.cfg.fsdp.use and self.fsdp_best_state:
- # loading from an FSDP checkpoint but with FSDP deactivated
- self.logger.info("... Loading from FSDP best state dict.")
- self.best_state.load_state_dict(self.fsdp_best_state)
-
- # if load_best, we permanently override the regular state_dict with the best state
- if self.cfg.fsdp.use:
- self.logger.info("FSDP is used, loading from FSDP best state.")
- with fsdp.switch_to_full_state_dict(self._fsdp_modules):
- # this might be really fragile but okay for now.
- self.load_state_dict(self.fsdp_best_state)
- else:
- # we permanently swap the stateful objects to their best state
- self._load_new_state_dict(self.best_state.state_dict())
-
- # the EMA modules should also be instantiated with best state.
- # the easiest way to do so is to reinitialize a new EMA with best state loaded.
- if self.ema is not None:
- self.logger.info("Re-initializing EMA from best state")
- self.initialize_ema()
-
- if self.cfg.fsdp.use:
- self.logger.info("Re-initializing best state after using FSDP best state.")
- for name in self.best_state.states.keys():
- state_source = self._get_state_source(name)
- self.best_state.update(name, state_source)
-
- return state
-
- def restore(self, load_best: bool = False, replay_metrics: bool = False,
- ignore_state_keys: tp.List[str] = []) -> bool:
- """Restore the status of a solver for a given xp.
-
- Args:
- load_best (bool): if `True`, load the best state from the checkpoint.
- replay_metrics (bool): if `True`, logs all the metrics from past epochs.
- ignore_state_keys (list of str): list of sources to ignore when loading the state, e.g. `optimizer`.
- """
- self.logger.info("Restoring weights and history.")
- restored_checkpoints = self.load_checkpoints(load_best, ignore_state_keys)
-
- self.logger.info("Model hash: %s", model_hash(self.model))
-
- if replay_metrics and len(self.history) > 0:
- self.logger.info("Replaying past metrics...")
- for epoch, stages in enumerate(self.history):
- for stage_name, metrics in stages.items():
- # We manually log the metrics summary to the result logger
- # as we don't want to add them to the pending metrics
- self.result_logger._log_summary(stage_name, metrics, step=epoch + 1, step_name='epoch',
- formatter=self.get_formatter(stage_name))
- return restored_checkpoints is not None
-
- def commit(self, save_checkpoints: bool = True):
- """Commit metrics to dora and save checkpoints at the end of an epoch."""
- # we override commit to introduce more complex checkpoint saving behaviors
- self.history.append(self._pending_metrics) # This will increase self.epoch
- if save_checkpoints:
- self.save_checkpoints()
- self._start_epoch()
- if flashy.distrib.is_rank_zero():
- self.xp.link.update_history(self.history)
-
- def run_epoch(self):
- """Run a single epoch with all stages.
-
- Metrics for a given stage are stored in _pending_metrics and committed by the solver afterwards.
- Children solvers can extend this method with custom behavior, e.g.:
-
- def run_epoch(self):
- ... # custom code
- super().run_epoch()
- ... # custom code
- """
- self.run_stage('train', self.train)
- with torch.no_grad():
- with self.swap_ema_state():
- self.run_stage('valid', self.valid)
- # the best state is updated with EMA states if available
- self.update_best_state_from_stage('valid')
- with self.swap_best_state():
- if self.should_run_stage('evaluate'):
- self.run_stage('evaluate', self.evaluate)
- if self.should_run_stage('generate'):
- self.run_stage('generate', with_rank_rng()(self.generate))
-
- def run(self):
- """Training loop."""
- assert len(self.state_dict()) > 0
- self.restore(replay_metrics=True) # load checkpoint and replay history
- self.log_hyperparams(dict_from_config(self.cfg))
- for epoch in range(self.epoch, self.cfg.optim.epochs + 1):
- if self.should_stop_training():
- return
- self.run_epoch()
- # Commit will send the metrics to Dora and save checkpoints by default.
- self.commit()
-
- def should_stop_training(self) -> bool:
- """Check whether we should stop training or not."""
- return self.epoch > self.cfg.optim.epochs
-
- def should_run_stage(self, stage_name) -> bool:
- """Check whether we want to run the specified stages."""
- stage_every = self.cfg[stage_name].get('every', None)
- is_last_epoch = self.epoch == self.cfg.optim.epochs
- is_epoch_every = (stage_every and self.epoch % stage_every == 0)
- return is_last_epoch or is_epoch_every
-
- @abstractmethod
- def run_step(self, idx: int, batch: tp.Any, metrics: dict):
- """Perform one training or valid step on a given batch."""
- ...
-
- def common_train_valid(self, dataset_split: str, **kwargs: tp.Any):
- """Common logic for train and valid stages."""
- self.model.train(self.is_training)
-
- loader = self.dataloaders[dataset_split]
- # get a different order for distributed training, otherwise this will get ignored
- if flashy.distrib.world_size() > 1 \
- and isinstance(loader.sampler, torch.utils.data.distributed.DistributedSampler):
- loader.sampler.set_epoch(self.epoch)
- updates_per_epoch = self.train_updates_per_epoch if self.is_training else len(loader)
- if self.cfg.benchmark_no_load:
- self.logger.warning("Fake loading for benchmarking: re-using first batch")
- batch = next(iter(loader))
- loader = [batch] * updates_per_epoch # type: ignore
- lp = self.log_progress(self.current_stage, loader, total=updates_per_epoch, updates=self.log_updates)
- average = flashy.averager() # epoch wise average
- instant_average = flashy.averager() # average between two logging
- metrics: dict = {}
-
- with self.profiler, self.deadlock_detect: # profiler will only run for the first 20 updates.
- for idx, batch in enumerate(lp):
- self.deadlock_detect.update('batch')
- if idx >= updates_per_epoch:
- break
- metrics = {}
- metrics = self.run_step(idx, batch, metrics)
- self.deadlock_detect.update('step')
- # run EMA step
- if self.ema is not None and self.is_training and (idx + 1) % self.cfg.optim.ema.updates == 0:
- self.logger.debug("EMA model step")
- self.ema.step()
- self.deadlock_detect.update('ema')
- self.profiler.step()
- instant_metrics = instant_average(metrics)
- if lp.update(**instant_metrics):
- instant_average = flashy.averager() # reset averager between two logging
- metrics = average(metrics) # epoch wise average
- self.deadlock_detect.update('end_batch')
-
- metrics = flashy.distrib.average_metrics(metrics, updates_per_epoch)
- return metrics
-
- def train(self):
- """Train stage."""
- return self.common_train_valid('train')
-
- def valid(self):
- """Valid stage."""
- return self.common_train_valid('valid')
-
- @abstractmethod
- def evaluate(self):
- """Evaluate stage."""
- ...
-
- @abstractmethod
- def generate(self):
- """Generate stage."""
- ...
-
- def run_one_stage(self, stage_name: str):
- """Run only the specified stage.
- This method is useful to only generate samples from a trained experiment
- or rerun the validation or evaluation stages.
- """
- fn = {
- 'generate': with_rank_rng()(self.generate),
- 'evaluate': self.evaluate,
- 'valid': self.valid,
- }
- if stage_name not in fn:
-            raise ValueError(f'Running stage {stage_name} is not supported.')
- assert len(self.state_dict()) > 0
- self._start_epoch()
- with torch.no_grad(), self.swap_best_state():
- self.run_stage(stage_name, fn[stage_name])
- if not self.cfg.execute_inplace:
- self.commit(save_checkpoints=False)
-
- @staticmethod
- def get_eval_solver_from_sig(sig: str, dtype: tp.Optional[str] = None,
- device: tp.Optional[str] = None, autocast: bool = True,
- batch_size: tp.Optional[int] = None,
- override_cfg: tp.Optional[tp.Union[dict, omegaconf.DictConfig]] = None,
- **kwargs):
-        """Mostly a convenience function around audiocraft.train.get_solver_from_sig,
-        populating all the proper params, deactivating EMA, FSDP, loading the best state,
-        basically all you need to get a solver ready to "play" with in single GPU mode
-        and with minimal memory overhead.
-
-        Args:
-            sig (str): signature to load.
-            dtype (str or None): potential dtype, as a string, e.g. 'float16'.
-            device (str or None): potential device, as a string, e.g. 'cuda'.
-            autocast (bool): whether to activate autocast.
-            batch_size (int or None): optional batch size to use for the dataset.
-            override_cfg (dict or omegaconf.DictConfig or None): optional extra config
-                overrides, merged with the ones this function sets.
-        """
- from audiocraft import train
- our_override_cfg: tp.Dict[str, tp.Any] = {'optim': {'ema': {'use': False}}}
- our_override_cfg['autocast'] = autocast
- if dtype is not None:
- our_override_cfg['dtype'] = dtype
- if device is not None:
- our_override_cfg['device'] = device
- if batch_size is not None:
- our_override_cfg['dataset'] = {'batch_size': batch_size}
- if override_cfg is None:
- override_cfg = {}
- override_cfg = omegaconf.OmegaConf.merge(
- omegaconf.DictConfig(override_cfg), omegaconf.DictConfig(our_override_cfg)) # type: ignore
- solver = train.get_solver_from_sig(
- sig, override_cfg=override_cfg,
- load_best=True, disable_fsdp=True,
- ignore_state_keys=['optimizer', 'ema'], **kwargs)
- solver.model.eval()
- return solver
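-
-# Illustrative sketch only (the signature string is made up; call the staticmethod on
-# a concrete solver class):
-#
-#     solver = MySolver.get_eval_solver_from_sig('abc123de', dtype='float16', device='cuda')
-#     with torch.no_grad():
-#         solver.run_one_stage('generate')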
diff --git a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/__init__.py b/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/AIGC-Audio/Make_An_Audio/wav_evaluation/models/__init__.py b/spaces/AIGC-Audio/Make_An_Audio/wav_evaluation/models/__init__.py
deleted file mode 100644
index aadad97ebc9ec23fdebab974a99e343de90f8afd..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/Make_An_Audio/wav_evaluation/models/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from . import clap
-from . import audio
-from . import utils
\ No newline at end of file
diff --git a/spaces/AIGText/GlyphControl/transfer.py b/spaces/AIGText/GlyphControl/transfer.py
deleted file mode 100644
index 3d48e4872474783e050b4276d544dad5b704f7dc..0000000000000000000000000000000000000000
--- a/spaces/AIGText/GlyphControl/transfer.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from omegaconf import OmegaConf
-from scripts.rendertext_tool import Render_Text, load_model_from_config
-import torch
-
-# cfg = OmegaConf.load("other_configs/config_ema.yaml")
-# model = load_model_from_config(cfg, "model_states.pt", verbose=True)
-# model = load_model_from_config(cfg, "mp_rank_00_model_states.pt", verbose=True)
-
-cfg = OmegaConf.load("other_configs/config_ema_unlock.yaml")
-epoch_idx = 39
-model = load_model_from_config(cfg, "epoch={:0>6d}.ckpt".format(epoch_idx), verbose=True)
-
-from pytorch_lightning.callbacks import ModelCheckpoint
-with model.ema_scope("store ema weights"):
- model_sd = model.state_dict()
- store_sd = {}
- for key in model_sd:
- if "ema" in key:
- continue
- store_sd[key] = model_sd[key]
- file_content = {
- 'state_dict': store_sd
- }
- torch.save(file_content, f"textcaps5K_epoch_{epoch_idx+1}_model_wo_ema.ckpt")
-    print("Stored the transferred ckpt.")
-print("trial ends!")
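-
-# Minimal sketch of loading the stored checkpoint back (the path mirrors the save above;
-# strict=False is an assumption, since the EMA keys were stripped from the state dict):
-#
-#     sd = torch.load(f"textcaps5K_epoch_{epoch_idx+1}_model_wo_ema.ckpt")["state_dict"]
-#     model.load_state_dict(sd, strict=False)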
diff --git a/spaces/AIGuardians/SummarizeWikipediaDocument/README.md b/spaces/AIGuardians/SummarizeWikipediaDocument/README.md
deleted file mode 100644
index e7aeb7a95980fa5ddcdb19d5570b391746c77a49..0000000000000000000000000000000000000000
--- a/spaces/AIGuardians/SummarizeWikipediaDocument/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Summaraize
-emoji: 🏢
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_32xb64-warmup_in1k.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_32xb64-warmup_in1k.py
deleted file mode 100644
index 34d5288b9d3f9fcf3f0b409dc1c17906654c2170..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_32xb64-warmup_in1k.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
- '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs64.py',
- '../_base_/schedules/imagenet_bs2048.py', '../_base_/default_runtime.py'
-]
diff --git a/spaces/Ababababababbababa/Arabic_poem_classifier/app.py b/spaces/Ababababababbababa/Arabic_poem_classifier/app.py
deleted file mode 100644
index bbf72b782320453cd5d9fb4e7e1ebd99fc972af8..0000000000000000000000000000000000000000
--- a/spaces/Ababababababbababa/Arabic_poem_classifier/app.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import gradio as gr
-
-# UI copy below is Arabic; roughly: "Identify the characteristics of a verse of poetry",
-# followed by a title listing the three options: meter, rhyme letter (rawiy) and subject.
-description = "التعرف على خاصيات البيت الشعري"
-title = """هذا البرنامج يقوم بالتعرف على مختلف خاصيات البيت من الشعر.
-يمكنكم إختيار الخاصية من بين:
-- التعرف على البحر
-- التعرف على الروي
-- التعرف على الموضوع"""
-
-examples = [["سَلو قَلبي غَداةَ سَلا وَثابا لَعَلَّ عَلى الجَمالِ لَهُ عِتابا"], ["قفا نبك من ذِكرى حبيب ومنزلِ بسِقطِ اللِّوى بينَ الدَّخول فحَوْملِ"]]
-
-
-meter = gr.Interface.load("huggingface/Yah216/Arabic_poem_meter_3",
- description="من فضلك، أدخل البيت الشعري الذي تود التعرف عليه",
- examples=examples, title = "التعرف على البحر",
- inputs = gr.inputs.Textbox(lines = 3, label = "البيت")
-
-)
-rawiy = gr.Interface.load("huggingface/Yah216/Poem_Qafiyah_Detection",
- title ="التعرف على الروي",
- examples=examples,
- description="من فضلك، أدخل البيت الشعري الذي تود التعرف عليه",
- inputs = gr.inputs.Textbox(lines = 3, label = "البيت")
-
-)
-subject = gr.Interface.load(
- "huggingface/zenkri/autotrain-Arabic_Poetry_by_Subject-920730230",
- title="التعرف على الموضوع",
- examples=examples,
- description="من فضلك، أدخل البيت الشعري الذي تود التعرف عليه",
- inputs = gr.inputs.Textbox(lines = 3, label = "البيت")
-
-)
-demo = gr.TabbedInterface([meter, rawiy, subject], ["التعرف على البحر","التعرف على الروي","التعرف على الموضوع"])
-demo.launch()
-
diff --git a/spaces/Adapter/T2I-Adapter/experiments/README.md b/spaces/Adapter/T2I-Adapter/experiments/README.md
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/AkitoP/umamusume_bert_vits2/models.py b/spaces/AkitoP/umamusume_bert_vits2/models.py
deleted file mode 100644
index dd9e0c087357ecfc5a1548eddb5a30d77d2b5bf5..0000000000000000000000000000000000000000
--- a/spaces/AkitoP/umamusume_bert_vits2/models.py
+++ /dev/null
@@ -1,986 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-
-from commons import init_weights, get_padding
-from text import symbols, num_tones, num_languages
-
-
-class DurationDiscriminator(nn.Module): # vits2
- def __init__(
- self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
- ):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(
- in_channels, filter_channels, kernel_size, padding=kernel_size // 2
- )
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(
- filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
- )
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.dur_proj = nn.Conv1d(1, filter_channels, 1)
-
- self.pre_out_conv_1 = nn.Conv1d(
- 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
- )
- self.pre_out_norm_1 = modules.LayerNorm(filter_channels)
- self.pre_out_conv_2 = nn.Conv1d(
- filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
- )
- self.pre_out_norm_2 = modules.LayerNorm(filter_channels)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())
-
- def forward_probability(self, x, x_mask, dur, g=None):
- dur = self.dur_proj(dur)
- x = torch.cat([x, dur], dim=1)
- x = self.pre_out_conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.pre_out_norm_1(x)
- x = self.drop(x)
- x = self.pre_out_conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.pre_out_norm_2(x)
- x = self.drop(x)
- x = x * x_mask
- x = x.transpose(1, 2)
- output_prob = self.output_layer(x)
- return output_prob
-
- def forward(self, x, x_mask, dur_r, dur_hat, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
-
- output_probs = []
- for dur in [dur_r, dur_hat]:
- output_prob = self.forward_probability(x, x_mask, dur, g)
- output_probs.append(output_prob)
-
- return output_probs
-
-
-class TransformerCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- n_flows=4,
- gin_channels=0,
- share_parameter=False,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
-
- self.wn = (
- attentions.FFT(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- isflow=True,
- gin_channels=self.gin_channels,
- )
- if share_parameter
- else None
- )
-
- for i in range(n_flows):
- self.flows.append(
- modules.TransformerCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- n_layers,
- n_heads,
- p_dropout,
- filter_channels,
- mean_only=True,
- wn_sharing_parameter=self.wn,
- gin_channels=self.gin_channels,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class StochasticDurationPredictor(nn.Module):
- def __init__(
- self,
- in_channels,
- filter_channels,
- kernel_size,
- p_dropout,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
-        filter_channels = in_channels  # NOTE: overrides the argument; to be removed in a future version.
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.log_flow = modules.Log()
- self.flows = nn.ModuleList()
- self.flows.append(modules.ElementwiseAffine(2))
- for i in range(n_flows):
- self.flows.append(
- modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)
- )
- self.flows.append(modules.Flip())
-
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.post_convs = modules.DDSConv(
- filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout
- )
- self.post_flows = nn.ModuleList()
- self.post_flows.append(modules.ElementwiseAffine(2))
- for i in range(4):
- self.post_flows.append(
- modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)
- )
- self.post_flows.append(modules.Flip())
-
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.convs = modules.DDSConv(
- filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout
- )
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
- x = torch.detach(x)
- x = self.pre(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.convs(x, x_mask)
- x = self.proj(x) * x_mask
-
- if not reverse:
- flows = self.flows
- assert w is not None
-
- logdet_tot_q = 0
- h_w = self.post_pre(w)
- h_w = self.post_convs(h_w, x_mask)
- h_w = self.post_proj(h_w) * x_mask
- e_q = (
- torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype)
- * x_mask
- )
- z_q = e_q
- for flow in self.post_flows:
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
- logdet_tot_q += logdet_q
- z_u, z1 = torch.split(z_q, [1, 1], 1)
- u = torch.sigmoid(z_u) * x_mask
- z0 = (w - u) * x_mask
- logdet_tot_q += torch.sum(
- (F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]
- )
- logq = (
- torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2])
- - logdet_tot_q
- )
-
- logdet_tot = 0
- z0, logdet = self.log_flow(z0, x_mask)
- logdet_tot += logdet
- z = torch.cat([z0, z1], 1)
- for flow in flows:
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
- logdet_tot = logdet_tot + logdet
- nll = (
- torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2])
- - logdet_tot
- )
- return nll + logq # [b]
- else:
- flows = list(reversed(self.flows))
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
- z = (
- torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype)
- * noise_scale
- )
- for flow in flows:
- z = flow(z, x_mask, g=x, reverse=reverse)
- z0, z1 = torch.split(z, [1, 1], 1)
- logw = z0
- return logw
-
-
-class DurationPredictor(nn.Module):
- def __init__(
- self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
- ):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(
- in_channels, filter_channels, kernel_size, padding=kernel_size // 2
- )
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(
- filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
- )
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.proj = nn.Conv1d(filter_channels, 1, 1)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- def forward(self, x, x_mask, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
- x = self.proj(x * x_mask)
- return x * x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(
- self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- gin_channels=0,
- ):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
- self.emb = nn.Embedding(len(symbols), hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
- self.tone_emb = nn.Embedding(num_tones, hidden_channels)
- nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels**-0.5)
- self.language_emb = nn.Embedding(num_languages, hidden_channels)
- nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels**-0.5)
- self.bert_proj = nn.Conv1d(1024, hidden_channels, 1)
- self.ja_bert_proj = nn.Conv1d(768, hidden_channels, 1)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- gin_channels=self.gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, tone, language, bert, ja_bert, g=None):
- bert_emb = self.bert_proj(bert).transpose(1, 2)
- ja_bert_emb = self.ja_bert_proj(ja_bert).transpose(1, 2)
- x = (
- self.emb(x)
- + self.tone_emb(tone)
- + self.language_emb(language)
- + bert_emb
- + ja_bert_emb
- ) * math.sqrt(
- self.hidden_channels
- ) # [b, t, h]
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
-
- x = self.encoder(x * x_mask, x_mask, g=g)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print("Removing weight norm...")
- for layer in self.ups:
- remove_weight_norm(layer)
- for layer in self.resblocks:
- layer.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm is False else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for layer in self.convs:
- x = layer(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm is False else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for layer in self.convs:
- x = layer(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class ReferenceEncoder(nn.Module):
- """
- inputs --- [N, Ty/r, n_mels*r] mels
- outputs --- [N, ref_enc_gru_size]
- """
-
- def __init__(self, spec_channels, gin_channels=0):
- super().__init__()
- self.spec_channels = spec_channels
- ref_enc_filters = [32, 32, 64, 64, 128, 128]
- K = len(ref_enc_filters)
- filters = [1] + ref_enc_filters
- convs = [
- weight_norm(
- nn.Conv2d(
- in_channels=filters[i],
- out_channels=filters[i + 1],
- kernel_size=(3, 3),
- stride=(2, 2),
- padding=(1, 1),
- )
- )
- for i in range(K)
- ]
- self.convs = nn.ModuleList(convs)
- # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)]) # noqa: E501
-
- out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K)
- self.gru = nn.GRU(
- input_size=ref_enc_filters[-1] * out_channels,
- hidden_size=256 // 2,
- batch_first=True,
- )
- self.proj = nn.Linear(128, gin_channels)
-
- def forward(self, inputs, mask=None):
- N = inputs.size(0)
- out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs]
- for conv in self.convs:
- out = conv(out)
- # out = wn(out)
- out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K]
-
- out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K]
- T = out.size(1)
- N = out.size(0)
- out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K]
-
- self.gru.flatten_parameters()
- memory, out = self.gru(out) # out --- [1, N, 128]
-
- return self.proj(out.squeeze(0))
-
- def calculate_channels(self, L, kernel_size, stride, pad, n_convs):
- for i in range(n_convs):
- L = (L - kernel_size + 2 * pad) // stride + 1
- return L
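-
-    # e.g. calculate_channels(128, 3, 2, 1, 6): 128 -> 64 -> 32 -> 16 -> 8 -> 4 -> 2,
-    # so with spec_channels=128 the GRU input size is ref_enc_filters[-1] * 2 = 256.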
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(
- self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=256,
- gin_channels=256,
- use_sdp=True,
- n_flow_layer=4,
- n_layers_trans_flow=6,
- flow_share_parameter=False,
- use_transformer_flow=True,
- **kwargs
- ):
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
- self.n_layers_trans_flow = n_layers_trans_flow
- self.use_spk_conditioned_encoder = kwargs.get(
- "use_spk_conditioned_encoder", True
- )
- self.use_sdp = use_sdp
- self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False)
- self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01)
- self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6)
- self.current_mas_noise_scale = self.mas_noise_scale_initial
- if self.use_spk_conditioned_encoder and gin_channels > 0:
- self.enc_gin_channels = gin_channels
- self.enc_p = TextEncoder(
- n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- gin_channels=self.enc_gin_channels,
- )
- self.dec = Generator(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- if use_transformer_flow:
- self.flow = TransformerCouplingBlock(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers_trans_flow,
- 5,
- p_dropout,
- n_flow_layer,
- gin_channels=gin_channels,
- share_parameter=flow_share_parameter,
- )
- else:
- self.flow = ResidualCouplingBlock(
- inter_channels,
- hidden_channels,
- 5,
- 1,
- n_flow_layer,
- gin_channels=gin_channels,
- )
- self.sdp = StochasticDurationPredictor(
- hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels
- )
- self.dp = DurationPredictor(
- hidden_channels, 256, 3, 0.5, gin_channels=gin_channels
- )
-
-        # NOTE: forward()/infer() branch on `n_speakers > 0`, so pass n_speakers=0 (not 1)
-        # when the reference encoder should be used instead of speaker embeddings.
-        if n_speakers > 1:
-            self.emb_g = nn.Embedding(n_speakers, gin_channels)
-        else:
-            self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)
-
- def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert, ja_bert):
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)
- x, m_p, logs_p, x_mask = self.enc_p(
- x, x_lengths, tone, language, bert, ja_bert, g=g
- )
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
-
- with torch.no_grad():
- # negative cross-entropy
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
- neg_cent1 = torch.sum(
- -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True
- ) # [b, 1, t_s]
- neg_cent2 = torch.matmul(
- -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r
- ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent3 = torch.matmul(
- z_p.transpose(1, 2), (m_p * s_p_sq_r)
- ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent4 = torch.sum(
- -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True
- ) # [b, 1, t_s]
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
- if self.use_noise_scaled_mas:
- epsilon = (
- torch.std(neg_cent)
- * torch.randn_like(neg_cent)
- * self.current_mas_noise_scale
- )
- neg_cent = neg_cent + epsilon
-
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = (
- monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))
- .unsqueeze(1)
- .detach()
- )
-
- w = attn.sum(2)
-
- l_length_sdp = self.sdp(x, x_mask, w, g=g)
- l_length_sdp = l_length_sdp / torch.sum(x_mask)
-
- logw_ = torch.log(w + 1e-6) * x_mask
- logw = self.dp(x, x_mask, g=g)
- l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(
- x_mask
- ) # for averaging
-
- l_length = l_length_dp + l_length_sdp
-
- # expand prior
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- o = self.dec(z_slice, g=g)
- return (
- o,
- l_length,
- attn,
- ids_slice,
- x_mask,
- y_mask,
- (z, z_p, m_p, logs_p, m_q, logs_q),
- (x, logw, logw_),
- )
-
- def infer(
- self,
- x,
- x_lengths,
- sid,
- tone,
- language,
- bert,
- ja_bert,
- noise_scale=0.667,
- length_scale=1,
- noise_scale_w=0.8,
- max_len=None,
- sdp_ratio=0,
- y=None,
- ):
- # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)
- # g = self.gst(y)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)
- x, m_p, logs_p, x_mask = self.enc_p(
- x, x_lengths, tone, language, bert, ja_bert, g=g
- )
- logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (
- sdp_ratio
- ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(
- x_mask.dtype
- )
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(
- 1, 2
- ) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(
- 1, 2
- ) # [b, t', t], [b, t, d] -> [b, d, t']
-
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
- z = self.flow(z_p, y_mask, g=g, reverse=True)
- o = self.dec((z * y_mask)[:, :, :max_len], g=g)
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
diff --git a/spaces/Akshat231/super_space/app.py b/spaces/Akshat231/super_space/app.py
deleted file mode 100644
index d044f942fcd808f11c44838bb5fe54268d17babb..0000000000000000000000000000000000000000
--- a/spaces/Akshat231/super_space/app.py
+++ /dev/null
@@ -1,122 +0,0 @@
-## THIS IS FOR SUPER-RESOLUTION
-
-import gradio as gr
-from PIL import Image
-import tensorflow as tf
-import tensorflow_hub as hub
-import numpy as np
-import requests
-import cv2
-from tensorflow.python.keras.layers import Add, Conv2D, Input, Lambda
-from tensorflow.python.keras.models import Model
-
-
-super_resolution = './weights.h5'
-
-
-pre_mean = np.array([0.4488, 0.4371, 0.4040]) * 255
-
-
-#HELPER FUN
-def normalize(x, rgb_mean=pre_mean):
- return (x - rgb_mean) / 127.5
-
-#HELPER FUN
-def pixel_shuffle(scale):
- return lambda x: tf.nn.depth_to_space(x, scale)
-
-#HELPER FUN
-def denormalize(x, rgb_mean=pre_mean):
- return x * 127.5 + rgb_mean
-
-
-#MAIN FUN
-def res_block(x_in, filters, scaling):
- x = Conv2D(filters, 3, padding='same', activation='relu')(x_in)
- x = Conv2D(filters, 3, padding='same')(x)
-    x = tf.keras.layers.LeakyReLU(alpha=0.01)(x)
- x = tf.keras.layers.BatchNormalization()(x)
- if scaling:
- x = Lambda(lambda t: t * scaling)(x)
- x = Add()([x_in, x])
- return x
-
-
-
-#HELPER FUN
-def upsample(x, scale, num_filters):
- def upsample_1(x, factor, **kwargs):
- x = Conv2D(num_filters * (factor ** 2), 3, padding='same', **kwargs)(x)
- return Lambda(pixel_shuffle(scale=factor))(x)
-
- if scale == 2:
- x = upsample_1(x, 2, name='conv2d_1_scale_2')
- elif scale == 3:
- x = upsample_1(x, 3, name='conv2d_1_scale_3')
- elif scale == 4:
- x = upsample_1(x, 2, name='conv2d_1_scale_2')
- x = upsample_1(x, 2, name='conv2d_2_scale_2')
-
- return x
-
-#MAIN FUN
-def super_res(scale, num_filters=64, num_res_blocks=8, res_block_scaling=None):
- x_in = Input(shape=(None, None, 3))
- x = Lambda(normalize)(x_in)
- x = b = Conv2D(num_filters, 3, padding='same')(x)
-
- for i in range(num_res_blocks):
- b = res_block(b, num_filters, res_block_scaling)
- b = Conv2D(num_filters, 3, padding='same')(b)
- x = Add()([x, b])
-
- x = upsample(x, scale, num_filters)
- x = Conv2D(3, 3, padding='same')(x)
-
- x = Lambda(denormalize)(x)
- return Model(x_in, x, name="super_res")
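-
-# Illustrative shape check (assuming a batched float input):
-#
-#     sr = super_res(scale=4, num_res_blocks=16)
-#     sr(tf.zeros((1, 64, 64, 3))).shape  # -> (1, 256, 256, 3)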
-
-
-
-
-def load_image(image):
-    return np.array(image)  # gradio hands us a PIL image; convert it to a numpy array
-
-
-
-
-def resolve(model, lr_batch):
- lr_batch = tf.cast(lr_batch, tf.float32)
- sr_batch = model(lr_batch)
- sr_batch = tf.clip_by_value(sr_batch, 0, 255)
- sr_batch = tf.round(sr_batch)
- sr_batch = tf.cast(sr_batch, tf.uint8)
- return sr_batch
-
-
-
-def resolve_single(model, lr):
- return resolve(model, tf.expand_dims(lr, axis=0))[0]
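-
-# e.g. for an (h, w, 3) low-resolution array `lr`, resolve_single(model, lr) returns a
-# (4h, 4w, 3) uint8 tensor at scale 4; Image.fromarray(...) turns it back into a PIL image.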
-
-
-
-model = super_res(scale=4, num_res_blocks=16)
-
-
-model.load_weights(super_resolution)
-
-
-def predict_image(image):
-    lr = load_image(image)
- sr = resolve_single(model, lr)
- numpy_array = sr.numpy()
- ima = Image.fromarray(numpy_array)
- return ima
-
-
-
-image = gr.inputs.Image()
-
-iface = gr.Interface(fn=predict_image, inputs=image, outputs=image, interpretation='default')
-
-iface.launch()
\ No newline at end of file
diff --git a/spaces/AkshayKollimarala/MYAIVOICESPEECH/app.py b/spaces/AkshayKollimarala/MYAIVOICESPEECH/app.py
deleted file mode 100644
index ca8b6d40b4ab898c70da92f4a4298de2baf703dc..0000000000000000000000000000000000000000
--- a/spaces/AkshayKollimarala/MYAIVOICESPEECH/app.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import os
-import re
-import requests
-import json
-import gradio as gr
-from langchain.chat_models import ChatOpenAI
-from langchain import LLMChain, PromptTemplate
-from langchain.memory import ConversationBufferMemory
-
-OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
-PLAY_HT_API_KEY=os.getenv('PLAY_HT_API_KEY')
-PLAY_HT_USER_ID=os.getenv('PLAY_HT_USER_ID')
-
-PLAY_HT_VOICE_ID=os.getenv('PLAY_HT_VOICE_ID')
-play_ht_api_get_audio_url = "https://play.ht/api/v2/tts"
-
-
-template = """You are a helpful assistant to answer user queries.
-{chat_history}
-User: {user_message}
-Chatbot:"""
-
-prompt = PromptTemplate(
- input_variables=["chat_history", "user_message"], template=template
-)
-
-memory = ConversationBufferMemory(memory_key="chat_history")
-
-llm_chain = LLMChain(
-    llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
- prompt=prompt,
- verbose=True,
- memory=memory,
-)
-
-headers = {
- "accept": "text/event-stream",
- "content-type": "application/json",
- "AUTHORIZATION": "Bearer "+ PLAY_HT_API_KEY,
- "X-USER-ID": PLAY_HT_USER_ID
-}
-
-
-def get_payload(text):
- return {
- "text": text,
- "voice": PLAY_HT_VOICE_ID,
- "quality": "medium",
- "output_format": "mp3",
- "speed": 1,
- "sample_rate": 24000,
- "seed": None,
- "temperature": None
- }
-
-def get_generated_audio(text):
-    payload = get_payload(text)
-    generated_response = {}
-    response = None  # keep a handle so the error paths below cannot hit an unbound name
-    try:
-        response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers)
-        response.raise_for_status()
-        generated_response["type"] = 'SUCCESS'
-        generated_response["response"] = response.text
-    except requests.exceptions.RequestException as e:
-        generated_response["type"] = 'ERROR'
-        try:
-            response_text = json.loads(response.text)
-            if response_text.get('error_message'):
-                generated_response["response"] = response_text['error_message']
-            else:
-                generated_response["response"] = response.text
-        except Exception:
-            generated_response["response"] = response.text if response is not None else str(e)
-    except Exception as e:
-        generated_response["type"] = 'ERROR'
-        generated_response["response"] = response.text if response is not None else str(e)
-    return generated_response
-
-def extract_urls(text):
- # Define the regex pattern for URLs
- url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*'
-
- # Find all occurrences of URLs in the text
- urls = re.findall(url_pattern, text)
-
- return urls
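-
-# e.g. (illustrative): extract_urls('data: https://example.com/audio/abc.mp3')
-# -> ['https://example.com/audio/abc.mp3']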
-
-def get_audio_reply_for_question(text):
- generated_audio_event = get_generated_audio(text)
-    # get_generated_audio returns the Play.ht events as raw text; extract the audio URL from it
- final_response = {
- "audio_url": '',
- "message": ''
- }
- if generated_audio_event["type"] == 'SUCCESS':
- audio_urls = extract_urls(generated_audio_event["response"])
- if len(audio_urls) == 0:
- final_response['message'] = "No audio file link found in generated event"
- else:
- final_response['audio_url'] = audio_urls[-1]
- else:
- final_response['message'] = generated_audio_event['response']
- return final_response
-
-def download_url(url):
- try:
- # Send a GET request to the URL to fetch the content
- final_response = {
- 'content':'',
- 'error':''
- }
- response = requests.get(url)
- # Check if the request was successful (status code 200)
- if response.status_code == 200:
- final_response['content'] = response.content
- else:
- final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}"
- except Exception as e:
- final_response['error'] = f"Failed to download the URL. Error: {e}"
- return final_response
-
-def get_filename_from_url(url):
- # Use os.path.basename() to extract the file name from the URL
- file_name = os.path.basename(url)
- return file_name
-
-def get_text_response(user_message):
- response = llm_chain.predict(user_message = user_message)
- return response
-
-def get_text_response_and_audio_response(user_message):
- response = get_text_response(user_message) # Getting the reply from Open AI
- audio_reply_for_question_response = get_audio_reply_for_question(response)
- final_response = {
- 'output_file_path': '',
- 'message':''
- }
- audio_url = audio_reply_for_question_response['audio_url']
- if audio_url:
- output_file_path=get_filename_from_url(audio_url)
- download_url_response = download_url(audio_url)
- audio_content = download_url_response['content']
- if audio_content:
- with open(output_file_path, "wb") as audio_file:
- audio_file.write(audio_content)
- final_response['output_file_path'] = output_file_path
- else:
- final_response['message'] = download_url_response['error']
- else:
- final_response['message'] = audio_reply_for_question_response['message']
- return final_response
-
-def chat_bot_response(message, history):
- text_and_audio_response = get_text_response_and_audio_response(message)
- output_file_path = text_and_audio_response['output_file_path']
- if output_file_path:
- return (text_and_audio_response['output_file_path'],)
- else:
- return text_and_audio_response['message']
-
-demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"])
-
-if __name__ == "__main__":
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py
deleted file mode 100644
index dd7c16580d0620bc854f2c6eb7c881bdcd23020a..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py
+++ /dev/null
@@ -1,9 +0,0 @@
-_base_ = [
- '../_base_/models/deeplabv3_r50-d8.py',
- '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_80k.py'
-]
-model = dict(
- decode_head=dict(align_corners=True),
- auxiliary_head=dict(align_corners=True),
- test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
diff --git a/spaces/Araby/BRATArA/app.py b/spaces/Araby/BRATArA/app.py
deleted file mode 100644
index 8dae9d6af44090d1c24ed9ca9b77836236d131e5..0000000000000000000000000000000000000000
--- a/spaces/Araby/BRATArA/app.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import streamlit as st
-from transformers import GPT2TokenizerFast, AutoModelForCausalLM
-from arabert.preprocess import ArabertPreprocessor
-
-# Load the tokenizer and the model
-
-model_name = "malmarjeh/gpt2"
-tokenizer = GPT2TokenizerFast.from_pretrained("aubmindlab/aragpt2-base")
-model = AutoModelForCausalLM.from_pretrained(model_name)
-preprocessor = ArabertPreprocessor(model_name=model_name)
-
-# Streamlit UI
-st.title('Arabic Text Summarizer | By M.Araby')
-text = st.text_area("Paste your Arabic text here:")
-
-if st.button('Summarize'):
- if text:
-        # Preprocess and tokenize the input text ('النص' = "text", 'الملخص' = "summary")
-        processed_text = preprocessor.preprocess(text)
-        formatted_text = '\n النص: ' + processed_text + ' \n الملخص: \n '
-        tokenizer.add_special_tokens({'pad_token': '<pad>'})  # '<pad>' is an assumption; the original token string appears to have been lost
- tokens = tokenizer.batch_encode_plus([formatted_text], return_tensors='pt', padding='max_length',
- max_length=150)
-
- # Generate summary
- output = model.generate(
- input_ids=tokens['input_ids'],
- repetition_penalty=2.0,
- num_beams=5,
- max_length=600,
- pad_token_id=tokenizer.pad_token_id,
- eos_token_id=tokenizer.eos_token_id,
- bos_token_id=tokenizer.bos_token_id,
- )
-
- # Decode and display the summarized text
-        result = tokenizer.decode(output[0][150:], skip_special_tokens=True).strip()  # skip the 150 prompt tokens
- st.subheader("Original Text Input")
- st.write(text)
- st.subheader("Summarized Text Idea")
- st.write(result)
- else:
- st.warning("Please enter Arabic text to summarize.")
diff --git a/spaces/Arijit-hazra/my-image-captioner/app.py b/spaces/Arijit-hazra/my-image-captioner/app.py
deleted file mode 100644
index e4390b54f220584ef4b05275ba62a6516976b472..0000000000000000000000000000000000000000
--- a/spaces/Arijit-hazra/my-image-captioner/app.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import re
-import string
-import gradio as gr
-import tensorflow as tf
-from load_model import build
-
-IMG_SHAPE = (224,224,3)
-
-
-def custom_standardization(s):
- s = tf.strings.lower(s)
- s = tf.strings.regex_replace(s, f'[{re.escape(string.punctuation)}]', '')
- s = tf.strings.join(['[START]', s, '[END]'], separator=' ')
- return s
-
-model = build()
-
-rescale = lambda image: tf.image.resize(tf.convert_to_tensor(image), IMG_SHAPE[:-1])
-
-def single_img_transcribe(image, temperature=1):
- initial = model.word_to_index([['[START]']]) # (batch, sequence)
- img_features = model.feature_extractor(rescale(image)[tf.newaxis, ...])
-
- tokens = initial # (batch, sequence)
- for n in range(50):
- preds = model((img_features, tokens)).numpy() # (batch, sequence, vocab)
- preds = preds[:,-1, :] #(batch, vocab)
- if temperature==0:
- next = tf.argmax(preds, axis=-1)[:, tf.newaxis] # (batch, 1)
- else:
- next = tf.random.categorical(preds/temperature, num_samples=1) # (batch, 1)
- tokens = tf.concat([tokens, next], axis=1) # (batch, sequence)
-
- if next[0] == model.word_to_index('[END]'):
- break
-
- words = model.index_to_word(tokens[0, 1:-1])
- result = tf.strings.reduce_join(words, axis=-1, separator=' ')
- return result.numpy().decode()
-
-def img_transcribes(image):
- result = []
- for t in [0,0.5,1]:
- result.append(single_img_transcribe(image, t))
- return result
-
-gr.Interface(fn=img_transcribes,
- inputs=gr.Image(type="pil"),
- outputs=["text","text","text"]
- ).launch()
diff --git a/spaces/Armandoliv/t5-summarize-app-scitldr/README.md b/spaces/Armandoliv/t5-summarize-app-scitldr/README.md
deleted file mode 100644
index fd65abfd3baa7309a4cfaf46cd14e13599317392..0000000000000000000000000000000000000000
--- a/spaces/Armandoliv/t5-summarize-app-scitldr/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: T5 Summarize App Scitldr
-emoji: 💻
-colorFrom: red
-colorTo: green
-sdk: gradio
-sdk_version: 3.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/app.py b/spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/app.py
deleted file mode 100644
index e9c463a4f58179e0785756678119f56f902c9396..0000000000000000000000000000000000000000
--- a/spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/app.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import argparse
-from functools import partial
-import cv2
-import requests
-import os
-from io import BytesIO
-from PIL import Image
-import numpy as np
-from pathlib import Path
-import gradio as gr
-
-import warnings
-
-import torch
-
-os.system("python setup.py build develop --user")
-os.system("pip install packaging==21.3")
-warnings.filterwarnings("ignore")
-
-
-from groundingdino.models import build_model
-from groundingdino.util.slconfig import SLConfig
-from groundingdino.util.utils import clean_state_dict
-from groundingdino.util.inference import annotate, load_image, predict
-import groundingdino.datasets.transforms as T
-
-from huggingface_hub import hf_hub_download
-
-
-
-# Config and checkpoint used to load the GroundingDINO Swin-T (OGC) model
-config_file = "groundingdino/config/GroundingDINO_SwinT_OGC.py"
-ckpt_repo_id = "ShilongLiu/GroundingDINO"
-ckpt_filename = "groundingdino_swint_ogc.pth"
-
-
-def load_model_hf(model_config_path, repo_id, filename, device='cpu'):
- args = SLConfig.fromfile(model_config_path)
- model = build_model(args)
- args.device = device
-
- cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
- checkpoint = torch.load(cache_file, map_location='cpu')
- log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
- print("Model loaded from {} \n => {}".format(cache_file, log))
- _ = model.eval()
- return model
-
-def image_transform_grounding(init_image):
- transform = T.Compose([
- T.RandomResize([800], max_size=1333),
- T.ToTensor(),
- T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
- ])
- image, _ = transform(init_image, None) # 3, h, w
- return init_image, image
-
-def image_transform_grounding_for_vis(init_image):
- transform = T.Compose([
- T.RandomResize([800], max_size=1333),
- ])
-    image, _ = transform(init_image, None)  # resized PIL image (kept for visualization)
- return image
-
-model = load_model_hf(config_file, ckpt_repo_id, ckpt_filename)
-
-def run_grounding(input_image, grounding_caption, box_threshold, text_threshold):
- init_image = input_image.convert("RGB")
- original_size = init_image.size
-
- _, image_tensor = image_transform_grounding(init_image)
- image_pil: Image = image_transform_grounding_for_vis(init_image)
-
-    # run grounding
- boxes, logits, phrases = predict(model, image_tensor, grounding_caption, box_threshold, text_threshold, device='cpu')
- annotated_frame = annotate(image_source=np.asarray(image_pil), boxes=boxes, logits=logits, phrases=phrases)
- image_with_box = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))
-
-
- return image_with_box
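-
-# e.g. (illustrative): run_grounding(Image.open('dog.jpg'), 'a dog', 0.25, 0.25)
-# returns the resized image annotated with the grounded boxes and phrases.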
-
-if __name__ == "__main__":
-
- css = """
- #mkd {
- height: 500px;
- overflow: auto;
- border: 1px solid #ccc;
- }
-"""
- block = gr.Blocks(css=css).queue()
- with block:
- gr.Markdown("