diff --git a/spaces/0x90e/ESRGAN-MANGA/util.py b/spaces/0x90e/ESRGAN-MANGA/util.py deleted file mode 100644 index 0fa96927230b0d18680a5378154af57bec9aad35..0000000000000000000000000000000000000000 --- a/spaces/0x90e/ESRGAN-MANGA/util.py +++ /dev/null @@ -1,6 +0,0 @@ -import os - -def is_google_colab(): - if os.getenv("COLAB_RELEASE_TAG"): - return True - return False \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackear Photoshop Uma Soluo ou um Problema? Descubra os Prs e Contras.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackear Photoshop Uma Soluo ou um Problema? Descubra os Prs e Contras.md deleted file mode 100644 index 03b52d76534f4315b17f71e0588a2385cde74910..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackear Photoshop Uma Soluo ou um Problema? Descubra os Prs e Contras.md +++ /dev/null @@ -1,14 +0,0 @@ -
-

How to Crack Photoshop: Risks and Alternatives

-

Photoshop is widely used software for photo and image editing. However, it can be quite expensive for some people, so some turn to Photoshop cracks to use the software for free. Although this may seem like a good idea at first, it is important to know the risks associated with cracked software. In this article, we will explain what Photoshop cracks are, why they are illegal and dangerous, and what better alternatives there are for getting Photoshop legally and safely.

-

What is a Photoshop Crack?

-

A Photoshop crack is a program used to bypass the activation process of Adobe Photoshop. Cracks are usually created by third-party individuals or organizations and are not endorsed by Adobe. Using a cracked version of Photoshop exposes you to various risks, such as viruses, malware, or spyware being installed on your computer. Moreover, cracked software may not receive updates from the manufacturer, which means you may miss important security patches. It is also worth noting that Adobe does not support the use of cracked software and may take legal action against individuals or organizations that distribute cracks.

-

como crackear photoshop


Downloadhttps://byltly.com/2uKvIs



-

Why is Using Photoshop Crack Illegal and Dangerous?

-

There are several reasons why using a Photoshop crack is illegal and dangerous. First of all, cracked software is illegal: by using a cracked version of Photoshop, you are breaking the law and may be subject to penalties. Additionally, cracked software is usually unstable and riddled with viruses, which can cause your computer to crash or, worse, infect other computers with malware. Furthermore, using a Photoshop crack may get you banned from certain websites and online forums. Many websites have policies against cracked software and will ban users who are caught using it. In some cases, your IP address may be blacklisted, preventing you from accessing certain websites or online services.

-

What are Some Alternatives to Photoshop Crack?

-

If you want to use Photoshop legally and safely, there are several alternatives to a crack that you can consider. One option is the free trial version of Photoshop that Adobe offers on its website: the trial lets you use all of Photoshop's features for 7 days at no cost, so you can test the software before deciding whether to buy it. Another option is Adobe's subscription plan, which gives you access to Photoshop and other Adobe products for a monthly or yearly fee. The subscription also includes cloud storage, online services, and regular updates, and you can choose from different plans depending on your needs and budget.

-

Conclusion

-

Using a Photoshop crack is not a good idea if you want to use Photoshop for photo and image editing. Cracked software is illegal, dangerous, and unreliable, and it can expose you to risks such as viruses, malware, spyware, legal issues, and bans. Instead of using a crack, consider the free trial version or the subscription plan that Adobe offers on its website. These alternatives are legal, safe, and reliable, and they give you the full features and performance that Photoshop can offer.

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Arcgis 10.8 Full Crack.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Arcgis 10.8 Full Crack.md deleted file mode 100644 index 231c45994eb5b9fd329117e45b4da31c85aca452..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Arcgis 10.8 Full Crack.md +++ /dev/null @@ -1,17 +0,0 @@ -
-

How to Free Download ArcGIS 10.8 Full Crack and Install It on Your PC

-

ArcGIS is powerful software that allows you to create, analyze, and visualize geographic data. It is widely used by professionals and students in fields such as geography, urban planning, environmental science, and engineering. However, ArcGIS is not free software, and you need to purchase a license to use it.

-

But what if you want to use ArcGIS for free without paying anything? Is there a way to free download ArcGIS 10.8 full crack and install it on your PC? The answer is yes, but you need to be careful and follow some steps to avoid any malware or viruses. Here is how you can do it:

-

free download arcgis 10.8 full crack


Download File ✫✫✫ https://byltly.com/2uKxxw



-
    -
  1. First, you need to download the ArcGIS 10.8 setup file from a reliable source. You can use this link: https://www.esri.com/en-us/industries/overview. Click on the download button and choose the version 10.8 from the list.
  2. -
  3. Next, you need to download the ArcGIS 10.8 crack file from another source. You can use this link: https://crackdaily.com/arcgis-crack/. Scroll down and click on the green download button.
  4. -
  5. After downloading both files, you need to disable your antivirus software temporarily. This is because the crack file may be detected as a virus by some antivirus programs, but it is actually safe to use.
  6. -
  7. Then, you need to install ArcGIS 10.8 by running the setup file. Follow the instructions and complete the installation process.
  8. -
  9. Next, you need to copy the crack file and paste it in the ArcGIS installation folder. The default location is C:\Program Files (x86)\ArcGIS or C:\Program Files\ArcGIS depending on your system architecture.
  10. -
  11. After that, you need to run the crack file as administrator. Click on the crack button and wait for it to finish.
  12. -
  13. Finally, you need to restart your computer and enjoy using ArcGIS 10.8 full crack for free.
  14. -
-

Note: This method is only for educational purposes and we do not recommend using cracked software. If you like ArcGIS and want to support its development, please buy a license from its official website: https://www.esri.com/en-us/home.

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Electude-motor-diagnosis-descargar.md b/spaces/1gistliPinn/ChatGPT4/Examples/Electude-motor-diagnosis-descargar.md deleted file mode 100644 index d320789f08c5466b271b5f8fd9a1732bc8340808..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Electude-motor-diagnosis-descargar.md +++ /dev/null @@ -1,36 +0,0 @@ -

electude-motor-diagnosis-descargar


DOWNLOADhttps://imgfil.com/2uxXXn



-
--navegador-motor-diagnosis-e-letras/ - -Thu, 05 Apr 2018 12:16:26 +0000 - -motor-diagnosis-descargar-navegador-motor-diagnosis-e-letras/Siemens announces UFER Enterprise Platform for the industrial internet of things - -Siemens has announced UFER, an enterprise software platform that combines artificial intelligence and the industrial internet of things to help companies process vast amounts of information generated by industrial sensors and machines to transform business operations in production and the environment. - -Siemens AG’s UFER platform will go into pilot use in July 2018. The pilot is targeted at the automotive industry, where UFER will work together with Siemens’ new PlantWise system for the machine-to-machine industrial internet. PlantWise is expected to be released commercially in 2019. - -UFER has four main purposes: - -Reducing the risk of accidents and operational disruptions by processing data from industrial sensors and machines to make it available for analysis at a moment’s notice. - -Organizing data from multiple sources and producing a common, actionable view of plant operations. - -Enabling businesses to become more productive by using data from machines, sensors and the internet of things to optimize production and service processes. - -Reducing costs by optimizing maintenance processes, improving the efficiency of plant operations, and capturing data that can be used to develop new products or services. - -UFER will be developed in collaboration with IBM, leading international manufacturing company, as well as a number of smaller partner companies in a selected industry sector. - -UFER’s main focus is on the automotive industry. This industry has one of the most complex IT infrastructures in the world, including: - -Production lines for which plant operations require close monitoring, such as those for powertrain components and fuel injection systems; - -Networks of machines that capture and analyze plant operations data, such as the multi-sensor networks of forklifts, cranes and conveyor belts; - -PlantWise, which is ready for the commercial release in 2019, will feed UFER with production data from the plant, such as production rate and inventory information, and process it to make it available for analysis at a moment’s notice. - -The U 4fefd39f24
-
-
-

diff --git a/spaces/1line/AutoGPT/autogpt/commands/write_tests.py b/spaces/1line/AutoGPT/autogpt/commands/write_tests.py deleted file mode 100644 index 35a086536c9d05d520a84b15ead49f775eacdcc9..0000000000000000000000000000000000000000 --- a/spaces/1line/AutoGPT/autogpt/commands/write_tests.py +++ /dev/null @@ -1,31 +0,0 @@ -"""A module that contains a function to generate test cases for the submitted code.""" -from __future__ import annotations - -import json - -from autogpt.llm_utils import call_ai_function - - -def write_tests(code: str, focus: list[str]) -> str: - """ - A function that takes in code and focus topics and returns a response from create - chat completion api call. - - Parameters: - focus (list): A list of suggestions around what needs to be improved. - code (str): Code for test cases to be generated against. - Returns: - A result string from create chat completion. Test cases for the submitted code - in response. - """ - - function_string = ( - "def create_test_cases(code: str, focus: Optional[str] = None) -> str:" - ) - args = [code, json.dumps(focus)] - description_string = ( - "Generates test cases for the existing code, focusing on" - " specific areas if required." - ) - - return call_ai_function(function_string, args, description_string) diff --git a/spaces/1line/AutoGPT/autogpt/speech/macos_tts.py b/spaces/1line/AutoGPT/autogpt/speech/macos_tts.py deleted file mode 100644 index 4c072ce256782e83a578b5181abf1a7b524c621b..0000000000000000000000000000000000000000 --- a/spaces/1line/AutoGPT/autogpt/speech/macos_tts.py +++ /dev/null @@ -1,21 +0,0 @@ -""" MacOS TTS Voice. """ -import os - -from autogpt.speech.base import VoiceBase - - -class MacOSTTS(VoiceBase): - """MacOS TTS Voice.""" - - def _setup(self) -> None: - pass - - def _speech(self, text: str, voice_index: int = 0) -> bool: - """Play the given text.""" - if voice_index == 0: - os.system(f'say "{text}"') - elif voice_index == 1: - os.system(f'say -v "Ava (Premium)" "{text}"') - else: - os.system(f'say -v Samantha "{text}"') - return True diff --git a/spaces/1phancelerku/anime-remove-background/Enjoy Unlimited VK Music Download with the Best Tools.md b/spaces/1phancelerku/anime-remove-background/Enjoy Unlimited VK Music Download with the Best Tools.md deleted file mode 100644 index 3de57926b22b21d147ef7bd4020b22f7fdbd1431..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Enjoy Unlimited VK Music Download with the Best Tools.md +++ /dev/null @@ -1,127 +0,0 @@ -
-

How to Download VK Music in 2023

-

VK, also known as VKontakte, is a popular social media platform in Russia and Europe. It offers a wide range of media content, including movies, videos, photos, and music. Many users enjoy listening to music on VK, as it has a large collection of songs from various genres and artists. However, sometimes you may want to download music from VK to your device, so you can listen to it offline, without ads, or with other players. How can you do that?

-

In this article, we will show you four available ways to download music from VK in 2023. You can choose the one that suits your needs and preferences best. Let's get started!

-

download vk music


Download File ❤❤❤ https://jinyurl.com/2uNUpF



-

Method 1: Use TunesKit Audio Capture to Record and Download VK Music

-

TunesKit Audio Capture is a powerful audio recorder for Windows and Mac that can capture any sound playing on your computer. It can record and download VK music and other streaming audio from any program or website, and it can save the recordings in formats including MP3, WAV, FLAC, and AAC. It preserves the original audio quality and the ID3 tags of the VK music, and it supports batch recording and editing of multiple audio tracks.

-

To use TunesKit Audio Capture to download music from VK, you need to follow these steps:

-
    -
  1. Download and install TunesKit Audio Capture on your computer.
  2. -
  3. Launch the program and check if there is a browser on the program list. If not, you can add it by drag-and-drop.
  4. -
  5. Go to the VK website and find the music you want to download.
  6. -
  7. Click the Format button and select MP3 or any other format you prefer.
  8. -
  9. Play the music on VK and TunesKit Audio Capture will start recording it automatically.
  10. -
  11. When the music ends, click the Stop button and edit the audio if needed.
  12. -
  13. Save the audio file to your computer and enjoy it offline.
  14. -
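For readers who are curious what this kind of recording boils down to, here is a minimal Python sketch of the general idea, not TunesKit's actual implementation: it assumes the sounddevice and soundfile packages are installed, records from the default input device, and uses a placeholder duration and file name. Capturing the computer's own playback normally requires selecting a loopback device (such as Stereo Mix or BlackHole) first.

    import sounddevice as sd
    import soundfile as sf

    duration = 30        # seconds to record (placeholder value)
    samplerate = 44100   # standard CD-quality sample rate

    # Record from the default input device; to capture what the computer is
    # playing, first point sd.default.device at a loopback device.
    audio = sd.rec(int(duration * samplerate), samplerate=samplerate, channels=2)
    sd.wait()  # block until the recording is finished

    sf.write("vk_recording.wav", audio, samplerate)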
-

Method 2: Use VK Music Downloader Extension for Chrome to Download VK Music

-

VK Music Downloader is a free extension for Chrome that helps you download your music on VK.com. It saves the original name of the soundtrack and allows you to download all playlists and groups of songs at once. It has no ads and the code is open and not obfuscated. However, it does not support batch downloading of songs.

-

To use VK Music Downloader extension for Chrome to download music from VK, you need to follow these steps:

-
    -
  1. Add the extension to your Chrome browser from [6](https://chrome.google.com/webstore/detail/%D1%81%D0%BA%D0%B0%D1%87%D0%B0%D1%82%D1%8C-%D0%BC%D1%83%D0%B7%D1%8B%D0%BA%D1%83-%D1%81-%D0%B2%D0%BA/bgmpjmdignpongmfjpgaikghaajeidid?hl=en).
  2. -
  3. Go to the VK website and find the music you want to download.
  4. -
  5. Click on the green arrow icon next to the song title and select Download.
  6. -
  7. Save the audio file to your computer and enjoy it offline.
  8. -
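Under the hood, a downloader like this simply fetches an audio file from a direct URL and writes it to disk. The following Python sketch illustrates that step only; the URL and file name are placeholders, it assumes you already have a direct link you are allowed to download, and it is not the extension's actual code.

    import requests

    def download_track(url: str, filename: str) -> None:
        # Stream the response so large tracks are written in chunks
        # instead of being held in memory all at once.
        response = requests.get(url, stream=True, timeout=30)
        response.raise_for_status()
        with open(filename, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)

    # Placeholder link: replace it with a direct audio URL you may download.
    download_track("https://example.com/track.mp3", "track.mp3")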
-

Method 3: Use SaveFrom.net Online Service to Download VK Music

-

SaveFrom.net is an online service that allows you to download videos and audios from various websites, including YouTube, Facebook, Instagram, Vimeo, and VK. It supports various formats, such as MP4, MP3, WEBM, etc. It is easy to use and does not require any installation or registration.

-

To use SaveFrom.net online service to download music from VK, you need to follow these steps:

-
    -
  1. Go to [11](https://en-savefrom.net/) and paste the URL of the VK music you want to download.
  2. -
  3. Click on the Download button and choose the format and quality you prefer.
  4. -
  5. Save the audio file to your computer and enjoy it offline.
  6. -
-

Method 4: Use Music Downloader for VK Extension for Chrome to Download VK Music

-

Music Downloader for VK is another free extension for Chrome that enables you to download music from VK.com. It adds a download button to each song on the VK website and allows you to download multiple songs at once. It also supports downloading music from other websites, such as SoundCloud, Bandcamp, YouTube, etc. However, it may not work with some songs due to copyright issues.

-

-

To use Music Downloader for VK extension for Chrome to download music from VK, you need to follow these steps:

-
    -
  1. Add the extension to your Chrome browser from [10](https://chrome.google.com/webstore/detail/music-downloader-for-vk/ahkohdihdjccebcfgjgffmpdjjknhgla?hl=en).
  2. -
  3. Go to the VK website and find the music you want to download.
  4. -
  5. Click on the download button next to the song title and select Download.
  6. -
  7. Save the audio file to your computer and enjoy it offline.
  8. -
-

Conclusion: How to Download VK Music in 2023

-

In conclusion, we have shown you four available ways to download music from VK in 2023. You can use TunesKit Audio Capture, VK Music Downloader, SaveFrom.net, or Music Downloader for VK to get your favorite songs from VK.com. Each method has its own advantages and disadvantages, so you can choose the one that works best for you. We recommend TunesKit Audio Capture as the best method, as it can record and download any sound from your computer with high quality and ID3 tags. It also supports batch recording and editing of multiple audio tracks.

-

We hope this article has helped you learn how to download music from VK in 2023. If you have any questions or suggestions, please feel free to leave a comment below. Thank you for reading!

-

FAQs: How to Download VK Music in 2023

-

Q1: Is it legal to download music from VK?

-

A1: It depends on the source and the purpose of downloading music from VK. If the music is uploaded by the original artist or authorized by them, then it is legal to download it for personal use. However, if the music is pirated or infringes on someone else's rights, then it is illegal to download it. You should always respect the intellectual property rights of the creators and follow the terms of service of VK.com.

-

Q2: How can I download music from VK on my Android phone?

-

A2: You can use an app called SnapTube to download music from VK on your Android phone. SnapTube is a video and music downloader that supports various websites, including YouTube, Facebook, Instagram, VK, etc. You can download SnapTube from [9](https://www.snaptubeapp.com/). To use SnapTube to download music from VK, you need to follow these steps:

-
    -
  1. Open SnapTube and select VK from the list of supported sites.
  2. -
  3. Login with your VK account and find the music you want to download.
  4. -
  5. Tap on the Download button at the bottom right corner of the screen and choose MP3 or any other format you prefer.
  6. -
  7. Save the audio file to your phone and enjoy it offline.
  8. -
-

Q3: How can I download music from VK on my iPhone?

-

A3: You can use an app called Documents by Readdle to download music from VK on your iPhone. Documents by Readdle is a file manager and media player that also has a built-in browser and downloader. You can download Documents by Readdle from [8](https://apps.apple.com/us/app/documents-by-readdle/id364901807). To use Documents by Readdle to download music from VK, you need to follow these steps:

-
    -
  1. Open Documents by Readdle and tap on the Browser icon at the bottom right corner of the screen.
  2. -
  3. Go to [7](https://en-savefrom.net/) and paste the URL of the VK music you want to download.
  4. -
  5. Tap on the Download button and choose MP3 or any other format you prefer.
  6. -
  7. Save the audio file to your iPhone and enjoy it offline.
  8. -
-

Q4: How can I transfer downloaded music from my computer to my phone?

-

A4: There are different ways to transfer downloaded music from your computer to your phone, depending on the type of your phone and the software you use. Here are some common methods:

- Connect your phone to your computer with a USB cable, choose the file transfer (MTP) mode on the phone, and copy the audio files into the phone's Music folder.
- For an iPhone, add the files to iTunes (or the Finder on newer versions of macOS) and sync them to the device, or import them into an app such as Documents by Readdle.
- Alternatively, upload the files to a cloud storage service such as Google Drive or Dropbox and download them on your phone. A short example of the USB-copy approach is sketched below.
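If your downloads pile up in one folder, the USB-copy method can also be scripted. The Python sketch below assumes the phone is already mounted as a removable drive; both paths are placeholders that you would adjust for your own system.

    import shutil
    from pathlib import Path

    # Placeholder paths: point these at your download folder and at the
    # phone's music folder as it appears when the phone is mounted over USB.
    source = Path.home() / "Music" / "vk_downloads"
    destination = Path("E:/Music")  # e.g. /Volumes/PHONE/Music on a Mac

    destination.mkdir(parents=True, exist_ok=True)
    for track in source.glob("*.mp3"):
        shutil.copy2(track, destination / track.name)
        print("Copied", track.name)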

Q5: How can I play downloaded music from VK on my phone?

-

A5: You can play downloaded music from VK on your phone using any music player app that supports the format of the audio files. For example, you can use VLC, MX Player, Poweramp, or Musicolet to play MP3, WAV, FLAC, or AAC files. You can also use the default music player app on your phone or the Documents by Readdle app if you downloaded the music using it.

-

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/801artistry/RVC801/Applio-RVC-Fork/utils/clonerepo_experimental.py b/spaces/801artistry/RVC801/Applio-RVC-Fork/utils/clonerepo_experimental.py deleted file mode 100644 index b0ae02648c1307562cf48033908edcf2996db5e2..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/Applio-RVC-Fork/utils/clonerepo_experimental.py +++ /dev/null @@ -1,253 +0,0 @@ -import os -import subprocess -import shutil -from concurrent.futures import ThreadPoolExecutor, as_completed -from tqdm.notebook import tqdm -from pathlib import Path -import requests - -def run_script(): - def run_cmd(cmd): - process = subprocess.run(cmd, shell=True, check=True, text=True) - return process.stdout - - # Change the current directory to /content/ - os.chdir('/content/') - print("Changing dir to /content/") - - # Your function to edit the file - def edit_file(file_path): - temp_file_path = "/tmp/temp_file.py" - changes_made = False - with open(file_path, "r") as file, open(temp_file_path, "w") as temp_file: - previous_line = "" - second_previous_line = "" - for line in file: - new_line = line.replace("value=160", "value=128") - if new_line != line: - print("Replaced 'value=160' with 'value=128'") - changes_made = True - line = new_line - - new_line = line.replace("crepe hop length: 160", "crepe hop length: 128") - if new_line != line: - print("Replaced 'crepe hop length: 160' with 'crepe hop length: 128'") - changes_made = True - line = new_line - - new_line = line.replace("value=0.88", "value=0.75") - if new_line != line: - print("Replaced 'value=0.88' with 'value=0.75'") - changes_made = True - line = new_line - - if "label=i18n(\"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络\")" in previous_line and "value=1," in line: - new_line = line.replace("value=1,", "value=0.25,") - if new_line != line: - print("Replaced 'value=1,' with 'value=0.25,' based on the condition") - changes_made = True - line = new_line - - if "label=i18n(\"总训练轮数total_epoch\")" in previous_line and "value=20," in line: - new_line = line.replace("value=20,", "value=500,") - if new_line != line: - print("Replaced 'value=20,' with 'value=500,' based on the condition for DEFAULT EPOCH") - changes_made = True - line = new_line - - if 'choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny"], # Fork Feature. 
Add Crepe-Tiny' in previous_line: - if 'value="pm",' in line: - new_line = line.replace('value="pm",', 'value="mangio-crepe",') - if new_line != line: - print("Replaced 'value=\"pm\",' with 'value=\"mangio-crepe\",' based on the condition") - changes_made = True - line = new_line - - new_line = line.replace('label=i18n("输入训练文件夹路径"), value="E:\\\\语音音频+标注\\\\米津玄师\\\\src"', 'label=i18n("输入训练文件夹路径"), value="/content/dataset/"') - if new_line != line: - print("Replaced 'label=i18n(\"输入训练文件夹路径\"), value=\"E:\\\\语音音频+标注\\\\米津玄师\\\\src\"' with 'label=i18n(\"输入训练文件夹路径\"), value=\"/content/dataset/\"'") - changes_made = True - line = new_line - - if 'label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"),' in second_previous_line: - if 'value=i18n("否"),' in line: - new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),') - if new_line != line: - print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE ONLY LATEST") - changes_made = True - line = new_line - - if 'label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),' in second_previous_line: - if 'value=i18n("否"),' in line: - new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),') - if new_line != line: - print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE SMALL WEIGHTS") - changes_made = True - line = new_line - - temp_file.write(line) - second_previous_line = previous_line - previous_line = line - - # After finished, we replace the original file with the temp one - import shutil - shutil.move(temp_file_path, file_path) - - if changes_made: - print("Changes made and file saved successfully.") - else: - print("No changes were needed.") - - # Define the repo path - repo_path = '/content/Applio-RVC-Fork' - - def copy_all_files_in_directory(src_dir, dest_dir): - # Iterate over all files in source directory - for item in Path(src_dir).glob('*'): - if item.is_file(): - # Copy each file to destination directory - shutil.copy(item, dest_dir) - else: - # If it's a directory, make a new directory in the destination and copy the files recursively - new_dest = Path(dest_dir) / item.name - new_dest.mkdir(exist_ok=True) - copy_all_files_in_directory(str(item), str(new_dest)) - - def clone_and_copy_repo(repo_path): - # New repository link - new_repo_link = "https://github.com/IAHispano/Applio-RVC-Fork/" - # Temporary path to clone the repository - temp_repo_path = "/content/temp_Applio-RVC-Fork" - # New folder name - new_folder_name = "Applio-RVC-Fork" - - # Clone the latest code from the new repository to a temporary location - run_cmd(f"git clone {new_repo_link} {temp_repo_path}") - os.chdir(temp_repo_path) - - run_cmd(f"git checkout 3fa4dad3d8961e5ca2522e9e12c0b4ddb71ad402") - run_cmd(f"git checkout f9e606c279cb49420597519b0a83b92be81e42e4") - run_cmd(f"git checkout 9e305588844c5442d58add1061b29beeca89d679") - run_cmd(f"git checkout bf92dc1eb54b4f28d6396a4d1820a25896cc9af8") - run_cmd(f"git checkout c3810e197d3cb98039973b2f723edf967ecd9e61") - run_cmd(f"git checkout a33159efd134c2413b0afe26a76b7dc87926d2de") - run_cmd(f"git checkout 24e251fb62c662e39ac5cf9253cc65deb9be94ec") - run_cmd(f"git checkout ad5667d3017e93232dba85969cddac1322ba2902") - run_cmd(f"git checkout ce9715392cf52dd5a0e18e00d1b5e408f08dbf27") - run_cmd(f"git checkout 7c7da3f2ac68f3bd8f3ad5ca5c700f18ab9f90eb") - run_cmd(f"git checkout 4ac395eab101955e8960b50d772c26f592161764") - run_cmd(f"git checkout b15b358702294c7375761584e5276c811ffab5e8") - run_cmd(f"git checkout 1501793dc490982db9aca84a50647764caa66e51") - run_cmd(f"git 
checkout 21f7faf57219c75e6ba837062350391a803e9ae2") - run_cmd(f"git checkout b5eb689fbc409b49f065a431817f822f554cebe7") - run_cmd(f"git checkout 7e02fae1ebf24cb151bf6cbe787d06734aa65862") - run_cmd(f"git checkout 6aea5ea18ed0b9a1e03fa5d268d6bc3c616672a9") - run_cmd(f"git checkout f0f9b25717e59116473fb42bd7f9252cfc32b398") - run_cmd(f"git checkout b394de424088a81fc081224bc27338a8651ad3b2") - run_cmd(f"git checkout f1999406a88b80c965d2082340f5ea2bfa9ab67a") - run_cmd(f"git checkout d98a0fa8dc715308dfc73eac5c553b69c6ee072b") - run_cmd(f"git checkout d73267a415fb0eba98477afa43ef71ffd82a7157") - run_cmd(f"git checkout 1a03d01356ae79179e1fb8d8915dc9cc79925742") - run_cmd(f"git checkout 81497bb3115e92c754300c9b3992df428886a3e9") - run_cmd(f"git checkout c5af1f8edcf79cb70f065c0110e279e78e48caf9") - run_cmd(f"git checkout cdb3c90109387fa4dfa92f53c3864c71170ffc77") - - # Edit the file here, before copying - #edit_file(f"{temp_repo_path}/infer-web.py") - - # Copy all files from the cloned repository to the existing path - copy_all_files_in_directory(temp_repo_path, repo_path) - print(f"Copying all {new_folder_name} files from GitHub.") - - # Change working directory back to /content/ - os.chdir('/content/') - print("Changed path back to /content/") - - # Remove the temporary cloned repository - shutil.rmtree(temp_repo_path) - - # Call the function - clone_and_copy_repo(repo_path) - - # Download the credentials file for RVC archive sheet - os.makedirs('/content/Applio-RVC-Fork/stats/', exist_ok=True) - run_cmd("wget -q https://cdn.discordapp.com/attachments/945486970883285045/1114717554481569802/peppy-generator-388800-07722f17a188.json -O /content/Applio-RVC-Fork/stats/peppy-generator-388800-07722f17a188.json") - - # Forcefully delete any existing torchcrepe dependencies downloaded from an earlier run just in case - shutil.rmtree('/content/Applio-RVC-Fork/torchcrepe', ignore_errors=True) - shutil.rmtree('/content/torchcrepe', ignore_errors=True) - - # Download the torchcrepe folder from the maxrmorrison/torchcrepe repository - run_cmd("git clone https://github.com/maxrmorrison/torchcrepe.git") - shutil.move('/content/torchcrepe/torchcrepe', '/content/Applio-RVC-Fork/') - shutil.rmtree('/content/torchcrepe', ignore_errors=True) # Delete the torchcrepe repository folder - - # Change the current directory to /content/Applio-RVC-Fork - os.chdir('/content/Applio-RVC-Fork') - os.makedirs('pretrained', exist_ok=True) - os.makedirs('uvr5_weights', exist_ok=True) - -def download_file(url, filepath): - response = requests.get(url, stream=True) - response.raise_for_status() - - with open(filepath, "wb") as file: - for chunk in response.iter_content(chunk_size=8192): - if chunk: - file.write(chunk) - -def download_pretrained_models(): - pretrained_models = { - "pretrained": [ - "D40k.pth", - "G40k.pth", - "f0D40k.pth", - "f0G40k.pth" - ], - "pretrained_v2": [ - "D40k.pth", - "G40k.pth", - "f0D40k.pth", - "f0G40k.pth", - "f0G48k.pth", - "f0D48k.pth" - ], - "uvr5_weights": [ - "HP2-人声vocals+非人声instrumentals.pth", - "HP5-主旋律人声vocals+其他instrumentals.pth", - "VR-DeEchoNormal.pth", - "VR-DeEchoDeReverb.pth", - "VR-DeEchoAggressive.pth", - "HP5_only_main_vocal.pth", - "HP3_all_vocals.pth", - "HP2_all_vocals.pth" - ] - } - part2 = "I" - base_url = "https://huggingface.co/lj1995/VoiceConversionWebU" + part2 + "/resolve/main/" - base_path = "/content/Applio-RVC-Fork/" - base_pathm = base_path - - # Calculate total number of files to download - total_files = sum(len(files) for files in pretrained_models.values()) + 1 # +1 
for hubert_base.pt - - with tqdm(total=total_files, desc="Downloading files") as pbar: - for folder, models in pretrained_models.items(): - folder_path = os.path.join(base_path, folder) - os.makedirs(folder_path, exist_ok=True) - for model in models: - url = base_url + folder + "/" + model - filepath = os.path.join(folder_path, model) - download_file(url, filepath) - pbar.update() - - # Download hubert_base.pt to the base path - hubert_url = base_url + "hubert_base.pt" - hubert_filepath = os.path.join(base_pathm, "hubert_base.pt") - download_file(hubert_url, hubert_filepath) - pbar.update() -def clone_repository(run_download): - with ThreadPoolExecutor(max_workers=2) as executor: - executor.submit(run_script) - if run_download: - executor.submit(download_pretrained_models) diff --git a/spaces/AIConsultant/MusicGen/audiocraft/solvers/base.py b/spaces/AIConsultant/MusicGen/audiocraft/solvers/base.py deleted file mode 100644 index 0432e44a36838c5731711f9d54f81822b21f20bd..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/audiocraft/solvers/base.py +++ /dev/null @@ -1,631 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from abc import ABC, abstractmethod -from contextlib import contextmanager -from pathlib import Path -import typing as tp - -import flashy -import omegaconf -import torch -from torch import nn - -from .. import optim -from ..optim import fsdp -from ..utils import checkpoint -from ..utils.autocast import TorchAutocast -from ..utils.best_state import BestStateDictManager -from ..utils.deadlock import DeadlockDetect -from ..utils.profiler import Profiler -from ..utils.utils import copy_state, dict_from_config, model_hash, with_rank_rng - - -class StandardSolver(ABC, flashy.BaseSolver): - """Standard solver for AudioCraft. - - The standard solver implements a base training loop with the following stages: - train, valid, evaluate and generate that are expected to be all defined for - solvers in AudioCraft. It also provides a nice default management of Dora history replay, - checkpoint management across epoch, and logging configuration. - - AudioCraft solvers must inherit from the StandardSolver and define the methods - associated to each stage as well as the show, build_model and build_dataloaders methods. 
- """ - def __init__(self, cfg: omegaconf.DictConfig): - super().__init__() - self.logger.info(f"Instantiating solver {self.__class__.__name__} for XP {self.xp.sig}") - self.logger.info(f"All XP logs are stored in {self.xp.folder}") - self.cfg = cfg - self.device = cfg.device - self.model: nn.Module - self._continue_best_source_keys = ['best_state', 'fsdp_best_state'] - self._fsdp_modules: tp.List[fsdp.FSDP] = [] - self._ema_sources: nn.ModuleDict = nn.ModuleDict() - self.ema: tp.Optional[optim.ModuleDictEMA] = None - self.dataloaders: tp.Dict[str, torch.utils.data.DataLoader] = dict() - self._log_updates = self.cfg.logging.get('log_updates', 10) - if self.cfg.logging.log_tensorboard: - self.init_tensorboard(**self.cfg.get('tensorboard')) - if self.cfg.logging.log_wandb and self: - self.init_wandb(**self.cfg.get('wandb')) - # keep a copy of the best performing state for stateful objects - # used for evaluation and generation stages - dtype_best: tp.Optional[torch.dtype] = None - if self.cfg.fsdp.use: - dtype_best = getattr(torch, self.cfg.fsdp.param_dtype) # type: ignore - assert isinstance(dtype_best, torch.dtype) - elif self.cfg.autocast: - dtype_best = getattr(torch, self.cfg.autocast_dtype) # type: ignore - assert isinstance(dtype_best, torch.dtype) - self.best_state: BestStateDictManager = BestStateDictManager(dtype=dtype_best) - # Hacky support for keeping a copy of the full best state in rank0. - self.fsdp_best_state: tp.Dict[str, tp.Any] = {} - self.register_stateful('best_state', 'fsdp_best_state') # register best_state object to keep it in state_dict - self._new_best_state: bool = False # should save a new checkpoint - # instantiate datasets and appropriate number of updates per epoch - self.build_dataloaders() - if self.cfg.execute_only is None: - assert 'train' in self.dataloaders, "The train dataset split must be provided." - assert 'valid' in self.dataloaders, "The valid dataset split must be provided." - self.train_updates_per_epoch = len(self.dataloaders['train']) if 'train' in self.dataloaders else 0 - if self.cfg.optim.updates_per_epoch: - self.train_updates_per_epoch = self.cfg.optim.updates_per_epoch - self.total_updates = self.train_updates_per_epoch * self.cfg.optim.epochs - # instantiate model & exponential moving average on the model - self.build_model() - self.logger.info("Model hash: %s", model_hash(self.model)) - assert 'model' in self.stateful.sources, \ - "Please register the model to stateful with self.register_stateful('model') in build_model." - self.profiler = Profiler(self.model, **self.cfg.profiler) - self.initialize_ema() - self.register_stateful('ema') - assert self.ema is None or 'ema' in self.stateful.sources, \ - "Please register the ema to stateful with self.register_stateful('ema') in build_model." - self.deadlock_detect = DeadlockDetect(**self.cfg.deadlock) - # basic statistics on the trained model - model_size = sum(p.numel() for p in self.model.parameters() if p.requires_grad) / 1e6 - # one copy of grad, one copy of momentum, one copy of denominator and model weights. - # and 4 bytes for each float! 
- mem_usage = model_size * 4 * 4 / 1000 - self.logger.info("Model size: %.2f M params", model_size) - self.logger.info("Base memory usage, with model, grad and optim: %.2f GB", mem_usage) - - @property - def autocast(self): - """Convenient autocast (or not) using the solver configuration.""" - return TorchAutocast(enabled=self.cfg.autocast, device_type=self.device, dtype=self.autocast_dtype) - - def _get_state_source(self, name) -> flashy.state.StateDictSource: - # Internal utility to get a state source from the solver - return self.stateful.sources[name] - - @property - def best_metric_name(self) -> tp.Optional[str]: - """Metric name used to identify the best state. This metric should be stored in the metrics - used on the stage for best state identification (most likely, `valid`). If None, then - no best state is saved. - """ - return None - - def register_best_state(self, *args: str): - """Register state sources in `BestStateDictManager` to keep their best states along with their - latest states. The best state will be used at evaluation stages instead of the latest states. - - Shortcut around `BestStateDictManager.register` method. You can pass any number of - attribute, included nested attributes and those will be included into the checkpoints - and automatically restored when `BaseSolver.restore` is called. - """ - for name in args: - state_source = self._get_state_source(name) - assert name in self.stateful.sources, "Registered states in best should be registered in stateful first!" - self.best_state.register(name, state_source) - - def register_ema(self, *args: str): - """Register state sources for exponential moving average. - - The registered sources are used to instantiate a ModuleDictEMA instance. - The ModuleDictEMA keeps a `nn.ModuleDict` module that is updated when self.ema.step() is called - and swapped with the original state sources with self.swap_ema_state() method. - - Usage: - self.register_ema('model') - """ - assert self.ema is None, "Cannot register state source to already instantiated EMA." - for name in args: - self._ema_sources[name] = getattr(self, name) - - def wrap_with_fsdp(self, model: torch.nn.Module, *args, **kwargs): - model = fsdp.wrap_with_fsdp(self.cfg.fsdp, model, *args, **kwargs) - if isinstance(model, fsdp.FSDP): - self._fsdp_modules.append(model) - return model - - def update_best_state_from_stage(self, stage_name: str = 'valid'): - """Update latest best state based on pending metrics of a given stage. This method relies - on the `BestStateDictManager.update` method to update the best state_dict with latest weights - if the registered states happen to match to the best performing setup. - """ - if self.best_metric_name is None: - # when no best metric is defined, the last state is always the best - self._new_best_state = True - self.logger.info("Updating best state with current state.") - else: - assert stage_name in self._pending_metrics, f"Metrics for stage {stage_name} not found." - assert self.best_metric_name in self._pending_metrics[stage_name], \ - f"Best metric not found in {stage_name} metrics. 
Cannot register best state" - current_score = self._pending_metrics[stage_name][self.best_metric_name] - all_best_metric_scores = [ - past_metrics[stage_name][self.best_metric_name] - for past_metrics in self.history - ] - all_best_metric_scores.append(current_score) - best_score = min(all_best_metric_scores) - self._new_best_state = current_score == best_score - if self._new_best_state: - old_best = min(all_best_metric_scores[:-1] + [float('inf')]) - self.logger.info( - f"New best state with {self.best_metric_name}={current_score:.3f} (was {old_best:.3f})") - - if self._new_best_state: - if self.cfg.fsdp.use: - # this will give an empty state dict on all ranks but the rank 0 - # which will have a copy in memory of the full model. - with fsdp.switch_to_full_state_dict(self._fsdp_modules): - for name in self.best_state.states.keys(): - state_source = self._get_state_source(name) - self.best_state.update(name, state_source) - # we save to a different dict. - self.fsdp_best_state.update(self.best_state.state_dict()) - # We cannot efficiently load fsdp_best_state when using FSDP, - # so we have do do a second pass, with the local shards. - for name in self.best_state.states.keys(): - state_source = self._get_state_source(name) - self.best_state.update(name, state_source) - - def _load_new_state_dict(self, state_dict: dict) -> dict: - old_states = {} - for name, new_state in state_dict.items(): - state_source = self._get_state_source(name) - old_states[name] = copy_state(state_source.state_dict()) - state_source.load_state_dict(new_state) - return old_states - - @contextmanager - def swap_best_state(self): - self.logger.debug(f"Swapping to best state for: {', '.join(self.best_state.state_dict().keys())}") - old_states = self._load_new_state_dict(self.best_state.state_dict()) - try: - yield - finally: - self.logger.debug("Swapping back from best to original state") - for name, old_state in old_states.items(): - state_source = self._get_state_source(name) - state_source.load_state_dict(old_state) - - @contextmanager - def swap_ema_state(self): - if self.ema is None: - yield - else: - ema_state_dict = self.ema.state_dict()['state'] - self.logger.debug(f"Swapping to EMA state for: {', '.join(ema_state_dict.keys())}") - old_states = self._load_new_state_dict(ema_state_dict) - try: - yield - finally: - self.logger.debug("Swapping back from EMA state to original state") - for name, old_state in old_states.items(): - state_source = self._get_state_source(name) - state_source.load_state_dict(old_state) - - @property - def is_training(self): - return self.current_stage == 'train' - - def log_model_summary(self, model: nn.Module): - """Log model summary, architecture and size of the model.""" - self.logger.info(model) - mb = sum(p.numel() for p in model.parameters()) * 4 / 2 ** 20 - self.logger.info("Size: %.1f MB", mb) - - @abstractmethod - def build_model(self): - """Method to implement to initialize model.""" - ... - - def initialize_ema(self): - """Initialize exponential moving average with the registered sources. - EMA object is created if the optim.ema.model.decay value is non-null. 
- """ - from .builders import get_ema - self.ema = get_ema(self._ema_sources, self.cfg.optim.ema) - if self.ema is None: - self.logger.info('No EMA on the model.') - else: - assert self.cfg.optim.ema.updates > 0 - self.logger.info( - f'Initializing EMA on the model with decay = {self.ema.decay}' - f' every {self.cfg.optim.ema.updates} updates' - ) - - @abstractmethod - def build_dataloaders(self): - """Method to implement to initialize dataloaders.""" - ... - - @abstractmethod - def show(self): - """Method to log any information without running the job.""" - ... - - @property - def log_updates(self): - # convenient access to log updates - return self._log_updates - - def checkpoint_path(self, **kwargs): - kwargs.setdefault('use_fsdp', self.cfg.fsdp.use) - return self.folder / checkpoint.checkpoint_name(**kwargs) - - def epoch_checkpoint_path(self, epoch: int, **kwargs): - kwargs.setdefault('use_fsdp', self.cfg.fsdp.use) - return self.folder / checkpoint.checkpoint_name(str(epoch), **kwargs) - - def checkpoint_path_with_name(self, name: str, **kwargs): - kwargs.setdefault('use_fsdp', self.cfg.fsdp.use) - return self.folder / checkpoint.checkpoint_name(name=name, **kwargs) - - def save_checkpoints(self): - """Save checkpoint, optionally keeping a copy for a given epoch.""" - is_sharded = self.cfg.fsdp.use - if not flashy.distrib.is_rank_zero() and not is_sharded: - return - self.logger.info("Model hash: %s", model_hash(self.model)) - state = self.state_dict() - epoch = self.epoch - 1 # pushing metrics will increase the epoch in Flashy, so we do -1 here - - # save minimal state_dict as new checkpoint every X epoch - if self.cfg.checkpoint.save_every: - if epoch % self.cfg.checkpoint.save_every == 0: - minimal_state = state - if self.cfg.checkpoint.keep_every_states is not None and len(self.cfg.checkpoint.keep_every_states) > 0: - minimal_state = { - name: source for name, source in state.items() - if name in self.cfg.checkpoint.keep_every_states - } - epoch_checkpoint_path = self.epoch_checkpoint_path(epoch) - checkpoint.save_checkpoint(minimal_state, epoch_checkpoint_path, is_sharded) - - # save checkpoint as latest checkpoint - if self.cfg.checkpoint.save_last: - last_checkpoint_path = self.checkpoint_path() - checkpoint.save_checkpoint(state, last_checkpoint_path, is_sharded) - - # flush any stale checkpoint to reduce disk footprint - checkpoint.flush_stale_checkpoints(self.checkpoint_path()) - - def load_from_pretrained(self, name: str) -> dict: - raise NotImplementedError("Solver does not provide a way to load pretrained models.") - - def load_checkpoints(self, load_best: bool = False, ignore_state_keys: tp.List[str] = []) -> tp.Optional[dict]: - """Load last checkpoint or the one specified in continue_from. - - Args: - load_best (bool): Whether to load from best state dict or not. - Best state dict is always used when not loading the current xp. - ignore_state_keys (list of str): List of sources to ignore when loading the state, e.g. `optimizer`. - Returns: - state (dict, optional): The loaded state dictionary. 
- """ - # load checkpoints from xp folder or cfg.continue_from - is_sharded = self.cfg.fsdp.use - load_from_path: tp.Optional[Path] = None - checkpoint_source: tp.Optional[checkpoint.CheckpointSource] = None - - if load_best: - self.logger.info("Trying to load state_dict from best state.") - - state: tp.Optional[dict] = None - rank0_checkpoint_path = self.checkpoint_path(use_fsdp=False) - current_checkpoint_path = self.checkpoint_path() - _pretrained_prefix = '//pretrained/' - continue_pretrained = (self.cfg.continue_from or '').startswith(_pretrained_prefix) - if rank0_checkpoint_path.exists(): - self.logger.info(f"Loading existing checkpoint: {current_checkpoint_path}") - load_from_path = current_checkpoint_path - checkpoint.check_sharded_checkpoint(current_checkpoint_path, rank0_checkpoint_path) - checkpoint_source = checkpoint.CheckpointSource.CURRENT_XP - elif self.cfg.continue_from and not continue_pretrained: - self.logger.info(f"Continuing from provided checkpoint: {self.cfg.continue_from}") - # we're always continuing from consolidated checkpoints: self.cfg.use_fsdp and not continue_best - load_from_path = checkpoint.resolve_checkpoint_path(self.cfg.continue_from, use_fsdp=False) - if load_from_path is None: - self.logger.error('Could not resolve the continue_from checkpoint %s', self.cfg.continue_from) - raise RuntimeError(f'Could not resolve continue_from checkpoint {self.cfg.continue_from}') - checkpoint_source = checkpoint.CheckpointSource.OTHER - - if load_from_path is not None: - state = checkpoint.load_checkpoint(load_from_path, is_sharded) - elif continue_pretrained: - self.logger.info("Loading a pretrained model. Ignoring 'load_best' and 'ignore_state_keys' params.") - state = self.load_from_pretrained(self.cfg.continue_from[len(_pretrained_prefix):]) - checkpoint_source = checkpoint.CheckpointSource.PRETRAINED - load_best = True - - # checkpoints are not from the current xp, we only retrieve the best state - if checkpoint_source is not None and checkpoint_source != checkpoint.CheckpointSource.CURRENT_XP: - assert state is not None - self.logger.info("Checkpoint source is not the current xp: Load state_dict from best state.") - load_best = True - state = {key: state[key] for key in self._continue_best_source_keys if key in state} - # loaded checkpoints are FSDP checkpoints: we're reading the best state - # from FSDP and we drop the regular best_state - if 'fsdp_best_state' in state and state['fsdp_best_state']: - state.pop('best_state', None) - self.logger.info("... Loaded checkpoint has FSDP best state") - # FSDP is enabled in the solver, if the loaded checkpoints do not have FSDP support - # then we're initializing FSDP best state with the regular best state - elif self.cfg.fsdp.use: - if 'fsdp_best_state' not in state or not state['fsdp_best_state']: - # we swap non-FSDP checkpoints best_state to FSDP-compatible best state - state['fsdp_best_state'] = state.pop('best_state') - self.logger.info("... Loaded checkpoint does not have FSDP best state. 
Use regular best state") - - if state is not None: - if load_best: - self.logger.info("Ignoring keys when loading best %r", ignore_state_keys) - for key in set(ignore_state_keys): - if key in state: - state.pop(key) - has_best_state = 'best_state' in state or 'fsdp_best_state' in state - assert has_best_state, ("Trying to load best state but neither 'best_state'", - " or 'fsdp_best_state' found in checkpoints.") - self.load_state_dict(state) - - # for FSDP, let's make extra sure nothing bad happened with out of sync - # checkpoints across workers. - epoch = float(self.epoch) - avg_epoch = flashy.distrib.average_metrics({'epoch': epoch})['epoch'] - if avg_epoch != epoch: - raise RuntimeError( - f"Inconsistent loading of checkpoints happened, our epoch is {epoch} " - f"but average of epochs is {avg_epoch}, at least one gpu must have a " - "different epoch number.") - - # on load_best, properly reinitialize state_dict, best states and ema - # otherwise we load from the current xp and don't alter anything - if load_best: - self.logger.info("Loading state_dict from best state.") - if not self.cfg.fsdp.use and self.fsdp_best_state: - # loading from an FSDP checkpoint but with FSDP deactivated - self.logger.info("... Loading from FSDP best state dict.") - self.best_state.load_state_dict(self.fsdp_best_state) - - # if load_best, we permanently override the regular state_dict with the best state - if self.cfg.fsdp.use: - self.logger.info("FSDP is used, loading from FSDP best state.") - with fsdp.switch_to_full_state_dict(self._fsdp_modules): - # this might be really fragile but okay for now. - self.load_state_dict(self.fsdp_best_state) - else: - # we permanently swap the stateful objects to their best state - self._load_new_state_dict(self.best_state.state_dict()) - - # the EMA modules should also be instantiated with best state. - # the easiest way to do so is to reinitialize a new EMA with best state loaded. - if self.ema is not None: - self.logger.info("Re-initializing EMA from best state") - self.initialize_ema() - - if self.cfg.fsdp.use: - self.logger.info("Re-initializing best state after using FSDP best state.") - for name in self.best_state.states.keys(): - state_source = self._get_state_source(name) - self.best_state.update(name, state_source) - - return state - - def restore(self, load_best: bool = False, replay_metrics: bool = False, - ignore_state_keys: tp.List[str] = []) -> bool: - """Restore the status of a solver for a given xp. - - Args: - load_best (bool): if `True`, load the best state from the checkpoint. - replay_metrics (bool): if `True`, logs all the metrics from past epochs. - ignore_state_keys (list of str): list of sources to ignore when loading the state, e.g. `optimizer`. 
- """ - self.logger.info("Restoring weights and history.") - restored_checkpoints = self.load_checkpoints(load_best, ignore_state_keys) - - self.logger.info("Model hash: %s", model_hash(self.model)) - - if replay_metrics and len(self.history) > 0: - self.logger.info("Replaying past metrics...") - for epoch, stages in enumerate(self.history): - for stage_name, metrics in stages.items(): - # We manually log the metrics summary to the result logger - # as we don't want to add them to the pending metrics - self.result_logger._log_summary(stage_name, metrics, step=epoch + 1, step_name='epoch', - formatter=self.get_formatter(stage_name)) - return restored_checkpoints is not None - - def commit(self, save_checkpoints: bool = True): - """Commit metrics to dora and save checkpoints at the end of an epoch.""" - # we override commit to introduce more complex checkpoint saving behaviors - self.history.append(self._pending_metrics) # This will increase self.epoch - if save_checkpoints: - self.save_checkpoints() - self._start_epoch() - if flashy.distrib.is_rank_zero(): - self.xp.link.update_history(self.history) - - def run_epoch(self): - """Run a single epoch with all stages. - - Metrics for a given stage are stored in _pending_metrics and committed by the solver afterwards. - Children solvers can extend this method with custom behavior, e.g.: - - def run_epoch(self): - ... # custom code - super().run_epoch() - ... # custom code - """ - self.run_stage('train', self.train) - with torch.no_grad(): - with self.swap_ema_state(): - self.run_stage('valid', self.valid) - # the best state is updated with EMA states if available - self.update_best_state_from_stage('valid') - with self.swap_best_state(): - if self.should_run_stage('evaluate'): - self.run_stage('evaluate', self.evaluate) - if self.should_run_stage('generate'): - self.run_stage('generate', with_rank_rng()(self.generate)) - - def run(self): - """Training loop.""" - assert len(self.state_dict()) > 0 - self.restore(replay_metrics=True) # load checkpoint and replay history - self.log_hyperparams(dict_from_config(self.cfg)) - for epoch in range(self.epoch, self.cfg.optim.epochs + 1): - if self.should_stop_training(): - return - self.run_epoch() - # Commit will send the metrics to Dora and save checkpoints by default. - self.commit() - - def should_stop_training(self) -> bool: - """Check whether we should stop training or not.""" - return self.epoch > self.cfg.optim.epochs - - def should_run_stage(self, stage_name) -> bool: - """Check whether we want to run the specified stages.""" - stage_every = self.cfg[stage_name].get('every', None) - is_last_epoch = self.epoch == self.cfg.optim.epochs - is_epoch_every = (stage_every and self.epoch % stage_every == 0) - return is_last_epoch or is_epoch_every - - @abstractmethod - def run_step(self, idx: int, batch: tp.Any, metrics: dict): - """Perform one training or valid step on a given batch.""" - ... 
- - def common_train_valid(self, dataset_split: str, **kwargs: tp.Any): - """Common logic for train and valid stages.""" - self.model.train(self.is_training) - - loader = self.dataloaders[dataset_split] - # get a different order for distributed training, otherwise this will get ignored - if flashy.distrib.world_size() > 1 \ - and isinstance(loader.sampler, torch.utils.data.distributed.DistributedSampler): - loader.sampler.set_epoch(self.epoch) - updates_per_epoch = self.train_updates_per_epoch if self.is_training else len(loader) - if self.cfg.benchmark_no_load: - self.logger.warning("Fake loading for benchmarking: re-using first batch") - batch = next(iter(loader)) - loader = [batch] * updates_per_epoch # type: ignore - lp = self.log_progress(self.current_stage, loader, total=updates_per_epoch, updates=self.log_updates) - average = flashy.averager() # epoch wise average - instant_average = flashy.averager() # average between two logging - metrics: dict = {} - - with self.profiler, self.deadlock_detect: # profiler will only run for the first 20 updates. - for idx, batch in enumerate(lp): - self.deadlock_detect.update('batch') - if idx >= updates_per_epoch: - break - metrics = {} - metrics = self.run_step(idx, batch, metrics) - self.deadlock_detect.update('step') - # run EMA step - if self.ema is not None and self.is_training and (idx + 1) % self.cfg.optim.ema.updates == 0: - self.logger.debug("EMA model step") - self.ema.step() - self.deadlock_detect.update('ema') - self.profiler.step() - instant_metrics = instant_average(metrics) - if lp.update(**instant_metrics): - instant_average = flashy.averager() # reset averager between two logging - metrics = average(metrics) # epoch wise average - self.deadlock_detect.update('end_batch') - - metrics = flashy.distrib.average_metrics(metrics, updates_per_epoch) - return metrics - - def train(self): - """Train stage.""" - return self.common_train_valid('train') - - def valid(self): - """Valid stage.""" - return self.common_train_valid('valid') - - @abstractmethod - def evaluate(self): - """Evaluate stage.""" - ... - - @abstractmethod - def generate(self): - """Generate stage.""" - ... - - def run_one_stage(self, stage_name: str): - """Run only the specified stage. - This method is useful to only generate samples from a trained experiment - or rerun the validation or evaluation stages. - """ - fn = { - 'generate': with_rank_rng()(self.generate), - 'evaluate': self.evaluate, - 'valid': self.valid, - } - if stage_name not in fn: - raise ValueError(f'Trying to run stage {stage_name} is not supported.') - assert len(self.state_dict()) > 0 - self._start_epoch() - with torch.no_grad(), self.swap_best_state(): - self.run_stage(stage_name, fn[stage_name]) - if not self.cfg.execute_inplace: - self.commit(save_checkpoints=False) - - @staticmethod - def get_eval_solver_from_sig(sig: str, dtype: tp.Optional[str] = None, - device: tp.Optional[str] = None, autocast: bool = True, - batch_size: tp.Optional[int] = None, - override_cfg: tp.Optional[tp.Union[dict, omegaconf.DictConfig]] = None, - **kwargs): - """Mostly a convenience function around audiocraft.train.get_solver_from_sig, - populating all the proper param, deactivating EMA, FSDP, loading the best state, - basically all you need to get a solver ready to "play" with in single GPU mode - and with minimal memory overhead. - - Args: - sig (str): signature to load. - dtype (str or None): potential dtype, as a string, i.e. 'float16'. - device (str or None): potential device, as a string, i.e. 'cuda'. 
- override_cfg (dict or omegaconf.DictConfig or None): potential device, as a string, i.e. 'cuda'. - """ - from audiocraft import train - our_override_cfg: tp.Dict[str, tp.Any] = {'optim': {'ema': {'use': False}}} - our_override_cfg['autocast'] = autocast - if dtype is not None: - our_override_cfg['dtype'] = dtype - if device is not None: - our_override_cfg['device'] = device - if batch_size is not None: - our_override_cfg['dataset'] = {'batch_size': batch_size} - if override_cfg is None: - override_cfg = {} - override_cfg = omegaconf.OmegaConf.merge( - omegaconf.DictConfig(override_cfg), omegaconf.DictConfig(our_override_cfg)) # type: ignore - solver = train.get_solver_from_sig( - sig, override_cfg=override_cfg, - load_best=True, disable_fsdp=True, - ignore_state_keys=['optimizer', 'ema'], **kwargs) - solver.model.eval() - return solver diff --git a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/__init__.py b/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AIGC-Audio/Make_An_Audio/wav_evaluation/models/__init__.py b/spaces/AIGC-Audio/Make_An_Audio/wav_evaluation/models/__init__.py deleted file mode 100644 index aadad97ebc9ec23fdebab974a99e343de90f8afd..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/Make_An_Audio/wav_evaluation/models/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from . import clap -from . import audio -from . import utils \ No newline at end of file diff --git a/spaces/AIGText/GlyphControl/transfer.py b/spaces/AIGText/GlyphControl/transfer.py deleted file mode 100644 index 3d48e4872474783e050b4276d544dad5b704f7dc..0000000000000000000000000000000000000000 --- a/spaces/AIGText/GlyphControl/transfer.py +++ /dev/null @@ -1,26 +0,0 @@ -from omegaconf import OmegaConf -from scripts.rendertext_tool import Render_Text, load_model_from_config -import torch - -# cfg = OmegaConf.load("other_configs/config_ema.yaml") -# model = load_model_from_config(cfg, "model_states.pt", verbose=True) -# model = load_model_from_config(cfg, "mp_rank_00_model_states.pt", verbose=True) - -cfg = OmegaConf.load("other_configs/config_ema_unlock.yaml") -epoch_idx = 39 -model = load_model_from_config(cfg, "epoch={:0>6d}.ckpt".format(epoch_idx), verbose=True) - -from pytorch_lightning.callbacks import ModelCheckpoint -with model.ema_scope("store ema weights"): - model_sd = model.state_dict() - store_sd = {} - for key in model_sd: - if "ema" in key: - continue - store_sd[key] = model_sd[key] - file_content = { - 'state_dict': store_sd - } - torch.save(file_content, f"textcaps5K_epoch_{epoch_idx+1}_model_wo_ema.ckpt") - print("has stored the transfered ckpt.") -print("trial ends!") diff --git a/spaces/AIGuardians/SummarizeWikipediaDocument/README.md b/spaces/AIGuardians/SummarizeWikipediaDocument/README.md deleted file mode 100644 index e7aeb7a95980fa5ddcdb19d5570b391746c77a49..0000000000000000000000000000000000000000 --- a/spaces/AIGuardians/SummarizeWikipediaDocument/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Summaraize -emoji: 🏢 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_32xb64-warmup_in1k.py 
b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_32xb64-warmup_in1k.py deleted file mode 100644 index 34d5288b9d3f9fcf3f0b409dc1c17906654c2170..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_32xb64-warmup_in1k.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs64.py', - '../_base_/schedules/imagenet_bs2048.py', '../_base_/default_runtime.py' -] diff --git a/spaces/Ababababababbababa/Arabic_poem_classifier/app.py b/spaces/Ababababababbababa/Arabic_poem_classifier/app.py deleted file mode 100644 index bbf72b782320453cd5d9fb4e7e1ebd99fc972af8..0000000000000000000000000000000000000000 --- a/spaces/Ababababababbababa/Arabic_poem_classifier/app.py +++ /dev/null @@ -1,36 +0,0 @@ -import gradio as gr - -description = "التعرف على خاصيات البيت الشعري" -title = """هذا البرنامج يقوم بالتعرف على مختلف خاصيات البيت من الشعر. -يمكنكم إختيار الخاصية من بين: -- التعرف على البحر -- التعرف على الروي -التعرف على الموضوع-""" - -examples = [["سَلو قَلبي غَداةَ سَلا وَثابا لَعَلَّ عَلى الجَمالِ لَهُ عِتابا"], ["قفا نبك من ذِكرى حبيب ومنزلِ بسِقطِ اللِّوى بينَ الدَّخول فحَوْملِ"]] - - -meter = gr.Interface.load("huggingface/Yah216/Arabic_poem_meter_3", - description="من فضلك، أدخل البيت الشعري الذي تود التعرف عليه", - examples=examples, title = "التعرف على البحر", - inputs = gr.inputs.Textbox(lines = 3, label = "البيت") - -) -rawiy = gr.Interface.load("huggingface/Yah216/Poem_Qafiyah_Detection", - title ="التعرف على الروي", - examples=examples, - description="من فضلك، أدخل البيت الشعري الذي تود التعرف عليه", - inputs = gr.inputs.Textbox(lines = 3, label = "البيت") - -) -subject = gr.Interface.load( - "huggingface/zenkri/autotrain-Arabic_Poetry_by_Subject-920730230", - title="التعرف على الموضوع", - examples=examples, - description="من فضلك، أدخل البيت الشعري الذي تود التعرف عليه", - inputs = gr.inputs.Textbox(lines = 3, label = "البيت") - -) -demo = gr.TabbedInterface([meter, rawiy, subject], ["التعرف على البحر","التعرف على الروي","التعرف على الموضوع"]) -demo.launch() - diff --git a/spaces/Adapter/T2I-Adapter/experiments/README.md b/spaces/Adapter/T2I-Adapter/experiments/README.md deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AkitoP/umamusume_bert_vits2/models.py b/spaces/AkitoP/umamusume_bert_vits2/models.py deleted file mode 100644 index dd9e0c087357ecfc5a1548eddb5a30d77d2b5bf5..0000000000000000000000000000000000000000 --- a/spaces/AkitoP/umamusume_bert_vits2/models.py +++ /dev/null @@ -1,986 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -from commons import init_weights, get_padding -from text import symbols, num_tones, num_languages - - -class DurationDiscriminator(nn.Module): # vits2 - def __init__( - self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0 - ): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d( - 
in_channels, filter_channels, kernel_size, padding=kernel_size // 2 - ) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d( - filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 - ) - self.norm_2 = modules.LayerNorm(filter_channels) - self.dur_proj = nn.Conv1d(1, filter_channels, 1) - - self.pre_out_conv_1 = nn.Conv1d( - 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 - ) - self.pre_out_norm_1 = modules.LayerNorm(filter_channels) - self.pre_out_conv_2 = nn.Conv1d( - filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 - ) - self.pre_out_norm_2 = modules.LayerNorm(filter_channels) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid()) - - def forward_probability(self, x, x_mask, dur, g=None): - dur = self.dur_proj(dur) - x = torch.cat([x, dur], dim=1) - x = self.pre_out_conv_1(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_1(x) - x = self.drop(x) - x = self.pre_out_conv_2(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_2(x) - x = self.drop(x) - x = x * x_mask - x = x.transpose(1, 2) - output_prob = self.output_layer(x) - return output_prob - - def forward(self, x, x_mask, dur_r, dur_hat, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - - output_probs = [] - for dur in [dur_r, dur_hat]: - output_prob = self.forward_probability(x, x_mask, dur, g) - output_probs.append(output_prob) - - return output_probs - - -class TransformerCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - n_flows=4, - gin_channels=0, - share_parameter=False, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - - self.wn = ( - attentions.FFT( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - isflow=True, - gin_channels=self.gin_channels, - ) - if share_parameter - else None - ) - - for i in range(n_flows): - self.flows.append( - modules.TransformerCouplingLayer( - channels, - hidden_channels, - kernel_size, - n_layers, - n_heads, - p_dropout, - filter_channels, - mean_only=True, - wn_sharing_parameter=self.wn, - gin_channels=self.gin_channels, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class StochasticDurationPredictor(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - p_dropout, - n_flows=4, - gin_channels=0, - ): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append( - modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3) - ) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv( - filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout - ) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append( - modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3) - ) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv( - filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout - ) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = ( - torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) - * x_mask - ) - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum( - (F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2] - ) - logq = ( - torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2]) - - logdet_tot_q - ) - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = ( - torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2]) - - logdet_tot - ) - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = ( - torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) - * noise_scale - ) - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__( - self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0 - ): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d( - in_channels, filter_channels, kernel_size, padding=kernel_size // 2 - ) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d( - filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 - ) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = 
nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__( - self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=0, - ): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - self.emb = nn.Embedding(len(symbols), hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - self.tone_emb = nn.Embedding(num_tones, hidden_channels) - nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels**-0.5) - self.language_emb = nn.Embedding(num_languages, hidden_channels) - nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels**-0.5) - self.bert_proj = nn.Conv1d(1024, hidden_channels, 1) - self.ja_bert_proj = nn.Conv1d(768, hidden_channels, 1) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, tone, language, bert, ja_bert, g=None): - bert_emb = self.bert_proj(bert).transpose(1, 2) - ja_bert_emb = self.ja_bert_proj(ja_bert).transpose(1, 2) - x = ( - self.emb(x) - + self.tone_emb(tone) - + self.language_emb(language) - + bert_emb - + ja_bert_emb - ) * math.sqrt( - self.hidden_channels - ) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - - x = self.encoder(x * x_mask, x_mask, g=g) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - 
self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print("Removing weight norm...") - for layer in self.ups: - remove_weight_norm(layer) - for layer in self.resblocks: - layer.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm is False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 
1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for layer in self.convs: - x = layer(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm is False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for layer in self.convs: - x = layer(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class ReferenceEncoder(nn.Module): - """ - inputs --- [N, Ty/r, n_mels*r] mels - outputs --- [N, ref_enc_gru_size] - """ - - def __init__(self, spec_channels, gin_channels=0): - super().__init__() - self.spec_channels = spec_channels - ref_enc_filters = [32, 32, 64, 64, 128, 128] - K = len(ref_enc_filters) - filters = [1] + ref_enc_filters - convs = [ - weight_norm( - nn.Conv2d( - in_channels=filters[i], - out_channels=filters[i + 1], - kernel_size=(3, 3), - stride=(2, 2), - padding=(1, 1), - ) - ) - for i in range(K) - ] - self.convs = nn.ModuleList(convs) - # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)]) # noqa: E501 - - out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K) - self.gru = nn.GRU( - input_size=ref_enc_filters[-1] * out_channels, - hidden_size=256 // 2, - batch_first=True, - ) - self.proj = nn.Linear(128, gin_channels) - - def forward(self, inputs, mask=None): - N = inputs.size(0) - out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs] - for conv in self.convs: - out = conv(out) - # out = wn(out) - out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K] - - out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K] - T = out.size(1) - N = out.size(0) - out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 
128*n_mels//2^K] - - self.gru.flatten_parameters() - memory, out = self.gru(out) # out --- [1, N, 128] - - return self.proj(out.squeeze(0)) - - def calculate_channels(self, L, kernel_size, stride, pad, n_convs): - for i in range(n_convs): - L = (L - kernel_size + 2 * pad) // stride + 1 - return L - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=256, - gin_channels=256, - use_sdp=True, - n_flow_layer=4, - n_layers_trans_flow=6, - flow_share_parameter=False, - use_transformer_flow=True, - **kwargs - ): - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - self.n_layers_trans_flow = n_layers_trans_flow - self.use_spk_conditioned_encoder = kwargs.get( - "use_spk_conditioned_encoder", True - ) - self.use_sdp = use_sdp - self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False) - self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01) - self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6) - self.current_mas_noise_scale = self.mas_noise_scale_initial - if self.use_spk_conditioned_encoder and gin_channels > 0: - self.enc_gin_channels = gin_channels - self.enc_p = TextEncoder( - n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.enc_gin_channels, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - if use_transformer_flow: - self.flow = TransformerCouplingBlock( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers_trans_flow, - 5, - p_dropout, - n_flow_layer, - gin_channels=gin_channels, - share_parameter=flow_share_parameter, - ) - else: - self.flow = ResidualCouplingBlock( - inter_channels, - hidden_channels, - 5, - 1, - n_flow_layer, - gin_channels=gin_channels, - ) - self.sdp = StochasticDurationPredictor( - hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels - ) - self.dp = DurationPredictor( - hidden_channels, 256, 3, 0.5, gin_channels=gin_channels - ) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - else: - self.ref_enc = ReferenceEncoder(spec_channels, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert, ja_bert): - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] 
- else: - g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p( - x, x_lengths, tone, language, bert, ja_bert, g=g - ) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum( - -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True - ) # [b, 1, t_s] - neg_cent2 = torch.matmul( - -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r - ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul( - z_p.transpose(1, 2), (m_p * s_p_sq_r) - ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum( - -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True - ) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - if self.use_noise_scaled_mas: - epsilon = ( - torch.std(neg_cent) - * torch.randn_like(neg_cent) - * self.current_mas_noise_scale - ) - neg_cent = neg_cent + epsilon - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = ( - monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)) - .unsqueeze(1) - .detach() - ) - - w = attn.sum(2) - - l_length_sdp = self.sdp(x, x_mask, w, g=g) - l_length_sdp = l_length_sdp / torch.sum(x_mask) - - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum( - x_mask - ) # for averaging - - l_length = l_length_dp + l_length_sdp - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return ( - o, - l_length, - attn, - ids_slice, - x_mask, - y_mask, - (z, z_p, m_p, logs_p, m_q, logs_q), - (x, logw, logw_), - ) - - def infer( - self, - x, - x_lengths, - sid, - tone, - language, - bert, - ja_bert, - noise_scale=0.667, - length_scale=1, - noise_scale_w=0.8, - max_len=None, - sdp_ratio=0, - y=None, - ): - # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert) - # g = self.gst(y) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p( - x, x_lengths, tone, language, bert, ja_bert, g=g - ) - logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * ( - sdp_ratio - ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to( - x_mask.dtype - ) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose( - 1, 2 - ) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose( - 1, 2 - ) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) diff --git a/spaces/Akshat231/super_space/app.py b/spaces/Akshat231/super_space/app.py deleted file mode 100644 index 
d044f942fcd808f11c44838bb5fe54268d17babb..0000000000000000000000000000000000000000 --- a/spaces/Akshat231/super_space/app.py +++ /dev/null @@ -1,122 +0,0 @@ -##THIS IS FOR SUPER-RESOLUTION\ - -import gradio as gr -from PIL import Image -import tensorflow as tf -import tensorflow_hub as hub -import numpy as np -import requests -import cv2 -from tensorflow.python.keras.layers import Add, Conv2D, Input, Lambda -from tensorflow.python.keras.models import Model - - -super_resolution='./weights.h5' - - -pre_mean = np.array([0.4488, 0.4371, 0.4040]) * 255 - - -#HELPER FUN -def normalize(x, rgb_mean=pre_mean): - return (x - rgb_mean) / 127.5 - -#HELPER FUN -def pixel_shuffle(scale): - return lambda x: tf.nn.depth_to_space(x, scale) - -#HELPER FUN -def denormalize(x, rgb_mean=pre_mean): - return x * 127.5 + rgb_mean - - -#MAIN FUN -def res_block(x_in, filters, scaling): - x = Conv2D(filters, 3, padding='same', activation='relu')(x_in) - x = Conv2D(filters, 3, padding='same')(x) - x =tf.keras.layers.LeakyReLU(alpha = 0.01)(x) - x = tf.keras.layers.BatchNormalization()(x) - if scaling: - x = Lambda(lambda t: t * scaling)(x) - x = Add()([x_in, x]) - return x - - - -#HELPER FUN -def upsample(x, scale, num_filters): - def upsample_1(x, factor, **kwargs): - x = Conv2D(num_filters * (factor ** 2), 3, padding='same', **kwargs)(x) - return Lambda(pixel_shuffle(scale=factor))(x) - - if scale == 2: - x = upsample_1(x, 2, name='conv2d_1_scale_2') - elif scale == 3: - x = upsample_1(x, 3, name='conv2d_1_scale_3') - elif scale == 4: - x = upsample_1(x, 2, name='conv2d_1_scale_2') - x = upsample_1(x, 2, name='conv2d_2_scale_2') - - return x - -#MAIN FUN -def super_res(scale, num_filters=64, num_res_blocks=8, res_block_scaling=None): - x_in = Input(shape=(None, None, 3)) - x = Lambda(normalize)(x_in) - x = b = Conv2D(num_filters, 3, padding='same')(x) - - for i in range(num_res_blocks): - b = res_block(b, num_filters, res_block_scaling) - b = Conv2D(num_filters, 3, padding='same')(b) - x = Add()([x, b]) - - x = upsample(x, scale, num_filters) - x = Conv2D(3, 3, padding='same')(x) - - x = Lambda(denormalize)(x) - return Model(x_in, x, name="super_res") - - - - -def load_image(path): - return np.array(path) - - - - -def resolve(model, lr_batch): - lr_batch = tf.cast(lr_batch, tf.float32) - sr_batch = model(lr_batch) - sr_batch = tf.clip_by_value(sr_batch, 0, 255) - sr_batch = tf.round(sr_batch) - sr_batch = tf.cast(sr_batch, tf.uint8) - return sr_batch - - - -def resolve_single(model, lr): - return resolve(model, tf.expand_dims(lr, axis=0))[0] - - - -model=super_res(scale=4, num_res_blocks=16) - - -model.load_weights(super_resolution) - - -def predict_image(image): - lr=load_image(image) - sr = resolve_single(model, lr) - numpy_array = sr.numpy() - ima = Image.fromarray(numpy_array) - return ima - - - -image=gr.inputs.Image() - -irface=gr.Interface(fn=predict_image, inputs=image, outputs=image,interpretation='default') - -irface.launch() \ No newline at end of file diff --git a/spaces/AkshayKollimarala/MYAIVOICESPEECH/app.py b/spaces/AkshayKollimarala/MYAIVOICESPEECH/app.py deleted file mode 100644 index ca8b6d40b4ab898c70da92f4a4298de2baf703dc..0000000000000000000000000000000000000000 --- a/spaces/AkshayKollimarala/MYAIVOICESPEECH/app.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import re -import requests -import json -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - 
-OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') -PLAY_HT_API_KEY=os.getenv('PLAY_HT_API_KEY') -PLAY_HT_USER_ID=os.getenv('PLAY_HT_USER_ID') - -PLAY_HT_VOICE_ID=os.getenv('PLAY_HT_VOICE_ID') -play_ht_api_get_audio_url = "https://play.ht/api/v2/tts" - - -template = """You are a helpful assistant to answer user queries. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -headers = { - "accept": "text/event-stream", - "content-type": "application/json", - "AUTHORIZATION": "Bearer "+ PLAY_HT_API_KEY, - "X-USER-ID": PLAY_HT_USER_ID -} - - -def get_payload(text): - return { - "text": text, - "voice": PLAY_HT_VOICE_ID, - "quality": "medium", - "output_format": "mp3", - "speed": 1, - "sample_rate": 24000, - "seed": None, - "temperature": None - } - -def get_generated_audio(text): - payload = get_payload(text) - generated_response = {} - try: - response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers) - response.raise_for_status() - generated_response["type"]= 'SUCCESS' - generated_response["response"] = response.text - except requests.exceptions.RequestException as e: - generated_response["type"]= 'ERROR' - try: - response_text = json.loads(response.text) - if response_text['error_message']: - generated_response["response"] = response_text['error_message'] - else: - generated_response["response"] = response.text - except Exception as e: - generated_response["response"] = response.text - except Exception as e: - generated_response["type"]= 'ERROR' - generated_response["response"] = response.text - return generated_response - -def extract_urls(text): - # Define the regex pattern for URLs - url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*' - - # Find all occurrences of URLs in the text - urls = re.findall(url_pattern, text) - - return urls - -def get_audio_reply_for_question(text): - generated_audio_event = get_generated_audio(text) - #From get_generated_audio, you will get events in a string format, from that we need to extract the url - final_response = { - "audio_url": '', - "message": '' - } - if generated_audio_event["type"] == 'SUCCESS': - audio_urls = extract_urls(generated_audio_event["response"]) - if len(audio_urls) == 0: - final_response['message'] = "No audio file link found in generated event" - else: - final_response['audio_url'] = audio_urls[-1] - else: - final_response['message'] = generated_audio_event['response'] - return final_response - -def download_url(url): - try: - # Send a GET request to the URL to fetch the content - final_response = { - 'content':'', - 'error':'' - } - response = requests.get(url) - # Check if the request was successful (status code 200) - if response.status_code == 200: - final_response['content'] = response.content - else: - final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}" - except Exception as e: - final_response['error'] = f"Failed to download the URL. 
Error: {e}" - return final_response - -def get_filename_from_url(url): - # Use os.path.basename() to extract the file name from the URL - file_name = os.path.basename(url) - return file_name - -def get_text_response(user_message): - response = llm_chain.predict(user_message = user_message) - return response - -def get_text_response_and_audio_response(user_message): - response = get_text_response(user_message) # Getting the reply from Open AI - audio_reply_for_question_response = get_audio_reply_for_question(response) - final_response = { - 'output_file_path': '', - 'message':'' - } - audio_url = audio_reply_for_question_response['audio_url'] - if audio_url: - output_file_path=get_filename_from_url(audio_url) - download_url_response = download_url(audio_url) - audio_content = download_url_response['content'] - if audio_content: - with open(output_file_path, "wb") as audio_file: - audio_file.write(audio_content) - final_response['output_file_path'] = output_file_path - else: - final_response['message'] = download_url_response['error'] - else: - final_response['message'] = audio_reply_for_question_response['message'] - return final_response - -def chat_bot_response(message, history): - text_and_audio_response = get_text_response_and_audio_response(message) - output_file_path = text_and_audio_response['output_file_path'] - if output_file_path: - return (text_and_audio_response['output_file_path'],) - else: - return text_and_audio_response['message'] - -demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"]) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index dd7c16580d0620bc854f2c6eb7c881bdcd23020a..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/spaces/Araby/BRATArA/app.py b/spaces/Araby/BRATArA/app.py deleted file mode 100644 index 8dae9d6af44090d1c24ed9ca9b77836236d131e5..0000000000000000000000000000000000000000 --- a/spaces/Araby/BRATArA/app.py +++ /dev/null @@ -1,43 +0,0 @@ -import streamlit as st -from transformers import GPT2TokenizerFast, AutoModelForCausalLM -from arabert.preprocess import ArabertPreprocessor - -# Load model and tokenizer and the model - -model_name = "malmarjeh/gpt2" -tokenizer = GPT2TokenizerFast.from_pretrained("aubmindlab/aragpt2-base") -model = AutoModelForCausalLM.from_pretrained(model_name) -preprocessor = ArabertPreprocessor(model_name=model_name) - -# Streamlit UI -st.title('Arabic Text Summarizer | By M.Araby') -text = st.text_area("Paste your Arabic text here:") - -if st.button('Summarize'): - if text: - # Preprocess and tokenize input text - processed_text = preprocessor.preprocess(text) - formatted_text = '\n النص: ' + 
processed_text + ' \n الملخص: \n ' - tokenizer.add_special_tokens({'pad_token': ''}) - tokens = tokenizer.batch_encode_plus([formatted_text], return_tensors='pt', padding='max_length', - max_length=150) - - # Generate summary - output = model.generate( - input_ids=tokens['input_ids'], - repetition_penalty=2.0, - num_beams=5, - max_length=600, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - bos_token_id=tokenizer.bos_token_id, - ) - - # Decode and display the summarized text - result = tokenizer.decode(output[0][150:], skip_special_tokens=True).strip() - st.subheader("Original Text Input") - st.write(text) - st.subheader("Summarized Text Idea") - st.write(result) - else: - st.warning("Please enter Arabic text to summarize.") diff --git a/spaces/Arijit-hazra/my-image-captioner/app.py b/spaces/Arijit-hazra/my-image-captioner/app.py deleted file mode 100644 index e4390b54f220584ef4b05275ba62a6516976b472..0000000000000000000000000000000000000000 --- a/spaces/Arijit-hazra/my-image-captioner/app.py +++ /dev/null @@ -1,50 +0,0 @@ -import re -import string -import gradio as gr -import tensorflow as tf -from load_model import build - -IMG_SHAPE = (224,224,3) - - -def custom_standardization(s): - s = tf.strings.lower(s) - s = tf.strings.regex_replace(s, f'[{re.escape(string.punctuation)}]', '') - s = tf.strings.join(['[START]', s, '[END]'], separator=' ') - return s - -model = build() - -rescale = lambda image : tf.image.resize(tf.convert_to_tensor(image), IMG_SHAPE[:-1]) - -def single_img_transcribe(image, temperature=1): - initial = model.word_to_index([['[START]']]) # (batch, sequence) - img_features = model.feature_extractor(rescale(image)[tf.newaxis, ...]) - - tokens = initial # (batch, sequence) - for n in range(50): - preds = model((img_features, tokens)).numpy() # (batch, sequence, vocab) - preds = preds[:,-1, :] #(batch, vocab) - if temperature==0: - next = tf.argmax(preds, axis=-1)[:, tf.newaxis] # (batch, 1) - else: - next = tf.random.categorical(preds/temperature, num_samples=1) # (batch, 1) - tokens = tf.concat([tokens, next], axis=1) # (batch, sequence) - - if next[0] == model.word_to_index('[END]'): - break - - words = model.index_to_word(tokens[0, 1:-1]) - result = tf.strings.reduce_join(words, axis=-1, separator=' ') - return result.numpy().decode() - -def img_transcribes(image): - result = [] - for t in [0,0.5,1]: - result.append(single_img_transcribe(image, t)) - return result - -gr.Interface(fn=img_transcribes, - inputs=gr.Image(type="pil"), - outputs=["text","text","text"] - ).launch() diff --git a/spaces/Armandoliv/t5-summarize-app-scitldr/README.md b/spaces/Armandoliv/t5-summarize-app-scitldr/README.md deleted file mode 100644 index fd65abfd3baa7309a4cfaf46cd14e13599317392..0000000000000000000000000000000000000000 --- a/spaces/Armandoliv/t5-summarize-app-scitldr/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: T5 Summarize App Scitldr -emoji: 💻 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/app.py b/spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/app.py deleted file mode 100644 index e9c463a4f58179e0785756678119f56f902c9396..0000000000000000000000000000000000000000 --- a/spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/app.py +++ /dev/null @@ -1,129 +0,0 @@ -import argparse -from functools import partial -import 
cv2 -import requests -import os -from io import BytesIO -from PIL import Image -import numpy as np -from pathlib import Path -import gradio as gr - -import warnings - -import torch - -os.system("python setup.py build develop --user") -os.system("pip install packaging==21.3") -warnings.filterwarnings("ignore") - - -from groundingdino.models import build_model -from groundingdino.util.slconfig import SLConfig -from groundingdino.util.utils import clean_state_dict -from groundingdino.util.inference import annotate, load_image, predict -import groundingdino.datasets.transforms as T - -from huggingface_hub import hf_hub_download - - - -# Use this command for evaluate the GLIP-T model -config_file = "groundingdino/config/GroundingDINO_SwinT_OGC.py" -ckpt_repo_id = "ShilongLiu/GroundingDINO" -ckpt_filenmae = "groundingdino_swint_ogc.pth" - - -def load_model_hf(model_config_path, repo_id, filename, device='cpu'): - args = SLConfig.fromfile(model_config_path) - model = build_model(args) - args.device = device - - cache_file = hf_hub_download(repo_id=repo_id, filename=filename) - checkpoint = torch.load(cache_file, map_location='cpu') - log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False) - print("Model loaded from {} \n => {}".format(cache_file, log)) - _ = model.eval() - return model - -def image_transform_grounding(init_image): - transform = T.Compose([ - T.RandomResize([800], max_size=1333), - T.ToTensor(), - T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) - ]) - image, _ = transform(init_image, None) # 3, h, w - return init_image, image - -def image_transform_grounding_for_vis(init_image): - transform = T.Compose([ - T.RandomResize([800], max_size=1333), - ]) - image, _ = transform(init_image, None) # 3, h, w - return image - -model = load_model_hf(config_file, ckpt_repo_id, ckpt_filenmae) - -def run_grounding(input_image, grounding_caption, box_threshold, text_threshold): - init_image = input_image.convert("RGB") - original_size = init_image.size - - _, image_tensor = image_transform_grounding(init_image) - image_pil: Image = image_transform_grounding_for_vis(init_image) - - # run grounidng - boxes, logits, phrases = predict(model, image_tensor, grounding_caption, box_threshold, text_threshold, device='cpu') - annotated_frame = annotate(image_source=np.asarray(image_pil), boxes=boxes, logits=logits, phrases=phrases) - image_with_box = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)) - - - return image_with_box - -if __name__ == "__main__": - - css = """ - #mkd { - height: 500px; - overflow: auto; - border: 1px solid #ccc; - } -""" - block = gr.Blocks(css=css).queue() - with block: - gr.Markdown("

<h1><center>Grounding DINO</center></h1>") - gr.Markdown("<h3><center>Open-World Detection with Grounding DINO</center></h3>") - gr.Markdown("<h3><center>Note the model runs on CPU, so it may take a while to run the model.</center></h3>

") - - with gr.Row(): - with gr.Column(): - input_image = gr.Image(source='upload', type="pil") - grounding_caption = gr.Textbox(label="Detection Prompt") - run_button = gr.Button(label="Run") - with gr.Accordion("Advanced options", open=False): - box_threshold = gr.Slider( - label="Box Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001 - ) - text_threshold = gr.Slider( - label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001 - ) - - with gr.Column(): - gallery = gr.outputs.Image( - type="pil", - # label="grounding results" - ).style(full_width=True, full_height=True) - # gallery = gr.Gallery(label="Generated images", show_label=False).style( - # grid=[1], height="auto", container=True, full_width=True, full_height=True) - - run_button.click(fn=run_grounding, inputs=[ - input_image, grounding_caption, box_threshold, text_threshold], outputs=[gallery]) - gr.Examples( - [["watermelon.jpg", "watermelon", 0.25, 0.25]], - inputs = [input_image, grounding_caption, box_threshold, text_threshold], - outputs = [gallery], - fn=run_grounding, - cache_examples=True, - label='Try this example input!' - ) - block.launch(share=True, show_api=False, show_error=True) - diff --git a/spaces/Audio-AGI/WavJourney/README.md b/spaces/Audio-AGI/WavJourney/README.md deleted file mode 100644 index 6514202f6a3a92fe09c96516f15d4e7848d85484..0000000000000000000000000000000000000000 --- a/spaces/Audio-AGI/WavJourney/README.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: WavJourney -emoji: 🔥 -colorFrom: blue -colorTo: purple -sdk: docker -pinned: false -license: cc-by-nc-4.0 ---- -# 🎵 WavJourney: Compositional Audio Creation with LLMs -[![arXiv](https://img.shields.io/badge/arXiv-Paper-.svg)](https://arxiv.org/abs/2307.14335) [![GitHub Stars](https://img.shields.io/github/stars/Audio-AGI/WavJourney?style=social)](https://github.com/Audio-AGI/WavJourney/) [![githubio](https://img.shields.io/badge/GitHub.io-Demo_Page-blue?logo=Github&style=flat-square)](https://audio-agi.github.io/WavJourney_demopage/) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/Audio-AGI/WavJourney) - - -This repository contains the official implementation of ["WavJourney: Compositional Audio Creation with Large Language Models"](https://audio-agi.github.io/WavJourney_demopage/WavJourney_arXiv.pdf). - -Starting with a text prompt, WavJourney can create audio content with engaging storylines encompassing personalized speakers, lifelike speech in context, emotionally resonant music compositions, and impactful sound effects that enhance the auditory experience. Check the audio examples in the [Project Page](https://audio-agi.github.io/WavJourney_demopage/)! - - - -
- - -## Preliminaries -1. Install the environment: -```bash -bash ./scripts/EnvsSetup.sh -``` -2. Activate the conda environment: -```bash -conda activate WavJourney -``` - -3. (Optional) You can modify the default configuration in `config.yaml`, check the details described in the configuration file. -4. Pre-download the models (might take some time): -```bash -python scripts/download_models.py -``` - -5. Set the WAVJOURNEY_OPENAI_KEY in the environment variable for accessing [GPT-4 API](https://platform.openai.com/account/api-keys) [[Guidance](https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4)] -```bash -export WAVJOURNEY_OPENAI_KEY=your_openai_key_here -``` - -6. Set environment variables for using API services -```bash -# Set the port for the WAVJOURNEY service to 8021 -export WAVJOURNEY_SERVICE_PORT=8021 - -# Set the URL for the WAVJOURNEY service to 127.0.0.1 -export WAVJOURNEY_SERVICE_URL=127.0.0.1 - -# Limit the maximum script lines for WAVJOURNEY to 999 -export WAVJOURNEY_MAX_SCRIPT_LINES=999 -``` - - -7. Start Python API services (e.g., Text-to-Speech, Text-to-Audio) -```bash -bash scripts/start_services.sh -``` - -## Web APP - ```bash -bash scripts/start_ui.sh - ``` - -## Commandline Usage - ```bash - python wavjourney_cli.py -f --input-text "Generate a one-minute introduction to quantum mechanics" - ``` - - -## Kill the services -You can kill the running services via this command: - ```bash -python scripts/kill_services.py - ``` - -## (Advanced features) Speaker customization -You can add voice presets to WavJourney to customize the voice actors. Simply provide the voice id, the description and a sample wav file, and WavJourney will pick the voice automatically based on the audio script. Predefined system voice presets are in `data/voice_presets`. - -You can manage voice presets via UI. Specifically, if you want to add voice to voice presets. Run the script via command line below: -```bash -python add_voice_preset.py --id "id" --desc "description" --wav-path path/to/wav --session-id '' -``` -What makes for good voice prompt? See detailed instructions here. -## Hardware requirement -- The VRAM of the GPU in the default configuration should be greater than 16 GB. -- Operation system: Linux. - -## Citation -If you find this work useful, you can cite the paper below: - - @article{liu2023wavjourney, - title = {WavJourney: Compositional Audio Creation with Large Language Models}, - author = {Liu, Xubo and Zhu, Zhongkai and Liu, Haohe and Yuan, Yi and Huang, Qiushi and Liang, Jinhua and Cao, Yin and Kong, Qiuqiang and Plumbley, Mark D and Wang, Wenwu}, - journal = {arXiv preprint arXiv:2307.14335}, - year = {2023} - } - -[!["Buy Me A Coffee"](https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png)](https://www.buymeacoffee.com/liuxubo) - -## Appreciation -- [Bark](https://github.com/suno-ai/bark) for a zero-shot text-to-speech synthesis model. -- [AudioCraft](https://github.com/facebookresearch/audiocraft) for state-of-the-art audio generation models. - -## Disclaimer -We are not responsible for audio generated using semantics created by this model. Just don't use it for illegal purposes. 
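The commandline usage documented above can also be wrapped in a small driver script. The sketch below is only an illustration and not part of the repository: it assumes the services started by `scripts/start_services.sh` are already running, reuses the documented `wavjourney_cli.py` entry point and the `WAVJOURNEY_*` environment variables from the README, and the `generate_audio` helper name is invented for this example.

```python
import os
import subprocess

def generate_audio(prompt: str) -> None:
    # Hypothetical wrapper around the documented CLI call:
    #   python wavjourney_cli.py -f --input-text "<prompt>"
    env = os.environ.copy()
    # Defaults mirror the README's service configuration section.
    env.setdefault("WAVJOURNEY_SERVICE_URL", "127.0.0.1")
    env.setdefault("WAVJOURNEY_SERVICE_PORT", "8021")
    env.setdefault("WAVJOURNEY_MAX_SCRIPT_LINES", "999")
    if "WAVJOURNEY_OPENAI_KEY" not in env:
        raise RuntimeError("Set WAVJOURNEY_OPENAI_KEY before running WavJourney.")
    subprocess.run(
        ["python", "wavjourney_cli.py", "-f", "--input-text", prompt],
        env=env,
        check=True,
    )

if __name__ == "__main__":
    generate_audio("Generate a one-minute introduction to quantum mechanics")
```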
- diff --git a/spaces/AutoLLM/AutoAgents/autoagents/spaces/app.py b/spaces/AutoLLM/AutoAgents/autoagents/spaces/app.py deleted file mode 100644 index be58431ed446bf6283c17274a41e37ec3eb24409..0000000000000000000000000000000000000000 --- a/spaces/AutoLLM/AutoAgents/autoagents/spaces/app.py +++ /dev/null @@ -1,153 +0,0 @@ -import os -import asyncio -import random -from datetime import date, datetime, timezone, timedelta -from ast import literal_eval - -import streamlit as st -import openai - -from autoagents.utils.constants import MAIN_HEADER, MAIN_CAPTION, SAMPLE_QUESTIONS -from autoagents.agents.search import ActionRunner - -from langchain.chat_models import ChatOpenAI - - -async def run(): - output_acc = "" - st.session_state["random"] = random.randint(0, 99) - if "task" not in st.session_state: - st.session_state.task = None - if "model_name" not in st.session_state: - st.session_state.model_name = "gpt-3.5-turbo" - - st.set_page_config( - page_title="Search Agent", - page_icon="🤖", - layout="wide", - initial_sidebar_state="expanded", - ) - - st.title(MAIN_HEADER) - st.caption(MAIN_CAPTION) - - with st.form("my_form", clear_on_submit=False): - st.markdown("", unsafe_allow_html=True) - user_input = st.text_input( - "You: ", - key="input", - placeholder="Ask me anything ...", - label_visibility="hidden", - ) - - submitted = st.form_submit_button( - "Search", help="Hit to submit the search query." - ) - - # Ask the user to enter their OpenAI API key - if (api_key := st.sidebar.text_input("OpenAI api-key", type="password")): - api_org = None - else: - api_key, api_org = os.getenv("OPENAI_API_KEY"), os.getenv("OPENAI_API_ORG") - with st.sidebar: - model_dict = { - "gpt-3.5-turbo": "GPT-3.5-turbo", - "gpt-4": "GPT-4 (Better but slower)", - } - st.radio( - "OpenAI model", - model_dict.keys(), - key="model_name", - format_func=lambda x: model_dict[x], - ) - - time_zone = str(datetime.now(timezone(timedelta(0))).astimezone().tzinfo) - st.markdown(f"**The system time zone is {time_zone} and the date is {date.today()}**") - - st.markdown("**Example Queries:**") - for q in SAMPLE_QUESTIONS: - st.markdown(f"*{q}*") - - if not api_key: - st.warning( - "API key required to try this app. The API key is not stored in any form. [This](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key) might help." - ) - elif api_org and st.session_state.model_name == "gpt-4": - st.warning( - "The free API key does not support GPT-4. Please switch to GPT-3.5-turbo or input your own API key." 
- ) - else: - outputq = asyncio.Queue() - runner = ActionRunner(outputq, - ChatOpenAI(openai_api_key=api_key, - openai_organization=api_org, - temperature=0, - model_name=st.session_state.model_name), - persist_logs=True) # log to HF-dataset - - async def cleanup(e): - st.error(e) - await st.session_state.task - st.session_state.task = None - st.stop() - - placeholder = st.empty() - - if user_input and submitted: - if st.session_state.task is not None: - with placeholder.container(): - st.session_state.task.cancel() - st.warning("Previous search aborted", icon="⚠️") - - st.session_state.task = asyncio.create_task( - runner.run(user_input, outputq) - ) - iterations = 0 - with st.expander("Search Results", expanded=True): - while True: - with st.spinner("Wait for it..."): - output = await outputq.get() - placeholder.empty() - if isinstance(output, Exception): - if isinstance(output, openai.error.AuthenticationError): - await cleanup(f"AuthenticationError: Invalid OpenAI API key.") - elif isinstance(output, openai.error.InvalidRequestError) \ - and output._message == "The model: `gpt-4` does not exist": - await cleanup(f"The free API key does not support GPT-4. Please switch to GPT-3.5-turbo or input your own API key.") - elif isinstance(output, openai.error.OpenAIError): - await cleanup(output) - elif isinstance(output, RuntimeWarning): - st.warning(output) - continue - else: - await cleanup("Something went wrong. Please try searching again.") - return - try: - output_fmt = literal_eval(output) - st.json(output_fmt, expanded=False) - st.write("---") - iterations += 1 - except: - output_acc += "\n" + output - st.markdown(f"
{output}
", - unsafe_allow_html=True) - if iterations >= runner.agent_executor.max_iterations: - await cleanup( - f"Maximum iterations ({iterations}) exceeded. You can try running the search again or try a variation of the query." - ) - return - if "Final Answer:" in output: - break - # Found the answer - final_answer = await st.session_state.task - final_answer = final_answer.replace("$", "\$") - # st.success accepts md - st.success(final_answer, icon="✅") - st.balloons() - st.session_state.task = None - st.stop() - -if __name__ == "__main__": - loop = asyncio.new_event_loop() - loop.set_debug(enabled=False) - loop.run_until_complete(run()) diff --git a/spaces/BAAI/vid2vid-zero/Dockerfile b/spaces/BAAI/vid2vid-zero/Dockerfile deleted file mode 100644 index 9eaa78f8facc5066f018183f108824842f11b7e8..0000000000000000000000000000000000000000 --- a/spaces/BAAI/vid2vid-zero/Dockerfile +++ /dev/null @@ -1,57 +0,0 @@ -FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04 -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y --no-install-recommends \ - git \ - git-lfs \ - wget \ - curl \ - # ffmpeg \ - ffmpeg \ - x264 \ - # python build dependencies \ - build-essential \ - libssl-dev \ - zlib1g-dev \ - libbz2-dev \ - libreadline-dev \ - libsqlite3-dev \ - libncursesw5-dev \ - xz-utils \ - tk-dev \ - libxml2-dev \ - libxmlsec1-dev \ - libffi-dev \ - liblzma-dev && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -RUN useradd -m -u 1000 user -USER user -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:${PATH} -WORKDIR ${HOME}/app - -RUN curl https://pyenv.run | bash -ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH} -ENV PYTHON_VERSION=3.10.9 -RUN pyenv install ${PYTHON_VERSION} && \ - pyenv global ${PYTHON_VERSION} && \ - pyenv rehash && \ - pip install --no-cache-dir -U pip setuptools wheel - -RUN pip install --no-cache-dir -U torch==1.13.1 torchvision==0.14.1 -COPY --chown=1000 requirements.txt /tmp/requirements.txt -RUN pip install --no-cache-dir -U -r /tmp/requirements.txt - -COPY --chown=1000 . ${HOME}/app -# RUN cd Tune-A-Video && patch -p1 < ../patch -ENV PYTHONPATH=${HOME}/app \ - PYTHONUNBUFFERED=1 \ - GRADIO_ALLOW_FLAGGING=never \ - GRADIO_NUM_PORTS=1 \ - GRADIO_SERVER_NAME=0.0.0.0 \ - GRADIO_THEME=huggingface \ - SYSTEM=spaces -CMD ["python", "app.py"] diff --git a/spaces/Bala2-03-2003/MygenvioceAI/app.py b/spaces/Bala2-03-2003/MygenvioceAI/app.py deleted file mode 100644 index ca8b6d40b4ab898c70da92f4a4298de2baf703dc..0000000000000000000000000000000000000000 --- a/spaces/Bala2-03-2003/MygenvioceAI/app.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import re -import requests -import json -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') -PLAY_HT_API_KEY=os.getenv('PLAY_HT_API_KEY') -PLAY_HT_USER_ID=os.getenv('PLAY_HT_USER_ID') - -PLAY_HT_VOICE_ID=os.getenv('PLAY_HT_VOICE_ID') -play_ht_api_get_audio_url = "https://play.ht/api/v2/tts" - - -template = """You are a helpful assistant to answer user queries. 
-{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -headers = { - "accept": "text/event-stream", - "content-type": "application/json", - "AUTHORIZATION": "Bearer "+ PLAY_HT_API_KEY, - "X-USER-ID": PLAY_HT_USER_ID -} - - -def get_payload(text): - return { - "text": text, - "voice": PLAY_HT_VOICE_ID, - "quality": "medium", - "output_format": "mp3", - "speed": 1, - "sample_rate": 24000, - "seed": None, - "temperature": None - } - -def get_generated_audio(text): - payload = get_payload(text) - generated_response = {} - try: - response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers) - response.raise_for_status() - generated_response["type"]= 'SUCCESS' - generated_response["response"] = response.text - except requests.exceptions.RequestException as e: - generated_response["type"]= 'ERROR' - try: - response_text = json.loads(response.text) - if response_text['error_message']: - generated_response["response"] = response_text['error_message'] - else: - generated_response["response"] = response.text - except Exception as e: - generated_response["response"] = response.text - except Exception as e: - generated_response["type"]= 'ERROR' - generated_response["response"] = response.text - return generated_response - -def extract_urls(text): - # Define the regex pattern for URLs - url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*' - - # Find all occurrences of URLs in the text - urls = re.findall(url_pattern, text) - - return urls - -def get_audio_reply_for_question(text): - generated_audio_event = get_generated_audio(text) - #From get_generated_audio, you will get events in a string format, from that we need to extract the url - final_response = { - "audio_url": '', - "message": '' - } - if generated_audio_event["type"] == 'SUCCESS': - audio_urls = extract_urls(generated_audio_event["response"]) - if len(audio_urls) == 0: - final_response['message'] = "No audio file link found in generated event" - else: - final_response['audio_url'] = audio_urls[-1] - else: - final_response['message'] = generated_audio_event['response'] - return final_response - -def download_url(url): - try: - # Send a GET request to the URL to fetch the content - final_response = { - 'content':'', - 'error':'' - } - response = requests.get(url) - # Check if the request was successful (status code 200) - if response.status_code == 200: - final_response['content'] = response.content - else: - final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}" - except Exception as e: - final_response['error'] = f"Failed to download the URL. 
Error: {e}" - return final_response - -def get_filename_from_url(url): - # Use os.path.basename() to extract the file name from the URL - file_name = os.path.basename(url) - return file_name - -def get_text_response(user_message): - response = llm_chain.predict(user_message = user_message) - return response - -def get_text_response_and_audio_response(user_message): - response = get_text_response(user_message) # Getting the reply from Open AI - audio_reply_for_question_response = get_audio_reply_for_question(response) - final_response = { - 'output_file_path': '', - 'message':'' - } - audio_url = audio_reply_for_question_response['audio_url'] - if audio_url: - output_file_path=get_filename_from_url(audio_url) - download_url_response = download_url(audio_url) - audio_content = download_url_response['content'] - if audio_content: - with open(output_file_path, "wb") as audio_file: - audio_file.write(audio_content) - final_response['output_file_path'] = output_file_path - else: - final_response['message'] = download_url_response['error'] - else: - final_response['message'] = audio_reply_for_question_response['message'] - return final_response - -def chat_bot_response(message, history): - text_and_audio_response = get_text_response_and_audio_response(message) - output_file_path = text_and_audio_response['output_file_path'] - if output_file_path: - return (text_and_audio_response['output_file_path'],) - else: - return text_and_audio_response['message'] - -demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"]) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/BartPoint/VoiceChange/app_multi.py b/spaces/BartPoint/VoiceChange/app_multi.py deleted file mode 100644 index 67890355f12e2d2d5420ce8e31a722020f349b24..0000000000000000000000000000000000000000 --- a/spaces/BartPoint/VoiceChange/app_multi.py +++ /dev/null @@ -1,469 +0,0 @@ -from typing import Union - -from argparse import ArgumentParser - -import asyncio -import json -import hashlib -from os import path, getenv - -import gradio as gr - -import torch - -import numpy as np -import librosa - -import edge_tts - -import config -import util -from infer_pack.models import ( - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono -) -from vc_infer_pipeline import VC - -# Reference: https://huggingface.co/spaces/zomehwh/rvc-models/blob/main/app.py#L21 # noqa -in_hf_space = getenv('SYSTEM') == 'spaces' - -# Argument parsing -arg_parser = ArgumentParser() -arg_parser.add_argument( - '--hubert', - default=getenv('RVC_HUBERT', 'hubert_base.pt'), - help='path to hubert base model (default: hubert_base.pt)' -) -arg_parser.add_argument( - '--config', - default=getenv('RVC_MULTI_CFG', 'multi_config.json'), - help='path to config file (default: multi_config.json)' -) -arg_parser.add_argument( - '--api', - action='store_true', - help='enable api endpoint' -) -arg_parser.add_argument( - '--cache-examples', - action='store_true', - help='enable example caching, please remember delete gradio_cached_examples folder when example config has been modified' # noqa -) -args = arg_parser.parse_args() - -app_css = ''' -#model_info img { - max-width: 100px; - max-height: 100px; - float: right; -} - -#model_info p { - margin: unset; -} -''' - -app = gr.Blocks( - theme=gr.themes.Soft(primary_hue="orange", secondary_hue="slate"), - css=app_css, - 
analytics_enabled=False -) - -# Load hubert model -hubert_model = util.load_hubert_model(config.device, args.hubert) -hubert_model.eval() - -# Load models -multi_cfg = json.load(open(args.config, 'r')) -loaded_models = [] - -for model_name in multi_cfg.get('models'): - print(f'Loading model: {model_name}') - - # Load model info - model_info = json.load( - open(path.join('model', model_name, 'config.json'), 'r') - ) - - # Load RVC checkpoint - cpt = torch.load( - path.join('model', model_name, model_info['model']), - map_location='cpu' - ) - tgt_sr = cpt['config'][-1] - cpt['config'][-3] = cpt['weight']['emb_g.weight'].shape[0] # n_spk - - if_f0 = cpt.get('f0', 1) - net_g: Union[SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono] - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid( - *cpt['config'], - is_half=util.is_half(config.device) - ) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt['config']) - - del net_g.enc_q - - # According to original code, this thing seems necessary. - print(net_g.load_state_dict(cpt['weight'], strict=False)) - - net_g.eval().to(config.device) - net_g = net_g.half() if util.is_half(config.device) else net_g.float() - - vc = VC(tgt_sr, config) - - loaded_models.append(dict( - name=model_name, - metadata=model_info, - vc=vc, - net_g=net_g, - if_f0=if_f0, - target_sr=tgt_sr - )) - -print(f'Models loaded: {len(loaded_models)}') - -# Edge TTS speakers -tts_speakers_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) # noqa - - -# https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI/blob/main/infer-web.py#L118 # noqa -def vc_func( - input_audio, model_index, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_option -): - if input_audio is None: - return (None, 'Please provide input audio.') - - if model_index is None: - return (None, 'Please select a model.') - - model = loaded_models[model_index] - - # Reference: so-vits - (audio_samp, audio_npy) = input_audio - - # https://huggingface.co/spaces/zomehwh/rvc-models/blob/main/app.py#L49 - # Can be change well, we will see - if (audio_npy.shape[0] / audio_samp) > 320 and in_hf_space: - return (None, 'Input audio is longer than 60 secs.') - - # Bloody hell: https://stackoverflow.com/questions/26921836/ - if audio_npy.dtype != np.float32: # :thonk: - audio_npy = ( - audio_npy / np.iinfo(audio_npy.dtype).max - ).astype(np.float32) - - if len(audio_npy.shape) > 1: - audio_npy = librosa.to_mono(audio_npy.transpose(1, 0)) - - if audio_samp != 16000: - audio_npy = librosa.resample( - audio_npy, - orig_sr=audio_samp, - target_sr=16000 - ) - - pitch_int = int(pitch_adjust) - - resample = ( - 0 if resample_option == 'Disable resampling' - else int(resample_option) - ) - - times = [0, 0, 0] - - checksum = hashlib.sha512() - checksum.update(audio_npy.tobytes()) - - output_audio = model['vc'].pipeline( - hubert_model, - model['net_g'], - model['metadata'].get('speaker_id', 0), - audio_npy, - checksum.hexdigest(), - times, - pitch_int, - f0_method, - path.join('model', model['name'], model['metadata']['feat_index']), - feat_ratio, - model['if_f0'], - filter_radius, - model['target_sr'], - resample, - rms_mix_rate, - 'v2' - ) - - out_sr = ( - resample if resample >= 16000 and model['target_sr'] != resample - else model['target_sr'] - ) - - print(f'npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s') - return ((out_sr, output_audio), 'Success') - - -async def edge_tts_vc_func( - input_text, model_index, tts_speaker, pitch_adjust, f0_method, feat_ratio, - 
filter_radius, rms_mix_rate, resample_option -): - if input_text is None: - return (None, 'Please provide TTS text.') - - if tts_speaker is None: - return (None, 'Please select TTS speaker.') - - if model_index is None: - return (None, 'Please select a model.') - - speaker = tts_speakers_list[tts_speaker]['ShortName'] - (tts_np, tts_sr) = await util.call_edge_tts(speaker, input_text) - return vc_func( - (tts_sr, tts_np), - model_index, - pitch_adjust, - f0_method, - feat_ratio, - filter_radius, - rms_mix_rate, - resample_option - ) - - -def update_model_info(model_index): - if model_index is None: - return str( - '### Model info\n' - 'Please select a model from dropdown above.' - ) - - model = loaded_models[model_index] - model_icon = model['metadata'].get('icon', '') - - return str( - '### Model info\n' - '![model icon]({icon})' - '**{name}**\n\n' - 'Author: {author}\n\n' - 'Source: {source}\n\n' - '{note}' - ).format( - name=model['metadata'].get('name'), - author=model['metadata'].get('author', 'Anonymous'), - source=model['metadata'].get('source', 'Unknown'), - note=model['metadata'].get('note', ''), - icon=( - model_icon - if model_icon.startswith(('http://', 'https://')) - else '/file/model/%s/%s' % (model['name'], model_icon) - ) - ) - - -def _example_vc( - input_audio, model_index, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_option -): - (audio, message) = vc_func( - input_audio, model_index, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_option - ) - return ( - audio, - message, - update_model_info(model_index) - ) - - -async def _example_edge_tts( - input_text, model_index, tts_speaker, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_option -): - (audio, message) = await edge_tts_vc_func( - input_text, model_index, tts_speaker, pitch_adjust, f0_method, - feat_ratio, filter_radius, rms_mix_rate, resample_option - ) - return ( - audio, - message, - update_model_info(model_index) - ) - - -with app: - gr.Markdown( - '## A simplistic Web interface\n' - 'RVC interface, project based on [RVC-WebUI](https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI)' # thx noqa - 'A lot of inspiration from what\'s already out there, including [zomehwh/rvc-models](https://huggingface.co/spaces/zomehwh/rvc-models) & [DJQmUKV/rvc-inference](https://huggingface.co/spaces/DJQmUKV/rvc-inference).\n ' # thx noqa - ) - - with gr.Row(): - with gr.Column(): - with gr.Tab('Audio conversion'): - input_audio = gr.Audio(label='Input audio') - - vc_convert_btn = gr.Button('Convert', variant='primary') - - with gr.Tab('TTS conversion'): - tts_input = gr.TextArea( - label='TTS input text' - ) - tts_speaker = gr.Dropdown( - [ - '%s (%s)' % ( - s['FriendlyName'], - s['Gender'] - ) - for s in tts_speakers_list - ], - label='TTS speaker', - type='index' - ) - - tts_convert_btn = gr.Button('Convert', variant='primary') - - pitch_adjust = gr.Slider( - label='Pitch', - minimum=-24, - maximum=24, - step=1, - value=0 - ) - f0_method = gr.Radio( - label='f0 methods', - choices=['pm', 'harvest', 'crepe'], - value='pm', - interactive=True - ) - - with gr.Accordion('Advanced options', open=False): - feat_ratio = gr.Slider( - label='Feature ratio', - minimum=0, - maximum=1, - step=0.1, - value=0.6 - ) - filter_radius = gr.Slider( - label='Filter radius', - minimum=0, - maximum=7, - step=1, - value=3 - ) - rms_mix_rate = gr.Slider( - label='Volume envelope mix rate', - minimum=0, - maximum=1, - step=0.1, - value=1 - ) - 
resample_rate = gr.Dropdown( - [ - 'Disable resampling', - '16000', - '22050', - '44100', - '48000' - ], - label='Resample rate', - value='Disable resampling' - ) - - with gr.Column(): - # Model select - model_index = gr.Dropdown( - [ - '%s - %s' % ( - m['metadata'].get('source', 'Unknown'), - m['metadata'].get('name') - ) - for m in loaded_models - ], - label='Model', - type='index' - ) - - # Model info - with gr.Box(): - model_info = gr.Markdown( - '### Model info\n' - 'Please select a model from dropdown above.', - elem_id='model_info' - ) - - output_audio = gr.Audio(label='Output audio') - output_msg = gr.Textbox(label='Output message') - - multi_examples = multi_cfg.get('examples') - if ( - multi_examples and - multi_examples.get('vc') and multi_examples.get('tts_vc') - ): - with gr.Accordion('Sweet sweet examples', open=False): - with gr.Row(): - # VC Example - if multi_examples.get('vc'): - gr.Examples( - label='Audio conversion examples', - examples=multi_examples.get('vc'), - inputs=[ - input_audio, model_index, pitch_adjust, f0_method, - feat_ratio - ], - outputs=[output_audio, output_msg, model_info], - fn=_example_vc, - cache_examples=args.cache_examples, - run_on_click=args.cache_examples - ) - - # Edge TTS Example - if multi_examples.get('tts_vc'): - gr.Examples( - label='TTS conversion examples', - examples=multi_examples.get('tts_vc'), - inputs=[ - tts_input, model_index, tts_speaker, pitch_adjust, - f0_method, feat_ratio - ], - outputs=[output_audio, output_msg, model_info], - fn=_example_edge_tts, - cache_examples=args.cache_examples, - run_on_click=args.cache_examples - ) - - vc_convert_btn.click( - vc_func, - [ - input_audio, model_index, pitch_adjust, f0_method, feat_ratio, - filter_radius, rms_mix_rate, resample_rate - ], - [output_audio, output_msg], - api_name='audio_conversion' - ) - - tts_convert_btn.click( - edge_tts_vc_func, - [ - tts_input, model_index, tts_speaker, pitch_adjust, f0_method, - feat_ratio, filter_radius, rms_mix_rate, resample_rate - ], - [output_audio, output_msg], - api_name='tts_conversion' - ) - - model_index.change( - update_model_info, - inputs=[model_index], - outputs=[model_info], - show_progress=False, - queue=False - ) - -app.queue( - concurrency_count=1, - max_size=20, - api_open=args.api -).launch() \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Sudfrica Edicin Apk Descargar.md b/spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Sudfrica Edicin Apk Descargar.md deleted file mode 100644 index 231c36d39005d9275c61ccc88067d084ca775635..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Sudfrica Edicin Apk Descargar.md +++ /dev/null @@ -1,134 +0,0 @@ - -

Aparcamiento de coches multijugador Sudáfrica Edición APK Descargar: Una guía para los usuarios de Android

-

Si usted está buscando un juego de estacionamiento de coches realista y divertido que ofrece más que solo estacionamiento, es posible que desee echa un vistazo a Parking Multijugador. Y si usted está en Sudáfrica o desea experimentar la cultura del automóvil de Sudáfrica, es posible que desee probar el Aparcamiento Multijugador South áfrica Edition APK. En este artículo, te diremos qué es el Car Parking Multiplayer, qué hace que South áfrica Edition sea diferente, cómo descargarlo e instalarlo en tu dispositivo Android, y algunos consejos y trucos para jugarlo.

-

¿Qué es el Aparcamiento Multijugador?

-

Car Parking Multiplayer es un juego que puede engañarte con su nombre bastante engañoso. Pero, es mucho más que solo estar aparcando tu coche. Es una experiencia de mundo abierto donde se puede conducir gratis y sí, todavía trabajar en ese aparcamiento si lo desea. Incluso puedes saltar de tu coche y caminar. Hay diferentes áreas que se pueden explorar en el juego. Cada una es como su propio mundo abierto. Puedes optar por jugar en modo de un solo jugador o en modo online si quieres una escena más caótica (de forma divertida) .

-

aparcamiento de coches multijugador sudáfrica edición apk descargar


Download: https://bltlly.com/2v6IE9



-

Características de Aparcamiento Multijugador

-

Aparcamiento de coches multijugador tiene las siguientes características :

-
    -
  • Modo multijugador de mundo abierto
      -
    • Caminar libremente
    • -
    • Mundo abierto libre con gasolineras reales y servicios de automóviles
    • -
    • Compite contra jugadores reales en las carreras multijugador
    • -
    • Intercambiar coches con jugadores reales
    • -
    • Miles de jugadores reales cada día
    • -
    • Lista de amigos
    • -
    • Chat de voz
    • -
    • Modo de policía
    • -
    -
  • -
  • Personalización del coche
      -
    • Suspensión ajustable, ángulo de rueda y más
    • -
    • Ajuste del motor: motor de intercambio, turbo, caja de cambios y escape
    • -
• Ajuste visual del coche: vinilos dinámicos, partes del cuerpo del coche
    • -
    -
  • -
  • Mundo abierto de alta calidad
      - -
    • 100 coches con el interior real
    • -
    • 16 pieles de jugador
    • -
    • Edificios con interior
    • -
    -
  • -
  • Juego interesante
      -
    • 82 desafíos de estacionamiento y conducción en la vida real
    • -
    • Diferentes vehículos: Grúa, camioneta, camiones, coches deportivos y clásicos
    • -
    -
  • -
-

Opiniones de Aparcamiento multijugador

-

Aparcamiento de coches multijugador ha recibido en su mayoría críticas positivas de los usuarios en Google Play Store y App Store. Tiene una calificación de 4.4 de 5 estrellas en Google Play Store y una calificación de 4.3 de 5 estrellas en App Store . Estos son algunos de los comentarios de los usuarios:

-

"Increíble juego! No hay errores en este juego o retrasos ect. (base en el dispositivo que utiliza) Me encantan los gráficos y los coches. Los coches son tan realistas y los sonidos son increíbles. Me encanta cómo puedes personalizar tu coche y hacer que se vea genial. El modo multijugador es increíble. Puedes chatear con otros jugadores y competir con ellos. También puedes unirte a un clan o crear tu propio clan. Este juego es muy divertido y adictivo. Recomiendo este juego a todos los que aman los coches y los juegos de estacionamiento."

-

"Este juego es muy bueno pero necesita algunas mejoras como agregar más coches, más mapas, más personalizaciones, más modos de juego, más desafíos, etc. Además, el juego se bloquea a veces y los controles no son muy suaves. Los gráficos son agradables, pero pueden ser mejores. El juego es divertido de jugar con amigos, pero se vuelve aburrido después de un tiempo. Espero que los desarrolladores actualicen el juego pronto y lo hagan más agradable."

- -

¿Qué es el aparcamiento multijugador South áfrica Edition?

-

Car Parking Multijugador South áfrica Edition es una versión modificada de Car Parking Multijugador que está especialmente diseñado para los usuarios sudafricanos o los fans de la cultura del coche de Sudáfrica. No es una versión oficial del juego, sino un archivo APK hecho por fans que se puede descargar e instalar en dispositivos Android .

-

Diferencias entre Aparcamiento de coches multijugador y Aparcamiento de coches multijugador Sudáfrica Edición

-

Car Parking Multijugador South áfrica Edition tiene algunas diferencias con el juego original Car Parking Multijugador, como :

-
    -
  • Más coches que son populares en Sudáfrica, como BMW E30, VW Golf Mk1, Toyota Corolla, Nissan 1400, etc.
  • -
  • Más personalizaciones que reflejan la cultura del automóvil sudafricano, como llantas giratorias, escapes fuertes, pegatinas, banderas, etc.
  • -
  • Más mapas que se basan en ubicaciones reales en Sudáfrica, como Ciudad del Cabo, Johannesburgo, Durban, etc.
  • -
  • Más música inspirada en la escena musical sudafricana, como kwaito, gqom, amapiano, etc.
  • -
  • Más idiomas que se hablan en Sudáfrica, como afrikaans, zulú, xhosa, etc.
  • -
-

Beneficios de Aparcamiento Multijugador South áfrica Edition

-

Car Parking Multijugador South áfrica Edition tiene algunos beneficios para los usuarios que quieren disfrutar del juego con un toque sudafricano, como :

-
    -
  • Más variedad y diversidad en términos de coches, personalizaciones, mapas, música e idiomas
  • -
  • Más diversión y emoción en términos de jugabilidad, gráficos, efectos de sonido e interacciones
  • -
  • Más conexión y comunidad con otros jugadores que comparten el mismo interés y pasión por la cultura del automóvil de Sudáfrica
  • -
  • Más soporte y actualizaciones de los desarrolladores que se dedican a mejorar el juego y agregar nuevas características
  • -
-

Cómo descargar e instalar el estacionamiento de coches multijugador South áfrica Edition APK en dispositivos Android?

- -

Requisitos para descargar e instalar el estacionamiento de coches multijugador South áfrica Edition APK

-

Para descargar e instalar Aparcamiento Multijugador South áfrica Edition APK en su dispositivo Android, es necesario tener :

-

-
    -
  • Un dispositivo Android que se ejecuta en Android 4.1 o superior
  • -
  • Una conexión a Internet estable
  • -
  • Una aplicación de administrador de archivos que puede acceder al archivo APK
  • -
  • Una cantidad suficiente de espacio de almacenamiento en su dispositivo
  • -
  • Un permiso para instalar aplicaciones de fuentes desconocidas en la configuración del dispositivo
  • -
-

Pasos para Descargar e Instalar Aparcamiento Multijugador South áfrica Edition APK

-

Para descargar e instalar Aparcamiento Multijugador South áfrica Edition APK en su dispositivo Android, es necesario seguir estos pasos :

-
    -
  1. Ir a un sitio web de confianza que proporciona el enlace para descargar Car Parking Multi jugador South áfrica Edition APK. Por ejemplo, puede visitar [este sitio web] para obtener la última versión del archivo APK.
  2. -
  3. Haga clic en el botón de descarga y espere a que el archivo APK se descargue en su dispositivo.
  4. -
  5. Una vez que se complete la descarga, localizar el archivo APK en su dispositivo utilizando una aplicación de administrador de archivos. Puede encontrarlo en la carpeta Descargas o en cualquier otra carpeta donde lo haya guardado.
  6. -
  7. Toque en el archivo APK y seleccione Instalar. Es posible que vea un mensaje de advertencia que dice "Para su seguridad, el teléfono no está permitido instalar aplicaciones desconocidas de esta fuente". Si ves este mensaje, ve a la configuración del dispositivo y habilita la opción de instalar aplicaciones de fuentes desconocidas. Esta opción puede estar en Seguridad, Privacidad o Aplicaciones dependiendo del modelo de dispositivo y la versión de Android.
  8. - -
  9. Espere a que termine el proceso de instalación. Puede tardar unos minutos dependiendo del rendimiento del dispositivo y la velocidad de Internet.
  10. -
  11. Una vez que se hace la instalación, se puede abrir el juego y disfrutar de aparcamiento multijugador South áfrica Edition en su dispositivo Android.
  12. -
-

Consejos y Trucos para Jugar Aparcamiento Multijugador South áfrica Edition

-

Ahora que ha descargado e instalado Car Parking Multiplayer South áfrica Edition en su dispositivo Android, es posible que desee conocer algunos consejos y trucos para jugarlo. Estos son algunos de ellos:

-

Cómo seleccionar un coche y un jugador

-

Para seleccionar un coche y un jugador en Car Parking Multiplayer South áfrica Edition, debe hacer lo siguiente:

-
    -
  • Toque en el icono de menú en la esquina superior izquierda de la pantalla.
  • -
  • Toque en Garaje para ver la lista de coches que usted posee o puede comprar.
  • -
  • Toque en el coche que desea utilizar y luego toque en Select.
  • -
  • Toque en Volver para volver al menú.
  • -
  • Toque en Perfil para ver la lista de jugadores entre los que puede elegir.
  • -
  • Toque en el reproductor que desea utilizar y luego toque en Select.
  • -
  • Toque en Volver para volver al menú.
  • -
  • Toque en Jugar para comenzar a jugar el juego con el coche y el jugador seleccionado.
  • -
-

Cómo ajustar su coche y ajustar su relación de engranajes

-

Para afinar su coche y ajustar su relación de engranajes en Car Parking Multiplayer South áfrica Edition, debe hacer lo siguiente:

-
    -
  • Toque en el icono de menú en la esquina superior izquierda de la pantalla.
  • -
  • Toque en Garaje para ver la lista de coches que usted posee o puede comprar.
  • -
  • Toque en el coche que desea sintonizar y luego toque en Tune.
  • -
  • Verá cuatro pestañas: Motor, Suspensión, Ruedas y Cuerpo. Puede deslizar hacia la izquierda o hacia la derecha para cambiar entre ellas.
  • - -
  • Para ajustar su relación de cambios, toque en Caja de cambios en la pestaña Motor. Verá un gráfico que muestra cómo cambia su velocidad con diferentes marchas. Puede arrastrar los puntos en el gráfico para cambiar la relación de transmisión para cada marcha. También puede pulsar en Auto o Manual para cambiar entre modos de transmisión automática o manual.
  • -
  • Una vez que haya terminado de ajustar su coche, toque en Guardar para aplicar los cambios.
  • -
-

Cómo Deriva, Donut, y Burnout

-

Para la deriva, donut, y burnout en el estacionamiento de coches multijugador South áfrica Edition, es necesario hacer lo siguiente:

-
    -
  • Para la deriva, debe usar una combinación de dirección, acelerador, freno y freno de mano. Puede usar los controles de inclinación o táctiles para la dirección. Para iniciar la deriva, es necesario acelerar y girar bruscamente en una esquina. Luego, debe aplicar el freno de mano brevemente para hacer que sus ruedas traseras pierdan tracción. Después de eso, debe equilibrar el acelerador y la dirección para mantener el ángulo de deriva y la dirección. También puede utilizar el freno de mano o freno de mano de nuevo si necesita ajustar su deriva. Para terminar con la deriva, es necesario soltar el acelerador y la dirección y enderezar el coche.
  • -
  • Para la rosquilla, debe usar una combinación de dirección, acelerador y freno de mano. Puede usar controles de inclinación o táctiles para la dirección. Para comenzar a donar, debe acelerar y girar bruscamente en una dirección. Entonces, es necesario aplicar el freno de mano para hacer que su coche gire alrededor de su centro. Después de eso, es necesario mantener el acelerador y la dirección constante para mantener el círculo de la dona. También puede cambiar la dirección de su dona cambiando la dirección. Para terminar donuting, es necesario soltar el acelerador y el freno de mano y enderezar su coche.
  • - -
-

Conclusión

-

Car Parking Multijugador South áfrica Edition es un juego que ofrece una experiencia de aparcamiento realista y divertido con un toque sudafricano. Tiene más coches, personalizaciones, mapas, música e idiomas que reflejan la cultura del automóvil sudafricano. También tiene un modo de mundo abierto, un modo de ajuste de coche, y un modo multijugador que hacen el juego más agradable y emocionante. Puede descargar e instalar Parking Multijugador South áfrica Edition APK en su dispositivo Android siguiendo los pasos en este artículo. También puedes usar los consejos y trucos de este artículo para mejorar tu juego y divertirte más.

-

Preguntas frecuentes

-

Aquí están algunas de las preguntas más frecuentes sobre Car Parking Multiplayer South áfrica Edition:

-

¿Es gratis Car Parking Multijugador South áfrica Edition?

-

Sí, Parking Multijugador South áfrica Edition es gratis para descargar y jugar. Sin embargo, puede contener algunas compras en la aplicación que requieren dinero real.

-

¿Es seguro el aparcamiento multijugador South áfrica Edition?

-

Sí, Parking Multijugador South áfrica Edition es seguro, siempre y cuando se descarga desde un sitio web de confianza. Sin embargo, siempre debe tener cuidado al descargar e instalar aplicaciones de fuentes desconocidas, ya que pueden contener virus o malware que pueden dañar su dispositivo.

-

¿Es compatible Car Parking Multiplayer South áfrica Edition con mi dispositivo?

-

Car Parking Multijugador South áfrica Edition es compatible con la mayoría de los dispositivos Android que se ejecutan en Android 4.1 o superior. Sin embargo, algunos dispositivos pueden tener diferentes especificaciones o problemas de rendimiento que pueden afectar la calidad o funcionalidad del juego.

-

¿Cómo puedo actualizar Car Parking Multijugador South áfrica Edition?

- -

¿Cómo puedo contactar a los desarrolladores de Car Parking Multiplayer South áfrica Edition?

-

Para contactar a los desarrolladores de Car Parking Multiplayer South áfrica Edition, puedes visitar su página de Facebook o su canal de YouTube y dejar un comentario o un mensaje.

-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Asfalto Nitro 9 Leyendas Mod Apk.md b/spaces/Benson/text-generation/Examples/Asfalto Nitro 9 Leyendas Mod Apk.md deleted file mode 100644 index 2001fb4da4e341236f8b4ef59c584c7ee860ae80..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Asfalto Nitro 9 Leyendas Mod Apk.md +++ /dev/null @@ -1,76 +0,0 @@ - -

Nitro asfalto 9 leyendas Mod Apk: Una guía para los aficionados a las carreras

-

Si eres un fan de los juegos de carreras, es posible que hayas oído hablar de Asphalt Nitro 9 Legends, uno de los juegos más populares y emocionantes del género. Este juego le permite tomar el volante de los coches reales de gama alta de renombrados fabricantes de automóviles legendarios, como Ferrari, Porsche, Lamborghini, y W Motors, entre muchas otras marcas internacionales. Puedes conducir, impulsar y realizar acrobacias en ubicaciones dinámicas de la vida real en modo individual o multijugador.

-

Pero ¿y si quieres disfrutar del juego sin limitaciones ni restricciones? ¿Qué pasa si quieres tener dinero ilimitado y fichas, desbloquear todos los coches y pistas, personalizar sus vehículos y actualizarlos, y competir con otros jugadores en línea o fuera de línea? Bueno, hay una manera de hacerlo. Puede descargar e instalar Asphalt Nitro 9 Leyendas Mod Apk, una versión modificada del juego que le da acceso a todas estas características y más.

-

asfalto nitro 9 leyendas mod apk


Download: https://bltlly.com/2v6LmQ



-

En este artículo, le guiará a través de las características, proceso de descarga, pasos de instalación, consejos y trucos, pros y contras, y la conclusión de Asphalt Nitro 9 Leyendas Mod Apk. Sigue leyendo para saber más.

-

Características de Asphalt Nitro 9 Leyendas Mod Apk

-

Asfalto Nitro 9 Leyendas Mod Apk es una versión modificada del juego original que le da recursos ilimitados y características que mejoran su experiencia de juego. Estas son algunas de las características que puedes disfrutar con este mod apk:

-
    -
  • Dinero ilimitado y fichas: Con este apk mod, nunca se quedará sin dinero o fichas para comprar nuevos coches, pistas, mejoras, o artículos. Puedes gastar todo lo que quieras sin preocuparte por tu presupuesto.
  • - -
  • Personalizar sus vehículos y actualizarlos: Con este apk mod, puede personalizar sus vehículos de acuerdo a su preferencia. Puedes cambiar su color, pintura, calcomanías, ruedas, etc. También puedes mejorar su rendimiento mejorando su motor, transmisión, suspensión, frenos, nitro boost, etc.
  • -
  • Disfrutar de gráficos realistas y efectos de sonido: Con este apk mod, se puede disfrutar de los impresionantes gráficos y efectos de sonido del juego. El juego utiliza un motor de física realista que simula el movimiento y el comportamiento de los coches. El juego también cuenta con efectos de sonido de alta calidad que te hacen sentir como si estuvieras en una carrera real.
  • -
  • Compite con otros jugadores en línea o fuera de línea: Con este apk mod, puede competir con otros jugadores en línea o fuera de línea. Puedes unirte a carreras multijugador online y desafiar a tus amigos u otros jugadores de todo el mundo. También puedes jugar offline en modo carrera y completar varias misiones y desafíos.
  • -
-

Cómo descargar e instalar Asphalt Nitro 9 leyendas Mod Apk

-

Si desea descargar e instalar Asphalt Nitro 9 leyendas Mod Apk, es necesario seguir estos sencillos pasos:

-
    -
  1. Paso 1: Descargar el archivo apk mod de una fuente de confianza. Usted puede encontrar muchos sitios web que ofrecen el archivo apk mod para Asphalt Nitro 9 Leyendas, pero no todos ellos son seguros y fiables. Debes asegurarte de que el archivo que descargas esté libre de virus, malware o cualquier otro contenido dañino. Puede utilizar este enlace para descargar el archivo apk mod de forma segura.
  2. -
  3. Paso 2: Habilitar fuentes desconocidas en la configuración del dispositivo. Antes de poder instalar el archivo apk mod, debe habilitar fuentes desconocidas en la configuración de su dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store. Para hacer esto, vaya a la configuración del dispositivo, luego a la seguridad, luego a fuentes desconocidas y conéctela.
  4. - -
  5. Paso 4: Disfruta del juego con recursos y funciones ilimitadas. Ahora que ha instalado Asphalt Nitro 9 Leyendas Mod Apk, se puede disfrutar del juego con dinero ilimitado, fichas, coches, pistas, opciones de personalización, y más. También puedes competir con otros jugadores online o offline y divertirte.
  6. -
-

Consejos y trucos para jugar Asphalt Nitro 9 leyendas Mod Apk

-

Asfalto Nitro 9 Leyendas Mod Apk es un juego divertido y emocionante que le mantendrá entretenido durante horas. Sin embargo, si quieres mejorar tus habilidades y rendimiento en el juego, puedes seguir estos consejos y trucos:

-
    -
  • Elija el coche adecuado para cada pista y modo. El juego ofrece una variedad de coches de diferentes marcas y categorías, pero no todos ellos son adecuados para cada pista o modo. Es necesario tener en cuenta la velocidad, aceleración, manejo, nitro, y otros factores de cada coche antes de elegir uno. Por ejemplo, si usted está jugando en una pista con curvas, es posible que desee elegir un coche con buen manejo y nitro. Si usted está jugando en una pista recta, es posible que desee elegir un coche con alta velocidad y aceleración.
  • -
  • Utilice nitro boost sabiamente y estratégicamente. Nitro boost es una de las características más importantes del juego, ya que puede darte una ventaja sobre tus oponentes. Sin embargo, necesitas usarlo sabiamente y estratégicamente. No debes usarlo todo de una vez ni desperdiciarlo en momentos innecesarios. Debes guardarlo para momentos cruciales, como adelantar a tus rivales, escapar de obstáculos o colisiones, o llegar a la meta. También debe usarlo en combinación con acrobacias y derivas para ganar más puntos y recompensas.
  • - -
  • Evitar obstáculos y colisiones con otros coches. El juego cuenta con muchos obstáculos y desafíos que pueden obstaculizar su progreso o dañar su coche. Estos incluyen coches de tráfico, coches de policía, helicópteros, barricadas, picos, etc. Usted debe evitar estos obstáculos tanto como sea posible, ya que pueden ralentizar o dañar su coche. También debes evitar colisiones con otros coches, especialmente tus rivales, ya que pueden sacarte de la pista o hacerte perder tu posición. Usted debe tratar de esquivar o escapar de ellos, o utilizar su impulso nitro para alejarse de ellos.
  • -
  • Utilice diferentes ángulos de cámara y controles para adaptarse a sus preferencias. El juego ofrece diferentes ángulos de cámara y controles que puedes usar para jugar el juego. Puede elegir entre vista en primera persona, en tercera persona o de arriba hacia abajo, según su preferencia. También puede elegir entre los controles de inclinación, toque o toque, dependiendo de su comodidad. Puede cambiar estos ajustes en el menú de opciones del juego.
  • -
-

Pros y contras de Asphalt Nitro 9 Leyendas Mod Apk

-

Asfalto Nitro 9 Leyendas Mod Apk es un gran juego que ofrece muchos beneficios y ventajas para los aficionados a las carreras. Sin embargo, también tiene algunos inconvenientes y desventajas que usted debe tener en cuenta. Estos son algunos de los pros y los contras de Asphalt Nitro 9 Leyendas Mod Apk:

- - -Pros -Contras - - -
    -
  • Divertido, adictivo y desafiante juego
  • -
  • Dinero ilimitado y tokens
  • -
  • Desbloquear todos los coches y pistas
  • -
  • Personalizar sus vehículos y actualizarlos
  • -
  • Disfruta de gráficos realistas y efectos de sonido
  • -
  • Compite con otros jugadores online o offline
  • -
-
    -
  • Puede causar retraso o estrellarse en algunos dispositivos
  • -
  • Puede no ser compatible con algunos dispositivos o versiones
  • -
  • No se puede actualizar regularmente o con frecuencia
  • -
  • No puede ser soportado por los desarrolladores o editores oficiales
  • - -
  • Puede exponer su dispositivo a riesgos o amenazas de seguridad
  • -
- -
-

Conclusión

-

En conclusión, Asfalto Nitro 9 Leyendas Mod Apk es un juego fantástico que le dará horas de diversión y emoción. Es una versión modificada del juego original que le da recursos ilimitados y características que mejoran su experiencia de juego. Puede descargarlo e instalarlo de forma fácil y segura siguiendo los pasos que proporcionamos en este artículo. También puedes seguir los consejos y trucos que compartimos para mejorar tus habilidades y rendimiento en el juego. Sin embargo, también debe ser consciente de los pros y los contras de Asphalt Nitro 9 Leyendas Mod Apk, y decidir si vale la pena jugar o no.

-

Si usted es un fanático de las carreras, le recomendamos que pruebe Asphalt Nitro 9 Leyendas Mod Apk y ver por sí mismo lo increíble que es. No te arrepentirás.

-

Preguntas frecuentes (preguntas frecuentes)

-

Aquí están algunas de las preguntas y respuestas más comunes sobre Asphalt Nitro 9 Legends Mod Apk:

-

-

Q: ¿Es gratis Asphalt Nitro 9 Leyendas Mod Apk?

-

A: Sí, Asphalt Nitro 9 Leyendas Mod Apk es gratis para descargar y jugar. Usted no necesita pagar nada para disfrutar del juego.

-

Q: ¿Es seguro Asphalt Nitro 9 Legends Mod Apk?

-

A: Sí, Asfalto Nitro 9 Leyendas Mod Apk es seguro para descargar e instalar. Sin embargo, siempre debe descargarlo de una fuente de confianza y habilitar fuentes desconocidas en la configuración del dispositivo antes de instalarlo.

-

Q: ¿Es legal Asphalt Nitro 9 Legends Mod Apk?

-

A: No, Asphalt Nitro 9 Leyendas Mod Apk no es legal. Es una versión modificada del juego original que viola los términos y condiciones de los desarrolladores y editores oficiales. También puede infringir los derechos de propiedad intelectual del juego original.

-

Q: Es Asphalt Nitro 9 Leyendas Mod Apk en línea o fuera de línea?

- -

Q: ¿Cómo puedo actualizar Asphalt Nitro 9 Legends Mod Apk?

-

A: Para actualizar Asphalt Nitro 9 leyendas Mod Apk, es necesario descargar e instalar la última versión del archivo apk mod de una fuente de confianza. Es posible que tenga que desinstalar la versión anterior del archivo apk mod antes de instalar el nuevo.

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Camioneros De Europa 3 Apk 36.6.md b/spaces/Benson/text-generation/Examples/Camioneros De Europa 3 Apk 36.6.md deleted file mode 100644 index d872bc888664198a5fb77f20f85945d9c05ae2ef..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Camioneros De Europa 3 Apk 36.6.md +++ /dev/null @@ -1,76 +0,0 @@ - -

Camioneros de Europa 3 APK 36.6: Un juego de simulador de camiones realista y divertido

-

Si eres un fan de los juegos de simuladores de camiones, es posible que hayas oído hablar de Truckers of Europe 3, un juego popular que te permite experimentar la vida de un conductor de camiones en Europa. En este artículo, te contaremos todo lo que necesitas saber sobre este juego, incluyendo qué es, cuáles son sus características, cómo descargarlo e instalarlo, cómo jugarlo y por qué deberías jugarlo. ¡Así que abróchate el cinturón y prepárate para un emocionante viaje!

-

Introducción

-

¿Qué es Camioneros de Europa 3?

-

Truckers of Europe 3 es un juego de simulador de camioneros desarrollado por Jerryisgaming, un canal de YouTube que crea videos de juegos. El juego fue lanzado en 2020 y ha sido actualizado regularmente con nuevas características y mejoras. La última versión del juego es 0.36.6, que fue lanzado el 30 de septiembre de 2021.

-

camioneros de europa 3 apk 36.6


Download Zip ❤❤❤ https://bltlly.com/2v6LwC



-

¿Cuáles son las características de Camioneros de Europa 3?

-

Truckers of Europe 3 tiene muchas características que lo convierten en uno de los mejores juegos de simuladores de camiones en el mercado. Algunas de estas características son:

-
    -
  • Un gran mapa de Europa con más de 50 ciudades y países para explorar.
  • -
  • Una variedad de camiones y remolques para elegir, cada uno con diferentes especificaciones y rendimiento.
  • -
  • Un sistema de conducción realista con transmisión manual, volante, pedales, indicadores, espejos, luces, bocina, limpiaparabrisas, etc.
  • -
  • Un sistema de tráfico realista con coches, autobuses, camiones, motocicletas, policía, ambulancia, etc.
  • -
  • Un sistema meteorológico realista con ciclo de día y noche, lluvia, nieve, niebla, etc.
  • -
  • Un sistema de daños realista con desgaste de neumáticos, consumo de combustible, sobrecalentamiento del motor, etc.
  • -
  • Un modo de carrera con misiones, contratos, entrega de carga, ingresos, gastos, etc.
  • -
  • Un sistema de habilidades con niveles, puntos, ventajas, etc.
  • -
  • Un sistema de personalización con trabajos de pintura, accesorios, calcomanías, etc.
  • - -
-

Cómo descargar e instalar camioneros de Europa 3 APK 36.6?

-

Requisitos

-

Para descargar e instalar Camioneros de Europa 3 APK 36.6 en su dispositivo Android, es necesario cumplir con los siguientes requisitos:

-
    -
  • Tu dispositivo debe tener la versión de Android 4.4 o superior.
  • -
  • Su dispositivo debe tener al menos 1 GB de RAM y 500 MB de espacio de almacenamiento libre.
  • -
  • Su dispositivo debe tener una conexión a Internet estable.
  • -
  • Su dispositivo debe permitir la instalación desde fuentes desconocidas. Puede habilitar esta opción yendo a Configuración > Seguridad > Fuentes desconocidas.
  • -
-

Pasos

-

Para descargar e instalar Camioneros de Europa 3 APK 36.6 en su dispositivo Android, debe seguir estos pasos:

-
    -
  1. Ir a este enlace y descargar el archivo APK.
  2. -
  3. Localice el archivo descargado en el administrador de archivos de su dispositivo y toque en él para iniciar el proceso de instalación.
  4. -
  5. Siga las instrucciones en la pantalla y conceda los permisos necesarios a la aplicación.
  6. -
  7. Espere a que la instalación se complete y luego inicie la aplicación desde el cajón de aplicaciones o la pantalla de inicio.
  8. -
  9. Disfruta jugando Camioneros de Europa 3 en tu dispositivo Android!
  10. -
-

Cómo jugar Camioneros de Europa 3?

-

Elige tu camión y remolque

-

Lo primero que tienes que hacer cuando empiezas a jugar Truckers of Europe 3 es elegir tu camión y remolque. Puede hacer esto yendo al menú del garaje y navegando a través de las opciones disponibles. También puede comprar nuevos camiones y remolques con el dinero que gana de sus misiones. Cada camión y remolque tiene diferentes atributos, como velocidad, potencia, capacidad de combustible, peso de carga, etc. Debe elegir el que se adapte a sus preferencias y necesidades.

-

Conduce por toda Europa y entrega carga

- -

Mientras conduce, tendrá que seguir las reglas de tráfico y regulaciones, tales como límites de velocidad, semáforos, señales, etc. También tendrá que lidiar con condiciones de tráfico realistas, como automóviles, autobuses, camiones, motocicletas, policía, ambulancia, etc. También tendrá que enfrentarse a condiciones meteorológicas realistas, como lluvia, nieve, niebla, etc. También tendrá que controlar el estado de su camión, como el nivel de combustible, la temperatura del motor, la presión de los neumáticos, etc. También tendrá que prestar atención al estado de su conductor, como el nivel de fatiga, el nivel de hambre, etc. Tendrá que parar en gasolineras, áreas de descanso, restaurantes, etc. para llenar su tanque de combustible, descansar a su conductor, comer algo de comida, etc.

-

Cuando llegue a su destino, tendrá que aparcar su camión y remolque en el lugar designado y descargar la carga. A continuación, recibirá su recompensa y comentarios sobre su rendimiento. También ganarás puntos de experiencia que te ayudarán a subir de nivel y desbloquear nuevas habilidades y beneficios.

-

Personaliza tu camión y mejora tus habilidades

-

Otro aspecto divertido de Truckers of Europe 3 es que puedes personalizar tu camión y mejorar tus habilidades. Puedes hacer esto yendo al menú del taller y gastando algo de dinero en varios artículos y servicios. Puede cambiar el color de su camión, agregar algunos accesorios, aplicar algunas calcomanías, etc. También puede reparar su camión si está dañado o afinarlo si no funciona bien.

-

También puedes ir al menú de habilidades y gastar algunos puntos en varias habilidades y beneficios que mejorarán tus habilidades y beneficios como conductor de camión. Puede mejorar su eficiencia de combustible, manejo de carga, seguridad de conducción, habilidades de negociación, etc. También puede desbloquear nuevos tipos de carga, contratos, camiones, remolques, etc.

-

-

¿Por qué deberías jugar Camioneros de Europa 3?

-

Gráficos realistas y física

- -

Tráfico y clima diversos y dinámicos

-

Otra razón por la que deberías jugar a Truckers of Europe 3 es porque tiene un tráfico diverso y dinámico y el clima hará que tu experiencia de conducción sea más desafiante y agradable. El juego tiene un gran mapa de Europa con más de 50 ciudades y países para explorar. Cada ciudad y país tiene sus propias características únicas, tales como monumentos, edificios, carreteras, señales, etc. El juego también tiene un sistema de tráfico diverso que incluye automóviles, autobuses, camiones, motocicletas, policía, ambulancia, etc. Cada vehículo tiene su propio comportamiento, velocidad, dirección, etc. El juego también tiene un sistema de tiempo dinámico que incluye ciclo de día y noche, lluvia, nieve, niebla, etc. Cada condición climática tiene su propio efecto en la visibilidad, tracción, manejo, etc. del camión y el remolque.

-

Misiones y logros desafiantes y gratificantes

-

Otra razón por la que deberías jugar a Truckers of Europe 3 es porque tiene misiones desafiantes y gratificantes y logros que te mantendrán motivado y entretenido. El juego tiene un modo de carrera que le permite comenzar como un conductor de camión novato y trabajar su camino hasta convertirse en un camionero profesional. Puede hacerlo completando varias misiones y contratos que implican la entrega de carga a diferentes destinos en toda Europa. También puede ganar dinero de sus entregas y gastarlo en la compra de nuevos camiones y remolques o la personalización de los existentes. También puede ganar puntos de experiencia de sus entregas y gastarlos en mejorar sus habilidades y beneficios. También puedes desbloquear nuevos tipos de carga, contratos, camiones, remolques, etc. a medida que avanzas en el juego.

- -

Conclusión

-

Truckers of Europe 3 es un juego de simulador de camiones realista y divertido que te permite experimentar la vida de un conductor de camiones en Europa. Puede elegir su camión y remolque, conducir por toda Europa y entregar carga, personalizar su camión y mejorar sus habilidades, y ganar dinero y logros. El juego tiene gráficos y física realistas, tráfico y clima diversos y dinámicos, y misiones y logros desafiantes y gratificantes. Si usted está buscando un juego de simulador de camiones que le mantendrá enganchado durante horas, definitivamente debe probar Camioneros de Europa 3 APK 36.6.

-

Preguntas frecuentes

-

Aquí hay algunas preguntas frecuentes sobre los camioneros de Europa 3:

-
    -
  • Q: ¿Los camioneros de Europa 3 son libres de jugar?
  • -
  • A: Sí, Truckers of Europe 3 es gratis. Puedes descargarlo e instalarlo desde el enlace proporcionado en este artículo. Sin embargo, el juego puede contener algunas compras en la aplicación que pueden mejorar su experiencia de juego.
  • -
  • Q: ¿Es seguro descargar e instalar Truckers of Europe 3?
  • -
  • A: Sí, Truckers of Europe 3 es seguro para descargar e instalar. El archivo APK proporcionado en este artículo es verificado y probado por nuestro equipo. Sin embargo, siempre debe descargar e instalar archivos APK de fuentes confiables solamente.
  • -
  • Q: ¿Es Truckers of Europe 3 compatible con mi dispositivo?
  • -
  • A: Camioneros de Europa 3 es compatible con la mayoría de los dispositivos Android que tienen la versión Android 4.4 o superior. Sin embargo, algunos dispositivos pueden no ser compatibles con el juego debido a limitaciones de hardware o software.
  • -
  • P: ¿Cómo puedo contactar al desarrollador de Truckers of Europe 3?
  • -
  • A: Puedes contactar al desarrollador de Truckers of Europe 3 visitando su canal de YouTube Jerryisgaming o su página de Facebook Jerryisgaming. También puedes dejar tus comentarios, sugerencias o consultas en la sección de comentarios de sus videos o publicaciones.
  • -
  • P: ¿Cómo puedo apoyar el desarrollo de Camioneros de Europa 3?
  • - -

-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Estrellas Pelea Hack Ios.md b/spaces/Benson/text-generation/Examples/Descargar Estrellas Pelea Hack Ios.md deleted file mode 100644 index 516d398df61b62811ac5d43e8205d838c0714cbc..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Estrellas Pelea Hack Ios.md +++ /dev/null @@ -1,80 +0,0 @@ - -

Cómo descargar Genshin Impact Cloud y disfrutar del juego en cualquier lugar

-

Genshin Impact es uno de los juegos más populares y exitosos de 2020, con millones de jugadores en todo el mundo. El juego es un RPG de acción de mundo abierto que te permite explorar un mundo vasto y hermoso llamado Teyvat, donde puedes conocer a varios personajes, luchar contra enemigos, reunir recursos y completar misiones. El juego está disponible en múltiples plataformas, como PC, PlayStation 4, PlayStation 5, iOS y Android. Sin embargo, si quieres jugar el juego en tu dispositivo móvil, puedes enfrentarte a algunos desafíos, como espacio de almacenamiento limitado, bajo rendimiento o dispositivos incompatibles. Ahí es donde entra Genshin Impact Cloud.

-

¿Qué es la nube de impacto Genshin y por qué debe probarlo

-

Genshin Impact Cloud es un servicio que te permite jugar el juego en tu dispositivo móvil sin descargar el juego entero

-

Genshin Impact Cloud es una nueva característica que fue introducida por miHoYo, el desarrollador de Genshin Impact, en abril de 2021. Es un servicio de juegos en la nube que le permite transmitir el juego desde un servidor a su dispositivo móvil a través de Internet. Esto significa que no necesitas descargar o instalar el juego en tu dispositivo, lo que puede ahorrarte mucho espacio de almacenamiento. El tamaño del juego de Genshin Impact en dispositivos móviles es de alrededor de 5,2 GB, pero con Genshin Impact Cloud, solo necesitas descargar una aplicación de 56 MB de tamaño. Esto también puede ayudarle a evitar largos tiempos de carga o actualizaciones.

-

descargar estrellas pelea hack ios


Download: https://bltlly.com/2v6KEj



-

Genshin Impact Cloud tiene muchos beneficios, como ahorrar espacio de almacenamiento, mejorar el rendimiento y admitir cross-save

-

Además de ahorrar espacio de almacenamiento, Genshin Impact Cloud también tiene otras ventajas que pueden mejorar su experiencia de juego. Por ejemplo:

-
    - -
  • Genshin Impact Cloud soporta cross-save y cross-play. Esto significa que puedes acceder a tu progreso y datos existentes desde otras plataformas iniciando sesión con tu cuenta miHoYo. También puedes jugar con tus amigos que usan diferentes dispositivos, como PC o PlayStation.
  • -
  • Genshin Impact Cloud is free to use. You do not need to pay any additional fees or subscriptions to use the service. You can also make in-app purchases as usual, and enjoy all of the game's updates and events.
  • -
-

With Genshin Impact Cloud, you can enjoy the game anywhere and anytime, as long as you have a stable internet connection and a compatible device.

-

How to Download Genshin Impact Cloud on Android Devices

-

Genshin Impact Cloud is currently available only to Android users in Malaysia and Singapore as a beta test

-

Genshin Impact Cloud is still in its early stages of development and is not yet available for all regions and platforms. Currently, the service is only open to Android users in Malaysia and Singapore as a beta test. This means that only a limited number of players can access the service, and there may be some bugs or glitches during gameplay. miHoYo has not announced when the service will expand to other regions and platforms, but it is likely to do so in the future.

-

To download Genshin Impact Cloud, you need a miHoYo account and must sign up for the beta test on the official website

-

If you are an Android user in Malaysia or Singapore and want to try Genshin Impact Cloud, follow these steps:

-
    -
  1. Create a miHoYo account if you do not already have one. You can do this by visiting https://account.mihoyo.com/#/register and filling in the required information.
  2. -
  3. Visite None: - """ - Raise an option parsing error using parser.error(). - - Args: - parser: an OptionParser instance. - option: an Option instance. - msg: the error text. - """ - msg = f"{option} error: {msg}" - msg = textwrap.fill(" ".join(msg.split())) - parser.error(msg) - - -def make_option_group(group: Dict[str, Any], parser: ConfigOptionParser) -> OptionGroup: - """ - Return an OptionGroup object - group -- assumed to be dict with 'name' and 'options' keys - parser -- an optparse Parser - """ - option_group = OptionGroup(parser, group["name"]) - for option in group["options"]: - option_group.add_option(option()) - return option_group - - -def check_dist_restriction(options: Values, check_target: bool = False) -> None: - """Function for determining if custom platform options are allowed. - - :param options: The OptionParser options. - :param check_target: Whether or not to check if --target is being used. - """ - dist_restriction_set = any( - [ - options.python_version, - options.platforms, - options.abis, - options.implementation, - ] - ) - - binary_only = FormatControl(set(), {":all:"}) - sdist_dependencies_allowed = ( - options.format_control != binary_only and not options.ignore_dependencies - ) - - # Installations or downloads using dist restrictions must not combine - # source distributions and dist-specific wheels, as they are not - # guaranteed to be locally compatible. - if dist_restriction_set and sdist_dependencies_allowed: - raise CommandError( - "When restricting platform and interpreter constraints using " - "--python-version, --platform, --abi, or --implementation, " - "either --no-deps must be set, or --only-binary=:all: must be " - "set and --no-binary must not be set (or must be set to " - ":none:)." - ) - - if check_target: - if dist_restriction_set and not options.target_dir: - raise CommandError( - "Can not use any platform or abi specific options unless " - "installing via '--target'" - ) - - -def _path_option_check(option: Option, opt: str, value: str) -> str: - return os.path.expanduser(value) - - -def _package_name_option_check(option: Option, opt: str, value: str) -> str: - return canonicalize_name(value) - - -class PipOption(Option): - TYPES = Option.TYPES + ("path", "package_name") - TYPE_CHECKER = Option.TYPE_CHECKER.copy() - TYPE_CHECKER["package_name"] = _package_name_option_check - TYPE_CHECKER["path"] = _path_option_check - - -########### -# options # -########### - -help_: Callable[..., Option] = partial( - Option, - "-h", - "--help", - dest="help", - action="help", - help="Show help.", -) - -debug_mode: Callable[..., Option] = partial( - Option, - "--debug", - dest="debug_mode", - action="store_true", - default=False, - help=( - "Let unhandled exceptions propagate outside the main subroutine, " - "instead of logging them to stderr." - ), -) - -isolated_mode: Callable[..., Option] = partial( - Option, - "--isolated", - dest="isolated_mode", - action="store_true", - default=False, - help=( - "Run pip in an isolated mode, ignoring environment variables and user " - "configuration." - ), -) - -require_virtualenv: Callable[..., Option] = partial( - Option, - "--require-virtualenv", - "--require-venv", - dest="require_venv", - action="store_true", - default=False, - help=( - "Allow pip to only run in a virtual environment; " - "exit with an error otherwise." 
- ), -) - -override_externally_managed: Callable[..., Option] = partial( - Option, - "--break-system-packages", - dest="override_externally_managed", - action="store_true", - help="Allow pip to modify an EXTERNALLY-MANAGED Python installation", -) - -python: Callable[..., Option] = partial( - Option, - "--python", - dest="python", - help="Run pip with the specified Python interpreter.", -) - -verbose: Callable[..., Option] = partial( - Option, - "-v", - "--verbose", - dest="verbose", - action="count", - default=0, - help="Give more output. Option is additive, and can be used up to 3 times.", -) - -no_color: Callable[..., Option] = partial( - Option, - "--no-color", - dest="no_color", - action="store_true", - default=False, - help="Suppress colored output.", -) - -version: Callable[..., Option] = partial( - Option, - "-V", - "--version", - dest="version", - action="store_true", - help="Show version and exit.", -) - -quiet: Callable[..., Option] = partial( - Option, - "-q", - "--quiet", - dest="quiet", - action="count", - default=0, - help=( - "Give less output. Option is additive, and can be used up to 3" - " times (corresponding to WARNING, ERROR, and CRITICAL logging" - " levels)." - ), -) - -progress_bar: Callable[..., Option] = partial( - Option, - "--progress-bar", - dest="progress_bar", - type="choice", - choices=["on", "off"], - default="on", - help="Specify whether the progress bar should be used [on, off] (default: on)", -) - -log: Callable[..., Option] = partial( - PipOption, - "--log", - "--log-file", - "--local-log", - dest="log", - metavar="path", - type="path", - help="Path to a verbose appending log.", -) - -no_input: Callable[..., Option] = partial( - Option, - # Don't ask for input - "--no-input", - dest="no_input", - action="store_true", - default=False, - help="Disable prompting for input.", -) - -keyring_provider: Callable[..., Option] = partial( - Option, - "--keyring-provider", - dest="keyring_provider", - choices=["auto", "disabled", "import", "subprocess"], - default="auto", - help=( - "Enable the credential lookup via the keyring library if user input is allowed." - " Specify which mechanism to use [disabled, import, subprocess]." - " (default: disabled)" - ), -) - -proxy: Callable[..., Option] = partial( - Option, - "--proxy", - dest="proxy", - type="str", - default="", - help="Specify a proxy in the form scheme://[user:passwd@]proxy.server:port.", -) - -retries: Callable[..., Option] = partial( - Option, - "--retries", - dest="retries", - type="int", - default=5, - help="Maximum number of retries each connection should attempt " - "(default %default times).", -) - -timeout: Callable[..., Option] = partial( - Option, - "--timeout", - "--default-timeout", - metavar="sec", - dest="timeout", - type="float", - default=15, - help="Set the socket timeout (default %default seconds).", -) - - -def exists_action() -> Option: - return Option( - # Option when path already exist - "--exists-action", - dest="exists_action", - type="choice", - choices=["s", "i", "w", "b", "a"], - default=[], - action="append", - metavar="action", - help="Default action when a path already exists: " - "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.", - ) - - -cert: Callable[..., Option] = partial( - PipOption, - "--cert", - dest="cert", - type="path", - metavar="path", - help=( - "Path to PEM-encoded CA certificate bundle. " - "If provided, overrides the default. " - "See 'SSL Certificate Verification' in pip documentation " - "for more information." 
- ), -) - -client_cert: Callable[..., Option] = partial( - PipOption, - "--client-cert", - dest="client_cert", - type="path", - default=None, - metavar="path", - help="Path to SSL client certificate, a single file containing the " - "private key and the certificate in PEM format.", -) - -index_url: Callable[..., Option] = partial( - Option, - "-i", - "--index-url", - "--pypi-url", - dest="index_url", - metavar="URL", - default=PyPI.simple_url, - help="Base URL of the Python Package Index (default %default). " - "This should point to a repository compliant with PEP 503 " - "(the simple repository API) or a local directory laid out " - "in the same format.", -) - - -def extra_index_url() -> Option: - return Option( - "--extra-index-url", - dest="extra_index_urls", - metavar="URL", - action="append", - default=[], - help="Extra URLs of package indexes to use in addition to " - "--index-url. Should follow the same rules as " - "--index-url.", - ) - - -no_index: Callable[..., Option] = partial( - Option, - "--no-index", - dest="no_index", - action="store_true", - default=False, - help="Ignore package index (only looking at --find-links URLs instead).", -) - - -def find_links() -> Option: - return Option( - "-f", - "--find-links", - dest="find_links", - action="append", - default=[], - metavar="url", - help="If a URL or path to an html file, then parse for links to " - "archives such as sdist (.tar.gz) or wheel (.whl) files. " - "If a local path or file:// URL that's a directory, " - "then look for archives in the directory listing. " - "Links to VCS project URLs are not supported.", - ) - - -def trusted_host() -> Option: - return Option( - "--trusted-host", - dest="trusted_hosts", - action="append", - metavar="HOSTNAME", - default=[], - help="Mark this host or host:port pair as trusted, even though it " - "does not have valid or any HTTPS.", - ) - - -def constraints() -> Option: - return Option( - "-c", - "--constraint", - dest="constraints", - action="append", - default=[], - metavar="file", - help="Constrain versions using the given constraints file. " - "This option can be used multiple times.", - ) - - -def requirements() -> Option: - return Option( - "-r", - "--requirement", - dest="requirements", - action="append", - default=[], - metavar="file", - help="Install from the given requirements file. " - "This option can be used multiple times.", - ) - - -def editable() -> Option: - return Option( - "-e", - "--editable", - dest="editables", - action="append", - default=[], - metavar="path/url", - help=( - "Install a project in editable mode (i.e. setuptools " - '"develop mode") from a local project path or a VCS url.' - ), - ) - - -def _handle_src(option: Option, opt_str: str, value: str, parser: OptionParser) -> None: - value = os.path.abspath(value) - setattr(parser.values, option.dest, value) - - -src: Callable[..., Option] = partial( - PipOption, - "--src", - "--source", - "--source-dir", - "--source-directory", - dest="src_dir", - type="path", - metavar="dir", - default=get_src_prefix(), - action="callback", - callback=_handle_src, - help="Directory to check out editable projects into. " - 'The default in a virtualenv is "/src". 
' - 'The default for global installs is "/src".', -) - - -def _get_format_control(values: Values, option: Option) -> Any: - """Get a format_control object.""" - return getattr(values, option.dest) - - -def _handle_no_binary( - option: Option, opt_str: str, value: str, parser: OptionParser -) -> None: - existing = _get_format_control(parser.values, option) - FormatControl.handle_mutual_excludes( - value, - existing.no_binary, - existing.only_binary, - ) - - -def _handle_only_binary( - option: Option, opt_str: str, value: str, parser: OptionParser -) -> None: - existing = _get_format_control(parser.values, option) - FormatControl.handle_mutual_excludes( - value, - existing.only_binary, - existing.no_binary, - ) - - -def no_binary() -> Option: - format_control = FormatControl(set(), set()) - return Option( - "--no-binary", - dest="format_control", - action="callback", - callback=_handle_no_binary, - type="str", - default=format_control, - help="Do not use binary packages. Can be supplied multiple times, and " - 'each time adds to the existing value. Accepts either ":all:" to ' - 'disable all binary packages, ":none:" to empty the set (notice ' - "the colons), or one or more package names with commas between " - "them (no colons). Note that some packages are tricky to compile " - "and may fail to install when this option is used on them.", - ) - - -def only_binary() -> Option: - format_control = FormatControl(set(), set()) - return Option( - "--only-binary", - dest="format_control", - action="callback", - callback=_handle_only_binary, - type="str", - default=format_control, - help="Do not use source packages. Can be supplied multiple times, and " - 'each time adds to the existing value. Accepts either ":all:" to ' - 'disable all source packages, ":none:" to empty the set, or one ' - "or more package names with commas between them. Packages " - "without binary distributions will fail to install when this " - "option is used on them.", - ) - - -platforms: Callable[..., Option] = partial( - Option, - "--platform", - dest="platforms", - metavar="platform", - action="append", - default=None, - help=( - "Only use wheels compatible with . Defaults to the " - "platform of the running system. Use this option multiple times to " - "specify multiple platforms supported by the target interpreter." - ), -) - - -# This was made a separate function for unit-testing purposes. -def _convert_python_version(value: str) -> Tuple[Tuple[int, ...], Optional[str]]: - """ - Convert a version string like "3", "37", or "3.7.3" into a tuple of ints. - - :return: A 2-tuple (version_info, error_msg), where `error_msg` is - non-None if and only if there was a parsing error. - """ - if not value: - # The empty string is the same as not providing a value. - return (None, None) - - parts = value.split(".") - if len(parts) > 3: - return ((), "at most three version parts are allowed") - - if len(parts) == 1: - # Then we are in the case of "3" or "37". - value = parts[0] - if len(value) > 1: - parts = [value[0], value[1:]] - - try: - version_info = tuple(int(part) for part in parts) - except ValueError: - return ((), "each version part must be an integer") - - return (version_info, None) - - -def _handle_python_version( - option: Option, opt_str: str, value: str, parser: OptionParser -) -> None: - """ - Handle a provided --python-version value. 
- """ - version_info, error_msg = _convert_python_version(value) - if error_msg is not None: - msg = "invalid --python-version value: {!r}: {}".format( - value, - error_msg, - ) - raise_option_error(parser, option=option, msg=msg) - - parser.values.python_version = version_info - - -python_version: Callable[..., Option] = partial( - Option, - "--python-version", - dest="python_version", - metavar="python_version", - action="callback", - callback=_handle_python_version, - type="str", - default=None, - help=dedent( - """\ - The Python interpreter version to use for wheel and "Requires-Python" - compatibility checks. Defaults to a version derived from the running - interpreter. The version can be specified using up to three dot-separated - integers (e.g. "3" for 3.0.0, "3.7" for 3.7.0, or "3.7.3"). A major-minor - version can also be given as a string without dots (e.g. "37" for 3.7.0). - """ - ), -) - - -implementation: Callable[..., Option] = partial( - Option, - "--implementation", - dest="implementation", - metavar="implementation", - default=None, - help=( - "Only use wheels compatible with Python " - "implementation , e.g. 'pp', 'jy', 'cp', " - " or 'ip'. If not specified, then the current " - "interpreter implementation is used. Use 'py' to force " - "implementation-agnostic wheels." - ), -) - - -abis: Callable[..., Option] = partial( - Option, - "--abi", - dest="abis", - metavar="abi", - action="append", - default=None, - help=( - "Only use wheels compatible with Python abi , e.g. 'pypy_41'. " - "If not specified, then the current interpreter abi tag is used. " - "Use this option multiple times to specify multiple abis supported " - "by the target interpreter. Generally you will need to specify " - "--implementation, --platform, and --python-version when using this " - "option." - ), -) - - -def add_target_python_options(cmd_opts: OptionGroup) -> None: - cmd_opts.add_option(platforms()) - cmd_opts.add_option(python_version()) - cmd_opts.add_option(implementation()) - cmd_opts.add_option(abis()) - - -def make_target_python(options: Values) -> TargetPython: - target_python = TargetPython( - platforms=options.platforms, - py_version_info=options.python_version, - abis=options.abis, - implementation=options.implementation, - ) - - return target_python - - -def prefer_binary() -> Option: - return Option( - "--prefer-binary", - dest="prefer_binary", - action="store_true", - default=False, - help="Prefer older binary packages over newer source packages.", - ) - - -cache_dir: Callable[..., Option] = partial( - PipOption, - "--cache-dir", - dest="cache_dir", - default=USER_CACHE_DIR, - metavar="dir", - type="path", - help="Store the cache data in .", -) - - -def _handle_no_cache_dir( - option: Option, opt: str, value: str, parser: OptionParser -) -> None: - """ - Process a value provided for the --no-cache-dir option. - - This is an optparse.Option callback for the --no-cache-dir option. - """ - # The value argument will be None if --no-cache-dir is passed via the - # command-line, since the option doesn't accept arguments. However, - # the value can be non-None if the option is triggered e.g. by an - # environment variable, like PIP_NO_CACHE_DIR=true. - if value is not None: - # Then parse the string value to get argument error-checking. 
- try: - strtobool(value) - except ValueError as exc: - raise_option_error(parser, option=option, msg=str(exc)) - - # Originally, setting PIP_NO_CACHE_DIR to a value that strtobool() - # converted to 0 (like "false" or "no") caused cache_dir to be disabled - # rather than enabled (logic would say the latter). Thus, we disable - # the cache directory not just on values that parse to True, but (for - # backwards compatibility reasons) also on values that parse to False. - # In other words, always set it to False if the option is provided in - # some (valid) form. - parser.values.cache_dir = False - - -no_cache: Callable[..., Option] = partial( - Option, - "--no-cache-dir", - dest="cache_dir", - action="callback", - callback=_handle_no_cache_dir, - help="Disable the cache.", -) - -no_deps: Callable[..., Option] = partial( - Option, - "--no-deps", - "--no-dependencies", - dest="ignore_dependencies", - action="store_true", - default=False, - help="Don't install package dependencies.", -) - -ignore_requires_python: Callable[..., Option] = partial( - Option, - "--ignore-requires-python", - dest="ignore_requires_python", - action="store_true", - help="Ignore the Requires-Python information.", -) - -no_build_isolation: Callable[..., Option] = partial( - Option, - "--no-build-isolation", - dest="build_isolation", - action="store_false", - default=True, - help="Disable isolation when building a modern source distribution. " - "Build dependencies specified by PEP 518 must be already installed " - "if this option is used.", -) - -check_build_deps: Callable[..., Option] = partial( - Option, - "--check-build-dependencies", - dest="check_build_deps", - action="store_true", - default=False, - help="Check the build dependencies when PEP517 is used.", -) - - -def _handle_no_use_pep517( - option: Option, opt: str, value: str, parser: OptionParser -) -> None: - """ - Process a value provided for the --no-use-pep517 option. - - This is an optparse.Option callback for the no_use_pep517 option. - """ - # Since --no-use-pep517 doesn't accept arguments, the value argument - # will be None if --no-use-pep517 is passed via the command-line. - # However, the value can be non-None if the option is triggered e.g. - # by an environment variable, for example "PIP_NO_USE_PEP517=true". - if value is not None: - msg = """A value was passed for --no-use-pep517, - probably using either the PIP_NO_USE_PEP517 environment variable - or the "no-use-pep517" config file option. Use an appropriate value - of the PIP_USE_PEP517 environment variable or the "use-pep517" - config file option instead. - """ - raise_option_error(parser, option=option, msg=msg) - - # If user doesn't wish to use pep517, we check if setuptools and wheel are installed - # and raise error if it is not. - packages = ("setuptools", "wheel") - if not all(importlib.util.find_spec(package) for package in packages): - msg = ( - f"It is not possible to use --no-use-pep517 " - f"without {' and '.join(packages)} installed." - ) - raise_option_error(parser, option=option, msg=msg) - - # Otherwise, --no-use-pep517 was passed via the command-line. 
- parser.values.use_pep517 = False - - -use_pep517: Any = partial( - Option, - "--use-pep517", - dest="use_pep517", - action="store_true", - default=None, - help="Use PEP 517 for building source distributions " - "(use --no-use-pep517 to force legacy behaviour).", -) - -no_use_pep517: Any = partial( - Option, - "--no-use-pep517", - dest="use_pep517", - action="callback", - callback=_handle_no_use_pep517, - default=None, - help=SUPPRESS_HELP, -) - - -def _handle_config_settings( - option: Option, opt_str: str, value: str, parser: OptionParser -) -> None: - key, sep, val = value.partition("=") - if sep != "=": - parser.error(f"Arguments to {opt_str} must be of the form KEY=VAL") # noqa - dest = getattr(parser.values, option.dest) - if dest is None: - dest = {} - setattr(parser.values, option.dest, dest) - if key in dest: - if isinstance(dest[key], list): - dest[key].append(val) - else: - dest[key] = [dest[key], val] - else: - dest[key] = val - - -config_settings: Callable[..., Option] = partial( - Option, - "-C", - "--config-settings", - dest="config_settings", - type=str, - action="callback", - callback=_handle_config_settings, - metavar="settings", - help="Configuration settings to be passed to the PEP 517 build backend. " - "Settings take the form KEY=VALUE. Use multiple --config-settings options " - "to pass multiple keys to the backend.", -) - -build_options: Callable[..., Option] = partial( - Option, - "--build-option", - dest="build_options", - metavar="options", - action="append", - help="Extra arguments to be supplied to 'setup.py bdist_wheel'.", -) - -global_options: Callable[..., Option] = partial( - Option, - "--global-option", - dest="global_options", - action="append", - metavar="options", - help="Extra global options to be supplied to the setup.py " - "call before the install or bdist_wheel command.", -) - -no_clean: Callable[..., Option] = partial( - Option, - "--no-clean", - action="store_true", - default=False, - help="Don't clean up build directories.", -) - -pre: Callable[..., Option] = partial( - Option, - "--pre", - action="store_true", - default=False, - help="Include pre-release and development versions. By default, " - "pip only finds stable versions.", -) - -disable_pip_version_check: Callable[..., Option] = partial( - Option, - "--disable-pip-version-check", - dest="disable_pip_version_check", - action="store_true", - default=False, - help="Don't periodically check PyPI to determine whether a new version " - "of pip is available for download. Implied with --no-index.", -) - -root_user_action: Callable[..., Option] = partial( - Option, - "--root-user-action", - dest="root_user_action", - default="warn", - choices=["warn", "ignore"], - help="Action if pip is run as a root user. 
By default, a warning message is shown.", -) - - -def _handle_merge_hash( - option: Option, opt_str: str, value: str, parser: OptionParser -) -> None: - """Given a value spelled "algo:digest", append the digest to a list - pointed to in a dict by the algo name.""" - if not parser.values.hashes: - parser.values.hashes = {} - try: - algo, digest = value.split(":", 1) - except ValueError: - parser.error( - "Arguments to {} must be a hash name " # noqa - "followed by a value, like --hash=sha256:" - "abcde...".format(opt_str) - ) - if algo not in STRONG_HASHES: - parser.error( - "Allowed hash algorithms for {} are {}.".format( # noqa - opt_str, ", ".join(STRONG_HASHES) - ) - ) - parser.values.hashes.setdefault(algo, []).append(digest) - - -hash: Callable[..., Option] = partial( - Option, - "--hash", - # Hash values eventually end up in InstallRequirement.hashes due to - # __dict__ copying in process_line(). - dest="hashes", - action="callback", - callback=_handle_merge_hash, - type="string", - help="Verify that the package's archive matches this " - "hash before installing. Example: --hash=sha256:abcdef...", -) - - -require_hashes: Callable[..., Option] = partial( - Option, - "--require-hashes", - dest="require_hashes", - action="store_true", - default=False, - help="Require a hash to check each requirement against, for " - "repeatable installs. This option is implied when any package in a " - "requirements file has a --hash option.", -) - - -list_path: Callable[..., Option] = partial( - PipOption, - "--path", - dest="path", - type="path", - action="append", - help="Restrict to the specified installation path for listing " - "packages (can be used multiple times).", -) - - -def check_list_path_option(options: Values) -> None: - if options.path and (options.user or options.local): - raise CommandError("Cannot combine '--path' with '--user' or '--local'") - - -list_exclude: Callable[..., Option] = partial( - PipOption, - "--exclude", - dest="excludes", - action="append", - metavar="package", - type="package_name", - help="Exclude specified package from the output", -) - - -no_python_version_warning: Callable[..., Option] = partial( - Option, - "--no-python-version-warning", - dest="no_python_version_warning", - action="store_true", - default=False, - help="Silence deprecation warnings for upcoming unsupported Pythons.", -) - - -# Features that are now always on. A warning is printed if they are used. 
-ALWAYS_ENABLED_FEATURES = [ - "no-binary-enable-wheel-cache", # always on since 23.1 -] - -use_new_feature: Callable[..., Option] = partial( - Option, - "--use-feature", - dest="features_enabled", - metavar="feature", - action="append", - default=[], - choices=[ - "fast-deps", - "truststore", - ] - + ALWAYS_ENABLED_FEATURES, - help="Enable new functionality, that may be backward incompatible.", -) - -use_deprecated_feature: Callable[..., Option] = partial( - Option, - "--use-deprecated", - dest="deprecated_features_enabled", - metavar="feature", - action="append", - default=[], - choices=[ - "legacy-resolver", - ], - help=("Enable deprecated functionality, that will be removed in the future."), -) - - -########## -# groups # -########## - -general_group: Dict[str, Any] = { - "name": "General Options", - "options": [ - help_, - debug_mode, - isolated_mode, - require_virtualenv, - python, - verbose, - version, - quiet, - log, - no_input, - keyring_provider, - proxy, - retries, - timeout, - exists_action, - trusted_host, - cert, - client_cert, - cache_dir, - no_cache, - disable_pip_version_check, - no_color, - no_python_version_warning, - use_new_feature, - use_deprecated_feature, - ], -} - -index_group: Dict[str, Any] = { - "name": "Package Index Options", - "options": [ - index_url, - extra_index_url, - no_index, - find_links, - ], -} diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/markup.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/markup.py deleted file mode 100644 index fd80d8c1129722b84771bd6a0f6ccfd57f5cf78e..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/markup.py +++ /dev/null @@ -1,246 +0,0 @@ -import re -from ast import literal_eval -from operator import attrgetter -from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union - -from ._emoji_replace import _emoji_replace -from .emoji import EmojiVariant -from .errors import MarkupError -from .style import Style -from .text import Span, Text - -RE_TAGS = re.compile( - r"""((\\*)\[([a-z#/@][^[]*?)])""", - re.VERBOSE, -) - -RE_HANDLER = re.compile(r"^([\w.]*?)(\(.*?\))?$") - - -class Tag(NamedTuple): - """A tag in console markup.""" - - name: str - """The tag name. e.g. 'bold'.""" - parameters: Optional[str] - """Any additional parameters after the name.""" - - def __str__(self) -> str: - return ( - self.name if self.parameters is None else f"{self.name} {self.parameters}" - ) - - @property - def markup(self) -> str: - """Get the string representation of this tag.""" - return ( - f"[{self.name}]" - if self.parameters is None - else f"[{self.name}={self.parameters}]" - ) - - -_ReStringMatch = Match[str] # regex match object -_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub -_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re - - -def escape( - markup: str, - _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub, -) -> str: - """Escapes text so that it won't be interpreted as markup. - - Args: - markup (str): Content to be inserted in to markup. - - Returns: - str: Markup with square brackets escaped. 
- """ - - def escape_backslashes(match: Match[str]) -> str: - """Called by re.sub replace matches.""" - backslashes, text = match.groups() - return f"{backslashes}{backslashes}\\{text}" - - markup = _escape(escape_backslashes, markup) - return markup - - -def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]: - """Parse markup in to an iterable of tuples of (position, text, tag). - - Args: - markup (str): A string containing console markup - - """ - position = 0 - _divmod = divmod - _Tag = Tag - for match in RE_TAGS.finditer(markup): - full_text, escapes, tag_text = match.groups() - start, end = match.span() - if start > position: - yield start, markup[position:start], None - if escapes: - backslashes, escaped = _divmod(len(escapes), 2) - if backslashes: - # Literal backslashes - yield start, "\\" * backslashes, None - start += backslashes * 2 - if escaped: - # Escape of tag - yield start, full_text[len(escapes) :], None - position = end - continue - text, equals, parameters = tag_text.partition("=") - yield start, None, _Tag(text, parameters if equals else None) - position = end - if position < len(markup): - yield position, markup[position:], None - - -def render( - markup: str, - style: Union[str, Style] = "", - emoji: bool = True, - emoji_variant: Optional[EmojiVariant] = None, -) -> Text: - """Render console markup in to a Text instance. - - Args: - markup (str): A string containing console markup. - emoji (bool, optional): Also render emoji code. Defaults to True. - - Raises: - MarkupError: If there is a syntax error in the markup. - - Returns: - Text: A test instance. - """ - emoji_replace = _emoji_replace - if "[" not in markup: - return Text( - emoji_replace(markup, default_variant=emoji_variant) if emoji else markup, - style=style, - ) - text = Text(style=style) - append = text.append - normalize = Style.normalize - - style_stack: List[Tuple[int, Tag]] = [] - pop = style_stack.pop - - spans: List[Span] = [] - append_span = spans.append - - _Span = Span - _Tag = Tag - - def pop_style(style_name: str) -> Tuple[int, Tag]: - """Pop tag matching given style name.""" - for index, (_, tag) in enumerate(reversed(style_stack), 1): - if tag.name == style_name: - return pop(-index) - raise KeyError(style_name) - - for position, plain_text, tag in _parse(markup): - if plain_text is not None: - # Handle open brace escapes, where the brace is not part of a tag. 
- plain_text = plain_text.replace("\\[", "[") - append(emoji_replace(plain_text) if emoji else plain_text) - elif tag is not None: - if tag.name.startswith("/"): # Closing tag - style_name = tag.name[1:].strip() - - if style_name: # explicit close - style_name = normalize(style_name) - try: - start, open_tag = pop_style(style_name) - except KeyError: - raise MarkupError( - f"closing tag '{tag.markup}' at position {position} doesn't match any open tag" - ) from None - else: # implicit close - try: - start, open_tag = pop() - except IndexError: - raise MarkupError( - f"closing tag '[/]' at position {position} has nothing to close" - ) from None - - if open_tag.name.startswith("@"): - if open_tag.parameters: - handler_name = "" - parameters = open_tag.parameters.strip() - handler_match = RE_HANDLER.match(parameters) - if handler_match is not None: - handler_name, match_parameters = handler_match.groups() - parameters = ( - "()" if match_parameters is None else match_parameters - ) - - try: - meta_params = literal_eval(parameters) - except SyntaxError as error: - raise MarkupError( - f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}" - ) - except Exception as error: - raise MarkupError( - f"error parsing {open_tag.parameters!r}; {error}" - ) from None - - if handler_name: - meta_params = ( - handler_name, - meta_params - if isinstance(meta_params, tuple) - else (meta_params,), - ) - - else: - meta_params = () - - append_span( - _Span( - start, len(text), Style(meta={open_tag.name: meta_params}) - ) - ) - else: - append_span(_Span(start, len(text), str(open_tag))) - - else: # Opening tag - normalized_tag = _Tag(normalize(tag.name), tag.parameters) - style_stack.append((len(text), normalized_tag)) - - text_length = len(text) - while style_stack: - start, tag = style_stack.pop() - style = str(tag) - if style: - append_span(_Span(start, text_length, style)) - - text.spans = sorted(spans[::-1], key=attrgetter("start")) - return text - - -if __name__ == "__main__": # pragma: no cover - - MARKUP = [ - "[red]Hello World[/red]", - "[magenta]Hello [b]World[/b]", - "[bold]Bold[italic] bold and italic [/bold]italic[/italic]", - "Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog", - ":warning-emoji: [bold red blink] DANGER![/]", - ] - - from pip._vendor.rich import print - from pip._vendor.rich.table import Table - - grid = Table("Markup", "Result", padding=(0, 1)) - - for markup in MARKUP: - grid.add_row(Text(markup), markup) - - print(grid) diff --git a/spaces/Big-Web/MMSD/env/Scripts/deactivate.bat b/spaces/Big-Web/MMSD/env/Scripts/deactivate.bat deleted file mode 100644 index 62a39a7584f4d7c5fbc31758e3e9e7eff700276d..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Scripts/deactivate.bat +++ /dev/null @@ -1,22 +0,0 @@ -@echo off - -if defined _OLD_VIRTUAL_PROMPT ( - set "PROMPT=%_OLD_VIRTUAL_PROMPT%" -) -set _OLD_VIRTUAL_PROMPT= - -if defined _OLD_VIRTUAL_PYTHONHOME ( - set "PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%" - set _OLD_VIRTUAL_PYTHONHOME= -) - -if defined _OLD_VIRTUAL_PATH ( - set "PATH=%_OLD_VIRTUAL_PATH%" -) - -set _OLD_VIRTUAL_PATH= - -set VIRTUAL_ENV= -set VIRTUAL_ENV_PROMPT= - -:END diff --git a/spaces/CAMP-ViL/Xplainer/inference.py b/spaces/CAMP-ViL/Xplainer/inference.py deleted file mode 100644 index 2ab6b61c2b1fa8039af58a5f5c4823b6c2f56cab..0000000000000000000000000000000000000000 --- a/spaces/CAMP-ViL/Xplainer/inference.py +++ /dev/null @@ -1,116 +0,0 @@ -import argparse -import gc -from pathlib import Path - -import torch -from 
torch.utils.data import DataLoader -from tqdm import tqdm - -from chestxray14 import ChestXray14Dataset -from chexpert import CheXpertDataset -from descriptors import disease_descriptors_chexpert, disease_descriptors_chestxray14 -from model import InferenceModel -from utils import calculate_auroc - -torch.multiprocessing.set_sharing_strategy('file_system') - - -def inference_chexpert(): - split = 'test' - dataset = CheXpertDataset(f'data/chexpert/{split}_labels.csv') # also do test - dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=lambda x: x, num_workers=0) - inference_model = InferenceModel() - all_descriptors = inference_model.get_all_descriptors(disease_descriptors_chexpert) - - all_labels = [] - all_probs_neg = [] - - for batch in tqdm(dataloader): - batch = batch[0] - image_paths, labels, keys = batch - image_paths = [Path(image_path) for image_path in image_paths] - agg_probs = [] - agg_negative_probs = [] - for image_path in image_paths: - probs, negative_probs = inference_model.get_descriptor_probs(image_path, descriptors=all_descriptors) - agg_probs.append(probs) - agg_negative_probs.append(negative_probs) - probs = {} # Aggregated - negative_probs = {} # Aggregated - for key in agg_probs[0].keys(): - probs[key] = sum([p[key] for p in agg_probs]) / len(agg_probs) # Mean Aggregation - - for key in agg_negative_probs[0].keys(): - negative_probs[key] = sum([p[key] for p in agg_negative_probs]) / len(agg_negative_probs) # Mean Aggregation - - disease_probs, negative_disease_probs = inference_model.get_diseases_probs(disease_descriptors_chexpert, pos_probs=probs, - negative_probs=negative_probs) - predicted_diseases, prob_vector_neg_prompt = inference_model.get_predictions_bin_prompting(disease_descriptors_chexpert, - disease_probs=disease_probs, - negative_disease_probs=negative_disease_probs, - keys=keys) - all_labels.append(labels) - all_probs_neg.append(prob_vector_neg_prompt) - - all_labels = torch.stack(all_labels) - all_probs_neg = torch.stack(all_probs_neg) - - # evaluation - existing_mask = sum(all_labels, 0) > 0 - all_labels_clean = all_labels[:, existing_mask] - all_probs_neg_clean = all_probs_neg[:, existing_mask] - all_keys_clean = [key for idx, key in enumerate(keys) if existing_mask[idx]] - - overall_auroc, per_disease_auroc = calculate_auroc(all_probs_neg_clean, all_labels_clean) - print(f"AUROC: {overall_auroc:.5f}\n") - for idx, key in enumerate(all_keys_clean): - print(f'{key}: {per_disease_auroc[idx]:.5f}') - - -def inference_chestxray14(): - dataset = ChestXray14Dataset(f'data/chestxray14/Data_Entry_2017_v2020_modified.csv') - dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=lambda x: x, num_workers=1) - inference_model = InferenceModel() - all_descriptors = inference_model.get_all_descriptors(disease_descriptors_chestxray14) - - all_labels = [] - all_probs_neg = [] - for batch in tqdm(dataloader): - batch = batch[0] - image_path, labels, keys = batch - image_path = Path(image_path) - probs, negative_probs = inference_model.get_descriptor_probs(image_path, descriptors=all_descriptors) - disease_probs, negative_disease_probs = inference_model.get_diseases_probs(disease_descriptors_chestxray14, pos_probs=probs, - negative_probs=negative_probs) - predicted_diseases, prob_vector_neg_prompt = inference_model.get_predictions_bin_prompting(disease_descriptors_chestxray14, - disease_probs=disease_probs, - negative_disease_probs=negative_disease_probs, - keys=keys) - all_labels.append(labels) - 
all_probs_neg.append(prob_vector_neg_prompt) - gc.collect() - - all_labels = torch.stack(all_labels) - all_probs_neg = torch.stack(all_probs_neg) - - existing_mask = sum(all_labels, 0) > 0 - all_labels_clean = all_labels[:, existing_mask] - all_probs_neg_clean = all_probs_neg[:, existing_mask] - all_keys_clean = [key for idx, key in enumerate(keys) if existing_mask[idx]] - - overall_auroc, per_disease_auroc = calculate_auroc(all_probs_neg_clean[:, 1:], all_labels_clean[:, 1:]) - print(f"AUROC: {overall_auroc:.5f}\n") - for idx, key in enumerate(all_keys_clean[1:]): - print(f'{key}: {per_disease_auroc[idx]:.5f}') - - -if __name__ == '__main__': - # add argument parser - parser = argparse.ArgumentParser() - parser.add_argument('--dataset', type=str, default='chexpert', help='chexpert or chestxray14') - args = parser.parse_args() - - if args.dataset == 'chexpert': - inference_chexpert() - elif args.dataset == 'chestxray14': - inference_chestxray14() diff --git a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_facade_category.h b/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_facade_category.h deleted file mode 100644 index e00d3ef054bd740b801e47cc1344e38621d8c055..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_facade_category.h +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace thrust -{ - -namespace detail -{ - - -// adapted from http://www.boost.org/doc/libs/1_37_0/libs/iterator/doc/iterator_facade.html#iterator-category -// -// in our implementation, R need not be a reference type to result in a category -// derived from forward_XXX_iterator_tag -// -// iterator-category(T,V,R) := -// if(T is convertible to input_host_iterator_tag -// || T is convertible to output_host_iterator_tag -// || T is convertible to input_device_iterator_tag -// || T is convertible to output_device_iterator_tag -// ) -// return T -// -// else if (T is not convertible to incrementable_traversal_tag) -// the program is ill-formed -// -// else return a type X satisfying the following two constraints: -// -// 1. X is convertible to X1, and not to any more-derived -// type, where X1 is defined by: -// -// if (T is convertible to forward_traversal_tag) -// { -// if (T is convertible to random_access_traversal_tag) -// X1 = random_access_host_iterator_tag -// else if (T is convertible to bidirectional_traversal_tag) -// X1 = bidirectional_host_iterator_tag -// else -// X1 = forward_host_iterator_tag -// } -// else -// { -// if (T is convertible to single_pass_traversal_tag -// && R is convertible to V) -// X1 = input_host_iterator_tag -// else -// X1 = T -// } -// -// 2. 
category-to-traversal(X) is convertible to the most -// derived traversal tag type to which X is also convertible, -// and not to any more-derived traversal tag type. - - -template - struct iterator_facade_default_category; - - -// Thrust's implementation of iterator_facade_default_category is slightly -// different from Boost's equivalent. -// Thrust does not check is_convertible because Reference -// may not be a complete type at this point, and implementations of is_convertible -// typically require that both types be complete. -// Instead, it simply assumes that if is_convertible, -// then the category is input_iterator_tag - - -// this is the function for standard system iterators -template - struct iterator_facade_default_category_std : - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - thrust::detail::identity_, - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - thrust::detail::identity_, - thrust::detail::identity_ - > - >, - thrust::detail::eval_if< // XXX note we differ from Boost here - thrust::detail::is_convertible::value, - thrust::detail::identity_, - thrust::detail::identity_ - > - > -{ -}; // end iterator_facade_default_category_std - - -// this is the function for host system iterators -template - struct iterator_facade_default_category_host : - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - thrust::detail::identity_, - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - thrust::detail::identity_, - thrust::detail::identity_ - > - >, - thrust::detail::eval_if< // XXX note we differ from Boost here - thrust::detail::is_convertible::value, - thrust::detail::identity_, - thrust::detail::identity_ - > - > -{ -}; // end iterator_facade_default_category_host - - -// this is the function for device system iterators -template - struct iterator_facade_default_category_device : - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - thrust::detail::identity_, - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - thrust::detail::identity_, - thrust::detail::identity_ - > - >, - thrust::detail::eval_if< - thrust::detail::is_convertible::value, // XXX note we differ from Boost here - thrust::detail::identity_, - thrust::detail::identity_ - > - > -{ -}; // end iterator_facade_default_category_device - - -// this is the function for any system iterators -template - struct iterator_facade_default_category_any -{ - typedef thrust::detail::iterator_category_with_system_and_traversal< - typename iterator_facade_default_category_std::type, - thrust::any_system_tag, - Traversal - > type; -}; // end iterator_facade_default_category_any - - -template - struct iterator_facade_default_category - // check for any system - : thrust::detail::eval_if< - thrust::detail::is_convertible::value, - iterator_facade_default_category_any, - - // check for host system - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - iterator_facade_default_category_host, - - // check for device system - thrust::detail::eval_if< - thrust::detail::is_convertible::value, - iterator_facade_default_category_device, - - // if we don't recognize the system, get a standard iterator category - // and combine it with System & Traversal - thrust::detail::identity_< - 
thrust::detail::iterator_category_with_system_and_traversal< - typename iterator_facade_default_category_std::type, - System, - Traversal - > - > - > - > - > -{}; - - -template - struct iterator_facade_category_impl -{ - typedef typename iterator_facade_default_category< - System,Traversal,ValueParam,Reference - >::type category; - - // we must be able to deduce both Traversal & System from category - // otherwise, munge them all together - typedef typename thrust::detail::eval_if< - thrust::detail::and_< - thrust::detail::is_same< - Traversal, - typename thrust::detail::iterator_category_to_traversal::type - >, - thrust::detail::is_same< - System, - typename thrust::detail::iterator_category_to_system::type - > - >::value, - thrust::detail::identity_, - thrust::detail::identity_ > - >::type type; -}; // end iterator_facade_category_impl - - -template - struct iterator_facade_category -{ - typedef typename - thrust::detail::eval_if< - thrust::detail::is_iterator_category::value, - thrust::detail::identity_, // categories are fine as-is - iterator_facade_category_impl - >::type type; -}; // end iterator_facade_category - - -} // end detail -} // end thrust - diff --git a/spaces/CVPR/WALT/mmdet/models/necks/channel_mapper.py b/spaces/CVPR/WALT/mmdet/models/necks/channel_mapper.py deleted file mode 100644 index a4f5ed44caefb1612df67785b1f4f0d9ec46ee93..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/necks/channel_mapper.py +++ /dev/null @@ -1,74 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import ConvModule, xavier_init - -from ..builder import NECKS - - -@NECKS.register_module() -class ChannelMapper(nn.Module): - r"""Channel Mapper to reduce/increase channels of backbone features. - - This is used to reduce/increase channels of backbone features. - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale). - kernel_size (int, optional): kernel_size for reducing channels (used - at each scale). Default: 3. - conv_cfg (dict, optional): Config dict for convolution layer. - Default: None. - norm_cfg (dict, optional): Config dict for normalization layer. - Default: None. - act_cfg (dict, optional): Config dict for activation layer in - ConvModule. Default: dict(type='ReLU'). - - Example: - >>> import torch - >>> in_channels = [2, 3, 5, 7] - >>> scales = [340, 170, 84, 43] - >>> inputs = [torch.rand(1, c, s, s) - ... for c, s in zip(in_channels, scales)] - >>> self = ChannelMapper(in_channels, 11, 3).eval() - >>> outputs = self.forward(inputs) - >>> for i in range(len(outputs)): - ... 
print(f'outputs[{i}].shape = {outputs[i].shape}') - outputs[0].shape = torch.Size([1, 11, 340, 340]) - outputs[1].shape = torch.Size([1, 11, 170, 170]) - outputs[2].shape = torch.Size([1, 11, 84, 84]) - outputs[3].shape = torch.Size([1, 11, 43, 43]) - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='ReLU')): - super(ChannelMapper, self).__init__() - assert isinstance(in_channels, list) - - self.convs = nn.ModuleList() - for in_channel in in_channels: - self.convs.append( - ConvModule( - in_channel, - out_channels, - kernel_size, - padding=(kernel_size - 1) // 2, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - - # default init_weights for conv(msra) and norm in ConvModule - def init_weights(self): - """Initialize the weights of ChannelMapper module.""" - for m in self.modules(): - if isinstance(m, nn.Conv2d): - xavier_init(m, distribution='uniform') - - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == len(self.convs) - outs = [self.convs[i](inputs[i]) for i in range(len(inputs))] - return tuple(outs) diff --git a/spaces/CVPR/regionclip-demo/datasets/README.md b/spaces/CVPR/regionclip-demo/datasets/README.md deleted file mode 100644 index 0eb44cc3b23beeb1755ab8d12002d26f13434235..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/datasets/README.md +++ /dev/null @@ -1,140 +0,0 @@ -# Use Builtin Datasets - -A dataset can be used by accessing [DatasetCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.DatasetCatalog) -for its data, or [MetadataCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.MetadataCatalog) for its metadata (class names, etc). -This document explains how to setup the builtin datasets so they can be used by the above APIs. -[Use Custom Datasets](https://detectron2.readthedocs.io/tutorials/datasets.html) gives a deeper dive on how to use `DatasetCatalog` and `MetadataCatalog`, -and how to add new datasets to them. - -Detectron2 has builtin support for a few datasets. -The datasets are assumed to exist in a directory specified by the environment variable -`DETECTRON2_DATASETS`. -Under this directory, detectron2 will look for datasets in the structure described below, if needed. -``` -$DETECTRON2_DATASETS/ - coco/ - lvis/ - cityscapes/ - VOC20{07,12}/ -``` - -You can set the location for builtin datasets by `export DETECTRON2_DATASETS=/path/to/datasets`. -If left unset, the default is `./datasets` relative to your current working directory. - -The [model zoo](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md) -contains configs and models that use these builtin datasets. - -## Expected dataset structure for [COCO instance/keypoint detection](https://cocodataset.org/#download): - -``` -coco/ - annotations/ - instances_{train,val}2017.json - person_keypoints_{train,val}2017.json - {train,val}2017/ - # image files that are mentioned in the corresponding json -``` - -You can use the 2014 version of the dataset as well. - -Some of the builtin tests (`dev/run_*_tests.sh`) uses a tiny version of the COCO dataset, -which you can download with `./datasets/prepare_for_tests.sh`. 
- -## Expected dataset structure for PanopticFPN: - -Extract panoptic annotations from [COCO website](https://cocodataset.org/#download) -into the following structure: -``` -coco/ - annotations/ - panoptic_{train,val}2017.json - panoptic_{train,val}2017/ # png annotations - panoptic_stuff_{train,val}2017/ # generated by the script mentioned below -``` - -Install panopticapi by: -``` -pip install git+https://github.com/cocodataset/panopticapi.git -``` -Then, run `python datasets/prepare_panoptic_fpn.py`, to extract semantic annotations from panoptic annotations. - -## Expected dataset structure for [LVIS instance segmentation](https://www.lvisdataset.org/dataset): -``` -coco/ - {train,val,test}2017/ -lvis/ - lvis_v0.5_{train,val}.json - lvis_v0.5_image_info_test.json - lvis_v1_{train,val}.json - lvis_v1_image_info_test{,_challenge}.json -``` - -Install lvis-api by: -``` -pip install git+https://github.com/lvis-dataset/lvis-api.git -``` - -To evaluate models trained on the COCO dataset using LVIS annotations, -run `python datasets/prepare_cocofied_lvis.py` to prepare "cocofied" LVIS annotations. - -## Expected dataset structure for [cityscapes](https://www.cityscapes-dataset.com/downloads/): -``` -cityscapes/ - gtFine/ - train/ - aachen/ - color.png, instanceIds.png, labelIds.png, polygons.json, - labelTrainIds.png - ... - val/ - test/ - # below are generated Cityscapes panoptic annotation - cityscapes_panoptic_train.json - cityscapes_panoptic_train/ - cityscapes_panoptic_val.json - cityscapes_panoptic_val/ - cityscapes_panoptic_test.json - cityscapes_panoptic_test/ - leftImg8bit/ - train/ - val/ - test/ -``` -Install cityscapes scripts by: -``` -pip install git+https://github.com/mcordts/cityscapesScripts.git -``` - -Note: to create labelTrainIds.png, first prepare the above structure, then run cityscapesescript with: -``` -CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createTrainIdLabelImgs.py -``` -These files are not needed for instance segmentation. - -Note: to generate Cityscapes panoptic dataset, run cityscapesescript with: -``` -CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createPanopticImgs.py -``` -These files are not needed for semantic and instance segmentation. - -## Expected dataset structure for [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/index.html): -``` -VOC20{07,12}/ - Annotations/ - ImageSets/ - Main/ - trainval.txt - test.txt - # train.txt or val.txt, if you use these splits - JPEGImages/ -``` - -## Expected dataset structure for [ADE20k Scene Parsing](http://sceneparsing.csail.mit.edu/): -``` -ADEChallengeData2016/ - annotations/ - annotations_detectron2/ - images/ - objectInfo150.txt -``` -The directory `annotations_detectron2` is generated by running `python datasets/prepare_ade20k_sem_seg.py`. 
diff --git a/spaces/CarlDennis/HYTTS/text/ger_to_ipa.py b/spaces/CarlDennis/HYTTS/text/ger_to_ipa.py deleted file mode 100644 index ee1f6f1bcfa2fc3437fb6d5f579640482f1ad5f4..0000000000000000000000000000000000000000 --- a/spaces/CarlDennis/HYTTS/text/ger_to_ipa.py +++ /dev/null @@ -1,397 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from os.path import join, abspath, dirname -from collections import defaultdict -import epitran - -epi = epitran.Epitran("deu-Latn-nar") - - -def mode_type(mode_in): - """In the case of "sql", this will return an sqlite cursor.""" - if mode_in.lower() == "sql": - import sqlite3 - conn = sqlite3.connect(join(abspath(dirname(__file__)), "./Resources/de.db")) - return conn.cursor() - - -#TESTS -#NUMBERS ARE TOO HARD! - - - -def preprocess(words): - """Returns a string of words stripped of punctuation""" - punct_str = '!"#$%&\'()*+,-./:;<=>/?@[\\]^_`{|}~«» ' - return ' '.join([w.strip(punct_str).lower() for w in words.split()]) - - -def preserve_punc(words): - """converts words to IPA and finds punctuation before and after the word.""" - words_preserved = [] - for w in words.split(): - punct_list = ["", preprocess(w), ""] - before = re.search("^([^A-Za-z0-9]+)[A-Za-z]", w) - after = re.search("[A-Za-z]([^A-Za-z0-9]+)$", w) - if before: - punct_list[0] = str(before.group(1)) - if after: - punct_list[2] = str(after.group(1)) - words_preserved.append(punct_list) - return words_preserved - - - -def apply_punct(triple, as_str=False): - """places surrounding punctuation back on center on a list of preserve_punc triples""" - if type(triple[0]) == list: - for i, t in enumerate(triple): - triple[i] = str(''.join(triple[i])) - if as_str: - return ' '.join(triple) - return triple - if as_str: - return str(''.join(t for t in triple)) - return [''.join(t for t in triple)] - - -def _punct_replace_word(original, transcription): - """Get the IPA transcription of word with the original punctuation marks""" - for i, trans_list in enumerate(transcription): - for j, item in enumerate(trans_list): - triple = [original[i][0]] + [item] + [original[i][2]] - transcription[i][j] = apply_punct(triple, as_str=True) - return transcription - - -def fetch_words(words_in, db_type="sql"): - """fetches a list of words from the database""" - asset = mode_type(db_type) - f_result = [] - if db_type.lower() == "sql": - for word in words_in: - asset.execute("SELECT Words, phonemes FROM De_words WHERE Words IN (?)", (word,)) - result = asset.fetchall() - flag = True - try: - f_result.append(result.pop()) - flag = False - except IndexError: - pass - if result == [] and flag is True: - result = epi.transliterate(word) - f_result.append((word, result)) - f_result = list(filter(None,f_result)) - f_set = set(f_result) - d = defaultdict(list) - for k, v in f_set: - d[k].append(v) - return list(d.items()) - -def get_deu(tokens_in, db_type="sql"): - """query the SQL database for the words and return the phonemes in the order of user_in""" - result = fetch_words(tokens_in, db_type) - ordered = [] - for word in tokens_in: - this_word = [[i[1] for i in result if i[0] == word]][0] - if this_word: - ordered.append(this_word[0]) - else: - ordered.append(["__IGNORE__" + word]) - return ordered - - -def deu_to_ipa(deu_list, mark=True): - """converts the deu word lists into IPA transcriptions""" - symbols = {} - ipa_list = [] # the final list of IPA tokens to be returned - for word_list in deu_list: - ipa_word_list = [] # the word list for each word - for word in word_list: - if re.sub("\d*", "", word.replace("__IGNORE__", 
"")) == "": - pass # do not delete token if it's all numbers - else: - word = re.sub("[0-9]", "", word) - ipa_form = '' - if word.startswith("__IGNORE__"): - ipa_form = word.replace("__IGNORE__", "") - # mark words we couldn't transliterate with an asterisk: - - if mark: - if not re.sub("\d*", "", ipa_form) == "": - ipa_form += "*" - else: - for piece in word.split(" "): - marked = False - unmarked = piece - if piece[0] in ["ˈ", "ˌ"] or piece[0] is None: - marked = True - mark = piece - unmarked = piece[1:] - - if unmarked in symbols: - if marked: - ipa_form += mark + symbols[unmarked] - else: - ipa_form += symbols[unmarked] - - else: - ipa_form += piece - swap_list = [["ˈər", "əˈr"], ["ˈie", "iˈe"]] - for sym in swap_list: - if not ipa_form.startswith(sym[0]): - ipa_form = ipa_form.replace(sym[0], sym[1]) - ipa_word_list.append(ipa_form) - ipa_list.append(sorted(list(set(ipa_word_list)))) - return ipa_list - - -def get_top(ipa_list): - """Returns only the one result for a query. If multiple entries for words are found, only the first is used.""" - return ' '.join([word_list[-1] for word_list in ipa_list]) - - -def get_all(ipa_list): - """utilizes an algorithm to discover and return all possible combinations of IPA transcriptions""" - final_size = 1 - for word_list in ipa_list: - final_size *= len(word_list) - list_all = ["" for s in range(final_size)] - for i in range(len(ipa_list)): - if i == 0: - swtich_rate = final_size / len(ipa_list[i]) - else: - swtich_rate /= len(ipa_list[i]) - k = 0 - for j in range(final_size): - if (j+1) % int(swtich_rate) == 0: - k += 1 - if k == len(ipa_list[i]): - k = 0 - list_all[j] = list_all[j] + ipa_list[i][k] + " " - return sorted([sent[:-1] for sent in list_all]) - - -def ipa_list(words_in, keep_punct=True, db_type="sql"): - """Returns a list of all the discovered IPA transcriptions for each word.""" - if type(words_in) == str: - words = [preserve_punc(w.lower())[0] for w in words_in.split()] - else: - words = [preserve_punc(w.lower())[0] for w in words_in] - deu = get_deu([w[1] for w in words], db_type=db_type) - ipa = deu_to_ipa(deu) - if keep_punct: - ipa = _punct_replace_word(words, ipa) - return ipa - - -def isin_deu(word, db_type="sql"): - """checks if a word is in the deu dictionary. Doesn't strip punctuation. 
- If given more than one word, returns True only if all words are present.""" - if type(word) == str: - word = [preprocess(w) for w in word.split()] - results = fetch_words(word, db_type) - as_set = list(set(t[0] for t in results)) - return len(as_set) == len(set(word)) - -def replace_number(text): - text = text.replace("1","eins ") - text = text.replace("2","zwei ") - text = text.replace("3","drei ") - text = text.replace("4","vier ") - text = text.replace("5","fünf ") - text = text.replace("6","sechs ") - text = text.replace("7","sieben ") - text = text.replace("8","acht ") - text = text.replace("9","neun ") - text = text.replace("0","null ") - return text - - - -def convert(text, retrieve_all=False, keep_punct=True, mode="sql"): - """takes either a string or list of German words and converts them to IPA""" - text = replace_number(text) - ipa = ipa_list( - words_in=text, - keep_punct=keep_punct, - db_type=mode) - if retrieve_all: - return get_all(ipa) - return get_top(ipa) - - - -_decimal_number_re = re.compile(r'\d+\,\d+') -_euros_pre = re.compile(r'€([0-9\,]*[0-9]+)') -_euros_re = re.compile(r'([0-9\,]*[0-9]+)€') -_ordinal_re = re.compile(r'(der |die |das )([0-9]+)\.') -_clock_re=re.compile(r'\d{1,2}\:\d{2}') -_number_re = re.compile(r'[0-9]+') - -def base(text): - text = text.replace("1", "eins ") - text = text.replace("2", "zwei ") - text = text.replace("3", "drei ") - text = text.replace("4", "vier ") - text = text.replace("5", "fünf ") - text = text.replace("6", "sechs ") - text = text.replace("7", "sieben ") - text = text.replace("8", "acht ") - text = text.replace("9", "neun ") - text = text.replace("0", "null ") - return text - -def tens_to_word(num): - tens = num[0] - ones = num[1] - ones_word = base(ones) - - if num =="10": - return "zehn" - elif num=="11": - return "elf" - elif num=="12": - return "zwölf" - - if tens == "1": - if ones == "6": - ones_word = ones_word[:-1] - elif ones == "7": - ones_word = ones_word[:-2] - return ones_word + "zehn" - else: - tens_word = base(tens) - if ones == "1": - ones_word = ones_word[:-1] - if tens == "2": - tens_word = "zwan" - elif tens == "6": - tens_word = tens_word[:-1] - elif tens == "7": - tens_word = tens_word[:-2] - if tens == "3": - tens_word += "ßig" - else: - tens_word += "zig" - if ones == "0": - return tens_word - else: - return ones_word + " und " + tens_word - -def huns_to_word(num): - huns = num[0] - tens = num[1] - - if huns == "1": - huns_word= "hundert" - else: - huns_word = base(huns)+" hundert" - - remain = num_to_word(num[1:]) - if remain != "": - remain = " " + remain - return huns_word + remain - -def thos_to_word(num): - thos = num[0] - if thos == "1": - thos_word= "tausend" - else: - thos_word = base(thos)+" tausend" - remain=num_to_word(num[1:]) - if remain!="": - remain=" "+remain - return thos_word+remain - -def num_to_word(num): - num=num.lstrip("0") - if num=="": - return("") - digit=len(num) - if digit==1: - return base(num) - elif digit==2: - return tens_to_word(num) - elif digit == 3: - return huns_to_word(num) - elif digit == 4: - return thos_to_word(num) - else: - return base(num) - -def number_to_words(m): - m=m.group(0).lstrip("0") - if m=="": - return"null" - return num_to_word(m) - -def _expand_ordinal(m): - - pre=m.group(1) - m = m.group(2).lstrip("0") - - if m=="": - return"NULL" - num=int(m) - if num<=19 & num>=1: - if num ==1: - return "erste" - elif num==3: - return "dritte" - elif num==7: - return "siebte" - elif num==8: - return "achte" - else: - return pre + num_to_word(m) + "te" - else: - 
return pre + num_to_word(m) + "ste" - -def _expand_decimal(m): - match=m.group(0) - parts = match.split(',') - if int(parts[0])==0: - return '%s komma %s' % ("null", base(parts[1])) - return '%s komma %s' % (num_to_word(parts[0]),base(parts[1])) - -def _expand_euros(m): - match = m.group(1) - parts = match.split(',') - if len(parts) > 2: - return match + ' euro' # Unexpected format - euros = int(parts[0]) if parts[0] else 0 - cents = int(parts[1])*10 if len(parts) > 1 and parts[1] else 0 - if euros and cents: - return '%s euro %s' % (euros, cents) - elif euros: - return '%s euro' % (euros) - elif cents: - return '%s cent' % (cents) - else: - return 'null euro' - -def _expand_clock(m): - match = m.group(0) - parts = match.split(':') - if int(parts[0]) == 0: - return '%s Uhr %s' % ("null",num_to_word(parts[1])) - elif int(parts[0]) == 1: - return '%s Uhr %s' % ("ein", num_to_word(parts[1])) - return '%s Uhr %s' % (num_to_word(parts[0]),num_to_word(parts[1])) - -def normalize_numbers(text): - text = re.sub(_euros_pre, _expand_euros, text) - text = re.sub(_euros_re, _expand_euros, text) - text = re.sub(_clock_re, _expand_clock, text) - text = re.sub(_decimal_number_re, _expand_decimal, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, number_to_words, text) - text=text.replace(" "," ") - return text - -def collapse_whitespace(text): - return re.sub(r'\s+', ' ', text) - -def mark_dark_l(text): - return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) diff --git a/spaces/ChrisCaviar/ControlNet-v1-1/app_normal.py b/spaces/ChrisCaviar/ControlNet-v1-1/app_normal.py deleted file mode 100644 index 1519b3095bc5250016066754ebe6c0328773f24a..0000000000000000000000000000000000000000 --- a/spaces/ChrisCaviar/ControlNet-v1-1/app_normal.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python - -import gradio as gr - -from utils import randomize_seed_fn - - -def create_demo(process, max_images=12, default_num_images=3): - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - image = gr.Image() - prompt = gr.Textbox(label='Prompt') - run_button = gr.Button('Run') - with gr.Accordion('Advanced options', open=False): - preprocessor_name = gr.Radio(label='Preprocessor', - choices=['NormalBae', 'None'], - type='value', - value='NormalBae') - num_samples = gr.Slider(label='Images', - minimum=1, - maximum=max_images, - value=default_num_images, - step=1) - image_resolution = gr.Slider(label='Image resolution', - minimum=256, - maximum=512, - value=512, - step=256) - preprocess_resolution = gr.Slider( - label='Preprocess resolution', - minimum=128, - maximum=512, - value=384, - step=1) - num_steps = gr.Slider(label='Number of steps', - minimum=1, - maximum=100, - value=20, - step=1) - guidance_scale = gr.Slider(label='Guidance scale', - minimum=0.1, - maximum=30.0, - value=9.0, - step=0.1) - seed = gr.Slider(label='Seed', - minimum=0, - maximum=1000000, - step=1, - value=0, - randomize=True) - randomize_seed = gr.Checkbox(label='Randomize seed', - value=True) - a_prompt = gr.Textbox( - label='Additional prompt', - value='best quality, extremely detailed') - n_prompt = gr.Textbox( - label='Negative prompt', - value= - 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality' - ) - with gr.Column(): - result = gr.Gallery(label='Output', show_label=False).style( - columns=2, object_fit='scale-down') - inputs = [ - image, - prompt, - a_prompt, - n_prompt, - num_samples, - 
image_resolution, - preprocess_resolution, - num_steps, - guidance_scale, - seed, - preprocessor_name, - ] - prompt.submit( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - ).then( - fn=process, - inputs=inputs, - outputs=result, - ) - run_button.click( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - ).then( - fn=process, - inputs=inputs, - outputs=result, - api_name='normal', - ) - return demo - - -if __name__ == '__main__': - from model import Model - model = Model(task_name='NormalBae') - demo = create_demo(model.process_normal) - demo.queue().launch() diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/knock/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/knock/__init__.py deleted file mode 100644 index 0a44b37fc5563deea8406c6ddf109860a56e9485..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/knock/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from pathlib import Path -from typing import List - -from PIL.Image import Image as IMG -from pil_utils import BuildImage - -from meme_generator import add_meme -from meme_generator.utils import save_gif - -img_dir = Path(__file__).parent / "images" - - -def knock(images: List[BuildImage], texts, args): - img = images[0].convert("RGBA").square() - # fmt: off - locs = [(60, 308, 210, 195), (60, 308, 210, 198), (45, 330, 250, 172), (58, 320, 218, 180), - (60, 310, 215, 193), (40, 320, 250, 285), (48, 308, 226, 192), (51, 301, 223, 200)] - # fmt: on - frames: List[IMG] = [] - for i in range(8): - frame = BuildImage.open(img_dir / f"{i}.png") - x, y, w, h = locs[i] - frame.paste(img.resize((w, h)), (x, y), below=True) - frames.append(frame.image) - return save_gif(frames, 0.04) - - -add_meme("knock", knock, min_images=1, max_images=1, keywords=["敲"]) diff --git a/spaces/Clebersla/RVC_V2_Huggingface_Version/i18n.py b/spaces/Clebersla/RVC_V2_Huggingface_Version/i18n.py deleted file mode 100644 index 37f310fadd0b48b2f364877158fb2105d645fc03..0000000000000000000000000000000000000000 --- a/spaces/Clebersla/RVC_V2_Huggingface_Version/i18n.py +++ /dev/null @@ -1,28 +0,0 @@ -import locale -import json -import os - - -def load_language_list(language): - with open(f"./i18n/{language}.json", "r", encoding="utf-8") as f: - language_list = json.load(f) - return language_list - - -class I18nAuto: - def __init__(self, language=None): - if language in ["Auto", None]: - language = locale.getdefaultlocale()[ - 0 - ] # getlocale can't identify the system's language ((None, None)) - if not os.path.exists(f"./i18n/{language}.json"): - language = "en_US" - self.language = language - # print("Use Language:", language) - self.language_map = load_language_list(language) - - def __call__(self, key): - return self.language_map.get(key, key) - - def print(self): - print("Use Language:", self.language) diff --git a/spaces/Codecooker/rvcapi/src/mdx.py b/spaces/Codecooker/rvcapi/src/mdx.py deleted file mode 100644 index 575c456fbdf9e5b5955401f41a3a58ddc27267b3..0000000000000000000000000000000000000000 --- a/spaces/Codecooker/rvcapi/src/mdx.py +++ /dev/null @@ -1,287 +0,0 @@ -import gc -import hashlib -import os -import queue -import threading -import warnings - -import librosa -import numpy as np -import onnxruntime as ort -import soundfile as sf -import torch -from tqdm import tqdm - -warnings.filterwarnings("ignore") -stem_naming = {'Vocals': 'Instrumental', 'Other': 'Instruments', 'Instrumental': 'Vocals', 'Drums': 'Drumless', 'Bass': 'Bassless'} - - -class MDXModel: - 
def __init__(self, device, dim_f, dim_t, n_fft, hop=1024, stem_name=None, compensation=1.000): - self.dim_f = dim_f - self.dim_t = dim_t - self.dim_c = 4 - self.n_fft = n_fft - self.hop = hop - self.stem_name = stem_name - self.compensation = compensation - - self.n_bins = self.n_fft // 2 + 1 - self.chunk_size = hop * (self.dim_t - 1) - self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(device) - - out_c = self.dim_c - - self.freq_pad = torch.zeros([1, out_c, self.n_bins - self.dim_f, self.dim_t]).to(device) - - def stft(self, x): - x = x.reshape([-1, self.chunk_size]) - x = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True, return_complex=True) - x = torch.view_as_real(x) - x = x.permute([0, 3, 1, 2]) - x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape([-1, 4, self.n_bins, self.dim_t]) - return x[:, :, :self.dim_f] - - def istft(self, x, freq_pad=None): - freq_pad = self.freq_pad.repeat([x.shape[0], 1, 1, 1]) if freq_pad is None else freq_pad - x = torch.cat([x, freq_pad], -2) - # c = 4*2 if self.target_name=='*' else 2 - x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape([-1, 2, self.n_bins, self.dim_t]) - x = x.permute([0, 2, 3, 1]) - x = x.contiguous() - x = torch.view_as_complex(x) - x = torch.istft(x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True) - return x.reshape([-1, 2, self.chunk_size]) - - -class MDX: - DEFAULT_SR = 44100 - # Unit: seconds - DEFAULT_CHUNK_SIZE = 0 * DEFAULT_SR - DEFAULT_MARGIN_SIZE = 1 * DEFAULT_SR - - DEFAULT_PROCESSOR = 0 - - def __init__(self, model_path: str, params: MDXModel, processor=DEFAULT_PROCESSOR): - - # Set the device and the provider (CPU or CUDA) - self.device = torch.device(f'cuda:{processor}') if processor >= 0 else torch.device('cpu') - self.provider = ['CUDAExecutionProvider'] if processor >= 0 else ['CPUExecutionProvider'] - - self.model = params - - # Load the ONNX model using ONNX Runtime - self.ort = ort.InferenceSession(model_path, providers=self.provider) - # Preload the model for faster performance - self.ort.run(None, {'input': torch.rand(1, 4, params.dim_f, params.dim_t).numpy()}) - self.process = lambda spec: self.ort.run(None, {'input': spec.cpu().numpy()})[0] - - self.prog = None - - @staticmethod - def get_hash(model_path): - try: - with open(model_path, 'rb') as f: - f.seek(- 10000 * 1024, 2) - model_hash = hashlib.md5(f.read()).hexdigest() - except: - model_hash = hashlib.md5(open(model_path, 'rb').read()).hexdigest() - - return model_hash - - @staticmethod - def segment(wave, combine=True, chunk_size=DEFAULT_CHUNK_SIZE, margin_size=DEFAULT_MARGIN_SIZE): - """ - Segment or join segmented wave array - - Args: - wave: (np.array) Wave array to be segmented or joined - combine: (bool) If True, combines segmented wave array. If False, segments wave array. 
- chunk_size: (int) Size of each segment (in samples) - margin_size: (int) Size of margin between segments (in samples) - - Returns: - numpy array: Segmented or joined wave array - """ - - if combine: - processed_wave = None # Initializing as None instead of [] for later numpy array concatenation - for segment_count, segment in enumerate(wave): - start = 0 if segment_count == 0 else margin_size - end = None if segment_count == len(wave) - 1 else -margin_size - if margin_size == 0: - end = None - if processed_wave is None: # Create array for first segment - processed_wave = segment[:, start:end] - else: # Concatenate to existing array for subsequent segments - processed_wave = np.concatenate((processed_wave, segment[:, start:end]), axis=-1) - - else: - processed_wave = [] - sample_count = wave.shape[-1] - - if chunk_size <= 0 or chunk_size > sample_count: - chunk_size = sample_count - - if margin_size > chunk_size: - margin_size = chunk_size - - for segment_count, skip in enumerate(range(0, sample_count, chunk_size)): - - margin = 0 if segment_count == 0 else margin_size - end = min(skip + chunk_size + margin_size, sample_count) - start = skip - margin - - cut = wave[:, start:end].copy() - processed_wave.append(cut) - - if end == sample_count: - break - - return processed_wave - - def pad_wave(self, wave): - """ - Pad the wave array to match the required chunk size - - Args: - wave: (np.array) Wave array to be padded - - Returns: - tuple: (padded_wave, pad, trim) - - padded_wave: Padded wave array - - pad: Number of samples that were padded - - trim: Number of samples that were trimmed - """ - n_sample = wave.shape[1] - trim = self.model.n_fft // 2 - gen_size = self.model.chunk_size - 2 * trim - pad = gen_size - n_sample % gen_size - - # Padded wave - wave_p = np.concatenate((np.zeros((2, trim)), wave, np.zeros((2, pad)), np.zeros((2, trim))), 1) - - mix_waves = [] - for i in range(0, n_sample + pad, gen_size): - waves = np.array(wave_p[:, i:i + self.model.chunk_size]) - mix_waves.append(waves) - - mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(self.device) - - return mix_waves, pad, trim - - def _process_wave(self, mix_waves, trim, pad, q: queue.Queue, _id: int): - """ - Process each wave segment in a multi-threaded environment - - Args: - mix_waves: (torch.Tensor) Wave segments to be processed - trim: (int) Number of samples trimmed during padding - pad: (int) Number of samples padded during padding - q: (queue.Queue) Queue to hold the processed wave segments - _id: (int) Identifier of the processed wave segment - - Returns: - numpy array: Processed wave segment - """ - mix_waves = mix_waves.split(1) - with torch.no_grad(): - pw = [] - for mix_wave in mix_waves: - self.prog.update() - spec = self.model.stft(mix_wave) - processed_spec = torch.tensor(self.process(spec)) - processed_wav = self.model.istft(processed_spec.to(self.device)) - processed_wav = processed_wav[:, :, trim:-trim].transpose(0, 1).reshape(2, -1).cpu().numpy() - pw.append(processed_wav) - processed_signal = np.concatenate(pw, axis=-1)[:, :-pad] - q.put({_id: processed_signal}) - return processed_signal - - def process_wave(self, wave: np.array, mt_threads=1): - """ - Process the wave array in a multi-threaded environment - - Args: - wave: (np.array) Wave array to be processed - mt_threads: (int) Number of threads to be used for processing - - Returns: - numpy array: Processed wave array - """ - self.prog = tqdm(total=0) - chunk = wave.shape[-1] // mt_threads - waves = self.segment(wave, False, chunk) - - # 
Create a queue to hold the processed wave segments - q = queue.Queue() - threads = [] - for c, batch in enumerate(waves): - mix_waves, pad, trim = self.pad_wave(batch) - self.prog.total = len(mix_waves) * mt_threads - thread = threading.Thread(target=self._process_wave, args=(mix_waves, trim, pad, q, c)) - thread.start() - threads.append(thread) - for thread in threads: - thread.join() - self.prog.close() - - processed_batches = [] - while not q.empty(): - processed_batches.append(q.get()) - processed_batches = [list(wave.values())[0] for wave in - sorted(processed_batches, key=lambda d: list(d.keys())[0])] - assert len(processed_batches) == len(waves), 'Incomplete processed batches, please reduce batch size!' - return self.segment(processed_batches, True, chunk) - - -def run_mdx(model_params, output_dir, model_path, filename, exclude_main=False, exclude_inversion=False, suffix=None, invert_suffix=None, denoise=False, keep_orig=True, m_threads=2): - device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') - - device_properties = torch.cuda.get_device_properties(device) - vram_gb = device_properties.total_memory / 1024**3 - m_threads = 1 if vram_gb < 8 else 2 - - model_hash = MDX.get_hash(model_path) - mp = model_params.get(model_hash) - model = MDXModel( - device, - dim_f=mp["mdx_dim_f_set"], - dim_t=2 ** mp["mdx_dim_t_set"], - n_fft=mp["mdx_n_fft_scale_set"], - stem_name=mp["primary_stem"], - compensation=mp["compensate"] - ) - - mdx_sess = MDX(model_path, model) - wave, sr = librosa.load(filename, mono=False, sr=44100) - # normalizing input wave gives better output - peak = max(np.max(wave), abs(np.min(wave))) - wave /= peak - if denoise: - wave_processed = -(mdx_sess.process_wave(-wave, m_threads)) + (mdx_sess.process_wave(wave, m_threads)) - wave_processed *= 0.5 - else: - wave_processed = mdx_sess.process_wave(wave, m_threads) - # return to previous peak - wave_processed *= peak - stem_name = model.stem_name if suffix is None else suffix - - main_filepath = None - if not exclude_main: - main_filepath = os.path.join(output_dir, f"{os.path.basename(os.path.splitext(filename)[0])}_{stem_name}.wav") - sf.write(main_filepath, wave_processed.T, sr) - - invert_filepath = None - if not exclude_inversion: - diff_stem_name = stem_naming.get(stem_name) if invert_suffix is None else invert_suffix - stem_name = f"{stem_name}_diff" if diff_stem_name is None else diff_stem_name - invert_filepath = os.path.join(output_dir, f"{os.path.basename(os.path.splitext(filename)[0])}_{stem_name}.wav") - sf.write(invert_filepath, (-wave_processed.T * model.compensation) + wave.T, sr) - - if not keep_orig: - os.remove(filename) - - del mdx_sess, wave_processed, wave - gc.collect() - return main_filepath, invert_filepath diff --git a/spaces/Cvandi/remake/scripts/generate_meta_info_pairdata.py b/spaces/Cvandi/remake/scripts/generate_meta_info_pairdata.py deleted file mode 100644 index 76dce7e41c803a8055f3627cccb98deb51419b09..0000000000000000000000000000000000000000 --- a/spaces/Cvandi/remake/scripts/generate_meta_info_pairdata.py +++ /dev/null @@ -1,49 +0,0 @@ -import argparse -import glob -import os - - -def main(args): - txt_file = open(args.meta_info, 'w') - # sca images - img_paths_gt = sorted(glob.glob(os.path.join(args.input[0], '*'))) - img_paths_lq = sorted(glob.glob(os.path.join(args.input[1], '*'))) - - assert len(img_paths_gt) == len(img_paths_lq), ('GT folder and LQ folder should have the same length, but got ' - f'{len(img_paths_gt)} and {len(img_paths_lq)}.') - - for 
img_path_gt, img_path_lq in zip(img_paths_gt, img_paths_lq): - # get the relative paths - img_name_gt = os.path.relpath(img_path_gt, args.root[0]) - img_name_lq = os.path.relpath(img_path_lq, args.root[1]) - print(f'{img_name_gt}, {img_name_lq}') - txt_file.write(f'{img_name_gt}, {img_name_lq}\n') - - -if __name__ == '__main__': - """This script is used to generate meta info (txt file) for paired images. - """ - parser = argparse.ArgumentParser() - parser.add_argument( - '--input', - nargs='+', - default=['datasets/DF2K/DIV2K_train_HR_sub', 'datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub'], - help='Input folder, should be [gt_folder, lq_folder]') - parser.add_argument('--root', nargs='+', default=[None, None], help='Folder root, will use the ') - parser.add_argument( - '--meta_info', - type=str, - default='datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt', - help='txt path for meta info') - args = parser.parse_args() - - assert len(args.input) == 2, 'Input folder should have two elements: gt folder and lq folder' - assert len(args.root) == 2, 'Root path should have two elements: root for gt folder and lq folder' - os.makedirs(os.path.dirname(args.meta_info), exist_ok=True) - for i in range(2): - if args.input[i].endswith('/'): - args.input[i] = args.input[i][:-1] - if args.root[i] is None: - args.root[i] = os.path.dirname(args.input[i]) - - main(args) diff --git a/spaces/DEVINKofficial/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/app.py b/spaces/DEVINKofficial/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/app.py deleted file mode 100644 index eb453a809d17e6dee04e158d1c68dc807478edef..0000000000000000000000000000000000000000 --- a/spaces/DEVINKofficial/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Onodofthenorth/SD_PixelArt_SpriteSheet_Generator").launch() \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GdImageFile.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GdImageFile.py deleted file mode 100644 index bafc43a19d432290867a5c08b9820f2e4f79aea3..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GdImageFile.py +++ /dev/null @@ -1,97 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# GD file handling -# -# History: -# 1996-04-12 fl Created -# -# Copyright (c) 1997 by Secret Labs AB. -# Copyright (c) 1996 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# - - -""" -.. note:: - This format cannot be automatically recognized, so the - class is not registered for use with :py:func:`PIL.Image.open()`. To open a - gd file, use the :py:func:`PIL.GdImageFile.open()` function instead. - -.. warning:: - THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This - implementation is provided for convenience and demonstrational - purposes only. -""" - - -from . import ImageFile, ImagePalette, UnidentifiedImageError -from ._binary import i16be as i16 -from ._binary import i32be as i32 - - -class GdImageFile(ImageFile.ImageFile): - """ - Image plugin for the GD uncompressed format. Note that this format - is not supported by the standard :py:func:`PIL.Image.open()` function. To use - this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and - use the :py:func:`PIL.GdImageFile.open()` function. 
- """ - - format = "GD" - format_description = "GD uncompressed images" - - def _open(self): - # Header - s = self.fp.read(1037) - - if i16(s) not in [65534, 65535]: - msg = "Not a valid GD 2.x .gd file" - raise SyntaxError(msg) - - self.mode = "L" # FIXME: "P" - self._size = i16(s, 2), i16(s, 4) - - true_color = s[6] - true_color_offset = 2 if true_color else 0 - - # transparency index - tindex = i32(s, 7 + true_color_offset) - if tindex < 256: - self.info["transparency"] = tindex - - self.palette = ImagePalette.raw( - "XBGR", s[7 + true_color_offset + 4 : 7 + true_color_offset + 4 + 256 * 4] - ) - - self.tile = [ - ( - "raw", - (0, 0) + self.size, - 7 + true_color_offset + 4 + 256 * 4, - ("L", 0, 1), - ) - ] - - -def open(fp, mode="r"): - """ - Load texture from a GD image file. - - :param fp: GD file name, or an opened file handle. - :param mode: Optional mode. In this version, if the mode argument - is given, it must be "r". - :returns: An image instance. - :raises OSError: If the image could not be read. - """ - if mode != "r": - msg = "bad mode" - raise ValueError(msg) - - try: - return GdImageFile(fp) - except SyntaxError as e: - msg = "cannot identify this image file" - raise UnidentifiedImageError(msg) from e diff --git a/spaces/Dana19/ImageRecognition_FaceCount/README.md b/spaces/Dana19/ImageRecognition_FaceCount/README.md deleted file mode 100644 index 8afdea6aa817a1ec3b056820a882b63deedc6772..0000000000000000000000000000000000000000 --- a/spaces/Dana19/ImageRecognition_FaceCount/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Headcount -emoji: 💻 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.3 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DemoLou/moe-tts/text/thai.py b/spaces/DemoLou/moe-tts/text/thai.py deleted file mode 100644 index 998207c01a85c710a46db1ec8b62c39c2d94bc84..0000000000000000000000000000000000000000 --- a/spaces/DemoLou/moe-tts/text/thai.py +++ /dev/null @@ -1,44 +0,0 @@ -import re -from num_thai.thainumbers import NumThai - - -num = NumThai() - -# List of (Latin alphabet, Thai) pairs: -_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'เอ'), - ('b','บี'), - ('c','ซี'), - ('d','ดี'), - ('e','อี'), - ('f','เอฟ'), - ('g','จี'), - ('h','เอช'), - ('i','ไอ'), - ('j','เจ'), - ('k','เค'), - ('l','แอล'), - ('m','เอ็ม'), - ('n','เอ็น'), - ('o','โอ'), - ('p','พี'), - ('q','คิว'), - ('r','แอร์'), - ('s','เอส'), - ('t','ที'), - ('u','ยู'), - ('v','วี'), - ('w','ดับเบิลยู'), - ('x','เอ็กซ์'), - ('y','วาย'), - ('z','ซี') -]] - - -def num_to_thai(text): - return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text) - -def latin_to_thai(text): - for regex, replacement in _latin_to_thai: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/Detomo/AI-Galary/README.md b/spaces/Detomo/AI-Galary/README.md deleted file mode 100644 index 5772a1eb261aa38b6407c07b0633d4a12566eccf..0000000000000000000000000000000000000000 --- a/spaces/Detomo/AI-Galary/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AI Galary -emoji: ⚡ -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Detomo/generate_wifi_qrcode/app.py 
b/spaces/Detomo/generate_wifi_qrcode/app.py deleted file mode 100644 index e6681ad55d07851290ef2f0f04f1e026a95b917a..0000000000000000000000000000000000000000 --- a/spaces/Detomo/generate_wifi_qrcode/app.py +++ /dev/null @@ -1,36 +0,0 @@ -import wifi_qrcode_generator -import gradio as gr - -examples = [['Home wifi', 'very complicated password', 'WPA']] - -def infer(wifi_name, wifi_pw, authentication_type): - image = wifi_qrcode_generator.wifi_qrcode( - wifi_name, False, authentication_type, wifi_pw - ) - image.save('qrcode_wifi.png') - return 'qrcode_wifi.png' - -description= "Generate code for accessing WIFI. Fill your wifi name, wifi password and select authentication type. Then click submit and enjoy 😎" - -iface = gr.Interface( - fn=infer, - title="Generate QR code for accessing WIFI 🥃", - description=description, - inputs=[ - gr.Textbox( - label="Wifi name", - lines=1, - ), - gr.Textbox( - label="Wifi password", - lines=1, - ), - gr.Dropdown( - label='authentication_type', - choices=["WPA", "WEP", "nopass"], - value='WPA' - ), - ], - outputs="image", - examples=examples, - article="Author: Vu Minh Chien", cache_examples=True).launch(enable_queue=True) diff --git a/spaces/DiffusionArtco/scifi-art-creator/app.py b/spaces/DiffusionArtco/scifi-art-creator/app.py deleted file mode 100644 index 236417b745e1681f1a9e1ae644d75254da2de370..0000000000000000000000000000000000000000 --- a/spaces/DiffusionArtco/scifi-art-creator/app.py +++ /dev/null @@ -1,36 +0,0 @@ -import gradio as gr -import requests -from PIL import Image -from io import BytesIO -import base64 - -api_url = "https://5cb20b40-572c-426f-9466-995256f9b6eb.id.repl.co/generate_image" - -def generate_image(model="Sci-Fi Diffusion", prompt="", seed=0, negative_prompt="", sampler="k_dpmpp_2s_a", steps=50): - data = "?model=" + model + "&prompt=" + prompt + "&seed=" + str(seed) + "&negative_prompt=" + negative_prompt + "&sampler=" + sampler + "&steps=" + str(steps) - response = requests.post(api_url + data, timeout=400) - if response.status_code == 200: - img_base64 = response.json()["url"] - img_bytes = base64.b64decode(img_base64) - img = Image.open(BytesIO(img_bytes)) - return img - else: - return None - -inputs = [ - gr.inputs.Dropdown([ 'Experience', 'FKing SciFi', 'Future Diffusion', 'JWST Deep Space Diffusion', 'Protogen Infinity', 'RCNZ Dumb Monkey', 'RealBiter', 'Robo-Diffusion', 'Sci-Fi Diffusion'], label="Model", default="Sci-Fi Diffusion"), - gr.inputs.Textbox(label="Prompt"), - gr.inputs.Number(label="Seed", default=0), - gr.inputs.Textbox(label="Negative Prompt", default=""), - gr.inputs.Dropdown(["k_lms", "k_heun", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a", "DDIM", "k_dpm_fast", "k_dpm_adaptive", "k_dpmpp_2m", "k_dpmpp_2s_a", "k_dpmpp_sde"], label="Sampler", default="k_dpmpp_2s_a"), - gr.inputs.Number(label="Steps", default=50) -] - -outputs = gr.outputs.Image(label="Generated Image", type="pil") - -interface = gr.Interface(generate_image, inputs, outputs, title="", - description="
    ", - examples=[]) - -interface.launch() - diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/utils/ImagesDataset.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/utils/ImagesDataset.py deleted file mode 100644 index ffe6106ff3d52b1c6c13ecab48618f2786bbfee5..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/stylegan_human/utils/ImagesDataset.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - - -import os -from torch.utils.data import Dataset -from PIL import Image - -from utils.data_utils import make_dataset - - -class ImagesDataset(Dataset): - - def __init__(self, source_root, source_transform=None): - self.source_paths = sorted(make_dataset(source_root)) - self.source_transform = source_transform - - def __len__(self): - return len(self.source_paths) - - def __getitem__(self, index): - fname, from_path = self.source_paths[index] - from_im = Image.open(from_path).convert('RGB') - - if self.source_transform: - from_im = self.source_transform(from_im) - - return fname, from_im diff --git a/spaces/DragGan/DragGan-Inversion/torch_utils/ops/filtered_lrelu.cpp b/spaces/DragGan/DragGan-Inversion/torch_utils/ops/filtered_lrelu.cpp deleted file mode 100644 index ff4149b8b46b54d2f400ae10e44d19f20503ba1f..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/torch_utils/ops/filtered_lrelu.cpp +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include -#include -#include -#include "filtered_lrelu.h" - -//------------------------------------------------------------------------ - -static std::tuple filtered_lrelu( - torch::Tensor x, torch::Tensor fu, torch::Tensor fd, torch::Tensor b, torch::Tensor si, - int up, int down, int px0, int px1, int py0, int py1, int sx, int sy, float gain, float slope, float clamp, bool flip_filters, bool writeSigns) -{ - // Set CUDA device. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - - // Validate arguments. 
- TORCH_CHECK(fu.device() == x.device() && fd.device() == x.device() && b.device() == x.device(), "all input tensors must reside on the same device"); - TORCH_CHECK(fu.dtype() == torch::kFloat && fd.dtype() == torch::kFloat, "fu and fd must be float32"); - TORCH_CHECK(b.dtype() == x.dtype(), "x and b must have the same dtype"); - TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat, "x and b must be float16 or float32"); - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); - TORCH_CHECK(x.numel() > 0, "x is empty"); - TORCH_CHECK((fu.dim() == 1 || fu.dim() == 2) && (fd.dim() == 1 || fd.dim() == 2), "fu and fd must be rank 1 or 2"); - TORCH_CHECK(fu.size(0) <= INT_MAX && fu.size(-1) <= INT_MAX, "fu is too large"); - TORCH_CHECK(fd.size(0) <= INT_MAX && fd.size(-1) <= INT_MAX, "fd is too large"); - TORCH_CHECK(fu.numel() > 0, "fu is empty"); - TORCH_CHECK(fd.numel() > 0, "fd is empty"); - TORCH_CHECK(b.dim() == 1 && b.size(0) == x.size(1), "b must be a vector with the same number of channels as x"); - TORCH_CHECK(up >= 1 && down >= 1, "up and down must be at least 1"); - - // Figure out how much shared memory is available on the device. - int maxSharedBytes = 0; - AT_CUDA_CHECK(cudaDeviceGetAttribute(&maxSharedBytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, x.device().index())); - int sharedKB = maxSharedBytes >> 10; - - // Populate enough launch parameters to check if a CUDA kernel exists. - filtered_lrelu_kernel_params p; - p.up = up; - p.down = down; - p.fuShape = make_int2((int)fu.size(-1), fu.dim() == 2 ? (int)fu.size(0) : 0); // shape [n, 0] indicates separable filter. - p.fdShape = make_int2((int)fd.size(-1), fd.dim() == 2 ? (int)fd.size(0) : 0); - filtered_lrelu_kernel_spec test_spec = choose_filtered_lrelu_kernel(p, sharedKB); - if (!test_spec.exec) - { - // No kernel found - return empty tensors and indicate missing kernel with return code of -1. - return std::make_tuple(torch::Tensor(), torch::Tensor(), -1); - } - - // Input/output element size. - int64_t sz = (x.dtype() == torch::kHalf) ? 2 : 4; - - // Input sizes. - int64_t xw = (int)x.size(3); - int64_t xh = (int)x.size(2); - int64_t fut_w = (int)fu.size(-1) - 1; - int64_t fut_h = (int)fu.size(0) - 1; - int64_t fdt_w = (int)fd.size(-1) - 1; - int64_t fdt_h = (int)fd.size(0) - 1; - - // Logical size of upsampled buffer. - int64_t cw = xw * up + (px0 + px1) - fut_w; - int64_t ch = xh * up + (py0 + py1) - fut_h; - TORCH_CHECK(cw > fdt_w && ch > fdt_h, "upsampled buffer must be at least the size of downsampling filter"); - TORCH_CHECK(cw <= INT_MAX && ch <= INT_MAX, "upsampled buffer is too large"); - - // Compute output size and allocate. - int64_t yw = (cw - fdt_w + (down - 1)) / down; - int64_t yh = (ch - fdt_h + (down - 1)) / down; - TORCH_CHECK(yw > 0 && yh > 0, "output must be at least 1x1"); - TORCH_CHECK(yw <= INT_MAX && yh <= INT_MAX, "output is too large"); - torch::Tensor y = torch::empty({x.size(0), x.size(1), yh, yw}, x.options(), x.suggest_memory_format()); - - // Allocate sign tensor. - torch::Tensor so; - torch::Tensor s = si; - bool readSigns = !!s.numel(); - int64_t sw_active = 0; // Active width of sign tensor. - if (writeSigns) - { - sw_active = yw * down - (down - 1) + fdt_w; // Active width in elements. - int64_t sh = yh * down - (down - 1) + fdt_h; // Height = active height. - int64_t sw = (sw_active + 15) & ~15; // Width = active width in elements, rounded up to multiple of 16. 
- TORCH_CHECK(sh <= INT_MAX && (sw >> 2) <= INT_MAX, "signs is too large"); - s = so = torch::empty({x.size(0), x.size(1), sh, sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); - } - else if (readSigns) - sw_active = s.size(3) << 2; - - // Validate sign tensor if in use. - if (readSigns || writeSigns) - { - TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); - TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); - TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); - TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); - TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); - TORCH_CHECK(s.size(2) <= INT_MAX && s.size(3) <= INT_MAX, "signs is too large"); - } - - // Populate rest of CUDA kernel parameters. - p.x = x.data_ptr(); - p.y = y.data_ptr(); - p.b = b.data_ptr(); - p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; - p.fu = fu.data_ptr(); - p.fd = fd.data_ptr(); - p.pad0 = make_int2(px0, py0); - p.gain = gain; - p.slope = slope; - p.clamp = clamp; - p.flip = (flip_filters) ? 1 : 0; - p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.yShape = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); - p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3), (int)s.size(2)) : make_int2(0, 0); // Width is in bytes. Contiguous. - p.sOfs = make_int2(sx, sy); - p.swLimit = (sw_active + 3) >> 2; // Rounded up to bytes. - - // x, y, b strides are in bytes. - p.xStride = make_longlong4(sz * x.stride(3), sz * x.stride(2), sz * x.stride(1), sz * x.stride(0)); - p.yStride = make_longlong4(sz * y.stride(3), sz * y.stride(2), sz * y.stride(1), sz * y.stride(0)); - p.bStride = sz * b.stride(0); - - // fu, fd strides are in elements. - p.fuStride = make_longlong3(fu.stride(-1), fu.dim() == 2 ? fu.stride(0) : 0, 0); - p.fdStride = make_longlong3(fd.stride(-1), fd.dim() == 2 ? fd.stride(0) : 0, 0); - - // Determine if indices don't fit in int32. Support negative strides although Torch currently never produces those. - bool index64b = false; - if (std::abs(p.bStride * x.size(1)) > INT_MAX) index64b = true; - if (std::min(x.size(0) * p.xStride.w, 0ll) + std::min(x.size(1) * p.xStride.z, 0ll) + std::min(x.size(2) * p.xStride.y, 0ll) + std::min(x.size(3) * p.xStride.x, 0ll) < -INT_MAX) index64b = true; - if (std::max(x.size(0) * p.xStride.w, 0ll) + std::max(x.size(1) * p.xStride.z, 0ll) + std::max(x.size(2) * p.xStride.y, 0ll) + std::max(x.size(3) * p.xStride.x, 0ll) > INT_MAX) index64b = true; - if (std::min(y.size(0) * p.yStride.w, 0ll) + std::min(y.size(1) * p.yStride.z, 0ll) + std::min(y.size(2) * p.yStride.y, 0ll) + std::min(y.size(3) * p.yStride.x, 0ll) < -INT_MAX) index64b = true; - if (std::max(y.size(0) * p.yStride.w, 0ll) + std::max(y.size(1) * p.yStride.z, 0ll) + std::max(y.size(2) * p.yStride.y, 0ll) + std::max(y.size(3) * p.yStride.x, 0ll) > INT_MAX) index64b = true; - if (s.numel() > INT_MAX) index64b = true; - - // Choose CUDA kernel. - filtered_lrelu_kernel_spec spec = { 0 }; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_cuda", [&] - { - if constexpr (sizeof(scalar_t) <= 4) // Exclude doubles. constexpr prevents template instantiation. - { - // Choose kernel based on index type, datatype and sign read/write modes. 
- if (!index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if (!index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if (!index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - } - }); - TORCH_CHECK(spec.exec, "internal error - CUDA kernel not found") // This should not happen because we tested earlier that kernel exists. - - // Launch CUDA kernel. - void* args[] = {&p}; - int bx = spec.numWarps * 32; - int gx = (p.yShape.x - 1) / spec.tileOut.x + 1; - int gy = (p.yShape.y - 1) / spec.tileOut.y + 1; - int gz = p.yShape.z * p.yShape.w; - - // Repeat multiple horizontal tiles in a CTA? - if (spec.xrep) - { - p.tilesXrep = spec.xrep; - p.tilesXdim = gx; - - gx = (gx + p.tilesXrep - 1) / p.tilesXrep; - std::swap(gx, gy); - } - else - { - p.tilesXrep = 0; - p.tilesXdim = 0; - } - - // Launch filter setup kernel. - AT_CUDA_CHECK(cudaLaunchKernel(spec.setup, 1, 1024, args, 0, at::cuda::getCurrentCUDAStream())); - - // Copy kernels to constant memory. - if ( writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - else if (!writeSigns && readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - else if (!writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - - // Set cache and shared memory configurations for main kernel. - AT_CUDA_CHECK(cudaFuncSetCacheConfig(spec.exec, cudaFuncCachePreferShared)); - if (spec.dynamicSharedKB) // Need dynamically allocated shared memory? - AT_CUDA_CHECK(cudaFuncSetAttribute(spec.exec, cudaFuncAttributeMaxDynamicSharedMemorySize, spec.dynamicSharedKB << 10)); - AT_CUDA_CHECK(cudaFuncSetSharedMemConfig(spec.exec, cudaSharedMemBankSizeFourByte)); - - // Launch main kernel. - const int maxSubGz = 65535; // CUDA maximum for block z dimension. - for (int zofs=0; zofs < gz; zofs += maxSubGz) // Do multiple launches if gz is too big. - { - p.blockZofs = zofs; - int subGz = std::min(maxSubGz, gz - zofs); - AT_CUDA_CHECK(cudaLaunchKernel(spec.exec, dim3(gx, gy, subGz), bx, args, spec.dynamicSharedKB << 10, at::cuda::getCurrentCUDAStream())); - } - - // Done. - return std::make_tuple(y, so, 0); -} - -//------------------------------------------------------------------------ - -static torch::Tensor filtered_lrelu_act(torch::Tensor x, torch::Tensor si, int sx, int sy, float gain, float slope, float clamp, bool writeSigns) -{ - // Set CUDA device. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - - // Validate arguments. - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); - TORCH_CHECK(x.numel() > 0, "x is empty"); - TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat || x.dtype() == torch::kDouble, "x must be float16, float32 or float64"); - - // Output signs if we don't have sign input. 
- torch::Tensor so; - torch::Tensor s = si; - bool readSigns = !!s.numel(); - if (writeSigns) - { - int64_t sw = x.size(3); - sw = (sw + 15) & ~15; // Round to a multiple of 16 for coalescing. - s = so = torch::empty({x.size(0), x.size(1), x.size(2), sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); - } - - // Validate sign tensor if in use. - if (readSigns || writeSigns) - { - TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); - TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); - TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); - TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); - TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); - TORCH_CHECK(s.size(2) <= INT_MAX && (s.size(3) << 2) <= INT_MAX, "signs tensor is too large"); - } - - // Initialize CUDA kernel parameters. - filtered_lrelu_act_kernel_params p; - p.x = x.data_ptr(); - p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; - p.gain = gain; - p.slope = slope; - p.clamp = clamp; - p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.xStride = make_longlong4(x.stride(3), x.stride(2), x.stride(1), x.stride(0)); - p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3) << 2, (int)s.size(2)) : make_int2(0, 0); // Width is in elements. Contiguous. - p.sOfs = make_int2(sx, sy); - - // Choose CUDA kernel. - void* func = 0; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_act_cuda", [&] - { - if (writeSigns) - func = choose_filtered_lrelu_act_kernel(); - else if (readSigns) - func = choose_filtered_lrelu_act_kernel(); - else - func = choose_filtered_lrelu_act_kernel(); - }); - TORCH_CHECK(func, "internal error - CUDA kernel not found"); - - // Launch CUDA kernel. - void* args[] = {&p}; - int bx = 128; // 4 warps per block. - - // Logical size of launch = writeSigns ? p.s : p.x - uint32_t gx = writeSigns ? p.sShape.x : p.xShape.x; - uint32_t gy = writeSigns ? p.sShape.y : p.xShape.y; - uint32_t gz = p.xShape.z * p.xShape.w; // Same as in p.sShape if signs are in use. - gx = (gx - 1) / bx + 1; - - // Make sure grid y and z dimensions are within CUDA launch limits. Kernel loops internally to do the rest. - const uint32_t gmax = 65535; - gy = std::min(gy, gmax); - gz = std::min(gz, gmax); - - // Launch. - AT_CUDA_CHECK(cudaLaunchKernel(func, dim3(gx, gy, gz), bx, args, 0, at::cuda::getCurrentCUDAStream())); - return so; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("filtered_lrelu", &filtered_lrelu); // The whole thing. - m.def("filtered_lrelu_act_", &filtered_lrelu_act); // Activation and sign tensor handling only. Modifies data tensor in-place. 
-} - -//------------------------------------------------------------------------ diff --git a/spaces/EronSamez/RVC_HFmeu/gui_v0.py b/spaces/EronSamez/RVC_HFmeu/gui_v0.py deleted file mode 100644 index 88c3cf9eb1eaa0fa812b32ae4d3750b4ce0a8699..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/gui_v0.py +++ /dev/null @@ -1,786 +0,0 @@ -import os, sys, traceback, re - -import json - -now_dir = os.getcwd() -sys.path.append(now_dir) -from configs.config import Config - -Config = Config() -import PySimpleGUI as sg -import sounddevice as sd -import noisereduce as nr -import numpy as np -from fairseq import checkpoint_utils -import librosa, torch, pyworld, faiss, time, threading -import torch.nn.functional as F -import torchaudio.transforms as tat -import scipy.signal as signal -import torchcrepe - -# import matplotlib.pyplot as plt -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from i18n import I18nAuto - -i18n = I18nAuto() -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -current_dir = os.getcwd() - - -class RVC: - def __init__( - self, key, f0_method, hubert_path, pth_path, index_path, npy_path, index_rate - ) -> None: - """ - 初始化 - """ - try: - self.f0_up_key = key - self.time_step = 160 / 16000 * 1000 - self.f0_min = 50 - self.f0_max = 1100 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - self.f0_method = f0_method - self.sr = 16000 - self.window = 160 - - # Get Torch Device - if torch.cuda.is_available(): - self.torch_device = torch.device( - f"cuda:{0 % torch.cuda.device_count()}" - ) - elif torch.backends.mps.is_available(): - self.torch_device = torch.device("mps") - else: - self.torch_device = torch.device("cpu") - - if index_rate != 0: - self.index = faiss.read_index(index_path) - # self.big_npy = np.load(npy_path) - self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) - print("index search enabled") - self.index_rate = index_rate - model_path = hubert_path - print("load model(s) from {}".format(model_path)) - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [model_path], - suffix="", - ) - self.model = models[0] - self.model = self.model.to(device) - if Config.is_half: - self.model = self.model.half() - else: - self.model = self.model.float() - self.model.eval() - cpt = torch.load(pth_path, map_location="cpu") - self.tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=Config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=Config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del self.net_g.enc_q - print(self.net_g.load_state_dict(cpt["weight"], strict=False)) - self.net_g.eval().to(device) - if Config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - except: - print(traceback.format_exc()) - - def get_regular_crepe_computation(self, x, f0_min, f0_max, model="full"): - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - 
f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.torch_device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - return f0 - - def get_harvest_computation(self, x, f0_min, f0_max): - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - return f0 - - def get_f0(self, x, f0_up_key, inp_f0=None): - # Calculate Padding and f0 details here - p_len = x.shape[0] // 512 # For Now This probs doesn't work - x_pad = 1 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = 0 - # Here, check f0_methods and get their computations - if self.f0_method == "harvest": - f0 = self.get_harvest_computation(x, f0_min, f0_max) - elif self.f0_method == "reg-crepe": - f0 = self.get_regular_crepe_computation(x, f0_min, f0_max) - elif self.f0_method == "reg-crepe-tiny": - f0 = self.get_regular_crepe_computation(x, f0_min, f0_max, "tiny") - - # Calculate f0_course and f0_bak here - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0] - f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def infer(self, feats: torch.Tensor) -> np.ndarray: - """ - 推理函数 - """ - audio = feats.clone().cpu().numpy() - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - if Config.is_half: - feats = feats.half() - else: - feats = feats.float() - inputs = { - "source": feats.to(device), - "padding_mask": padding_mask.to(device), - "output_layer": 9 if self.version == "v1" else 12, - } - torch.cuda.synchronize() - with torch.no_grad(): - logits = self.model.extract_features(**inputs) - feats = ( - self.model.final_proj(logits[0]) if self.version == "v1" else logits[0] - ) - - ####索引优化 - try: - if ( - hasattr(self, "index") - and hasattr(self, "big_npy") - and self.index_rate != 0 - ): - npy = feats[0].cpu().numpy().astype("float32") - score, ix = self.index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - if Config.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate - + (1 - self.index_rate) * feats - ) - else: - print("index search FAIL or disabled") - except: - traceback.print_exc() - print("index search FAIL") - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - 
torch.cuda.synchronize() - print(feats.shape) - if self.if_f0 == 1: - pitch, pitchf = self.get_f0(audio, self.f0_up_key) - p_len = min(feats.shape[1], 13000, pitch.shape[0]) # 太大了爆显存 - else: - pitch, pitchf = None, None - p_len = min(feats.shape[1], 13000) # 太大了爆显存 - torch.cuda.synchronize() - # print(feats.shape,pitch.shape) - feats = feats[:, :p_len, :] - if self.if_f0 == 1: - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.LongTensor(pitch).unsqueeze(0).to(device) - pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device) - p_len = torch.LongTensor([p_len]).to(device) - ii = 0 # sid - sid = torch.LongTensor([ii]).to(device) - with torch.no_grad(): - if self.if_f0 == 1: - infered_audio = ( - self.net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] - .data.cpu() - .float() - ) - else: - infered_audio = ( - self.net_g.infer(feats, p_len, sid)[0][0, 0].data.cpu().float() - ) - torch.cuda.synchronize() - return infered_audio - - -class GUIConfig: - def __init__(self) -> None: - self.hubert_path: str = "" - self.pth_path: str = "" - self.index_path: str = "" - self.npy_path: str = "" - self.f0_method: str = "" - self.pitch: int = 12 - self.samplerate: int = 44100 - self.block_time: float = 1.0 # s - self.buffer_num: int = 1 - self.threhold: int = -30 - self.crossfade_time: float = 0.08 - self.extra_time: float = 0.04 - self.I_noise_reduce = False - self.O_noise_reduce = False - self.index_rate = 0.3 - - -class GUI: - def __init__(self) -> None: - self.config = GUIConfig() - self.flag_vc = False - - self.launcher() - - def load(self): - ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) = self.get_devices() - try: - with open("values1.json", "r") as j: - data = json.load(j) - except: - # Injecting f0_method into the json data - with open("values1.json", "w") as j: - data = { - "pth_path": "", - "index_path": "", - "sg_input_device": input_devices[ - input_devices_indices.index(sd.default.device[0]) - ], - "sg_output_device": output_devices[ - output_devices_indices.index(sd.default.device[1]) - ], - "threhold": "-45", - "pitch": "0", - "index_rate": "0", - "block_time": "1", - "crossfade_length": "0.04", - "extra_time": "1", - } - return data - - def launcher(self): - data = self.load() - sg.theme("DarkTeal12") - input_devices, output_devices, _, _ = self.get_devices() - layout = [ - [ - sg.Frame( - title="Proudly forked by Mangio621", - ), - sg.Frame( - title=i18n("Load model"), - layout=[ - [ - sg.Input( - default_text="hubert_base.pt", - key="hubert_path", - disabled=True, - ), - sg.FileBrowse( - i18n("Hubert Model"), - initial_folder=os.path.join(os.getcwd()), - file_types=(("pt files", "*.pt"),), - ), - ], - [ - sg.Input( - default_text=data.get("pth_path", ""), - key="pth_path", - ), - sg.FileBrowse( - i18n("Select the .pth file"), - initial_folder=os.path.join(os.getcwd(), "weights"), - file_types=(("weight files", "*.pth"),), - ), - ], - [ - sg.Input( - default_text=data.get("index_path", ""), - key="index_path", - ), - sg.FileBrowse( - i18n("Select the .index file"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=(("index files", "*.index"),), - ), - ], - [ - sg.Input( - default_text="你不需要填写这个You don't need write this.", - key="npy_path", - disabled=True, - ), - sg.FileBrowse( - i18n("Select the .npy file"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=(("feature files", "*.npy"),), - ), - ], - ], - ), - ], - [ - # Mangio f0 Selection frame Here - sg.Frame( - layout=[ - [ - sg.Radio( - 
"Harvest", "f0_method", key="harvest", default=True - ), - sg.Radio("Crepe", "f0_method", key="reg-crepe"), - sg.Radio("Crepe Tiny", "f0_method", key="reg-crepe-tiny"), - ] - ], - title="Select an f0 Method", - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("Input device")), - sg.Combo( - input_devices, - key="sg_input_device", - default_value=data.get("sg_input_device", ""), - ), - ], - [ - sg.Text(i18n("Output device")), - sg.Combo( - output_devices, - key="sg_output_device", - default_value=data.get("sg_output_device", ""), - ), - ], - ], - title=i18n("Audio device (please use the same type of driver)"), - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("Response threshold")), - sg.Slider( - range=(-60, 0), - key="threhold", - resolution=1, - orientation="h", - default_value=data.get("threhold", ""), - ), - ], - [ - sg.Text(i18n("Pitch settings")), - sg.Slider( - range=(-24, 24), - key="pitch", - resolution=1, - orientation="h", - default_value=data.get("pitch", ""), - ), - ], - [ - sg.Text(i18n("Index Rate")), - sg.Slider( - range=(0.0, 1.0), - key="index_rate", - resolution=0.01, - orientation="h", - default_value=data.get("index_rate", ""), - ), - ], - ], - title=i18n("General settings"), - ), - sg.Frame( - layout=[ - [ - sg.Text(i18n("Sample length")), - sg.Slider( - range=(0.1, 3.0), - key="block_time", - resolution=0.1, - orientation="h", - default_value=data.get("block_time", ""), - ), - ], - [ - sg.Text(i18n("Fade length")), - sg.Slider( - range=(0.01, 0.15), - key="crossfade_length", - resolution=0.01, - orientation="h", - default_value=data.get("crossfade_length", ""), - ), - ], - [ - sg.Text(i18n("Extra推理时长")), - sg.Slider( - range=(0.05, 3.00), - key="extra_time", - resolution=0.01, - orientation="h", - default_value=data.get("extra_time", ""), - ), - ], - [ - sg.Checkbox(i18n("Input noise reduction"), key="I_noise_reduce"), - sg.Checkbox(i18n("Output noise reduction"), key="O_noise_reduce"), - ], - ], - title=i18n("Performance settings"), - ), - ], - [ - sg.Button(i18n("开始音频Convert"), key="start_vc"), - sg.Button(i18n("停止音频Convert"), key="stop_vc"), - sg.Text(i18n("Inference time (ms):")), - sg.Text("0", key="infer_time"), - ], - ] - self.window = sg.Window("RVC - GUI", layout=layout) - self.event_handler() - - def event_handler(self): - while True: - event, values = self.window.read() - if event == sg.WINDOW_CLOSED: - self.flag_vc = False - exit() - if event == "start_vc" and self.flag_vc == False: - if self.set_values(values) == True: - print("using_cuda:" + str(torch.cuda.is_available())) - self.start_vc() - settings = { - "pth_path": values["pth_path"], - "index_path": values["index_path"], - "f0_method": self.get_f0_method_from_radios(values), - "sg_input_device": values["sg_input_device"], - "sg_output_device": values["sg_output_device"], - "threhold": values["threhold"], - "pitch": values["pitch"], - "index_rate": values["index_rate"], - "block_time": values["block_time"], - "crossfade_length": values["crossfade_length"], - "extra_time": values["extra_time"], - } - with open("values1.json", "w") as j: - json.dump(settings, j) - if event == "stop_vc" and self.flag_vc == True: - self.flag_vc = False - - # Function that returns the used f0 method in string format "harvest" - def get_f0_method_from_radios(self, values): - f0_array = [ - {"name": "harvest", "val": values["harvest"]}, - {"name": "reg-crepe", "val": values["reg-crepe"]}, - {"name": "reg-crepe-tiny", "val": values["reg-crepe-tiny"]}, - ] - # Filter through to find a true value - used_f0 = "" - for 
f0 in f0_array: - if f0["val"] == True: - used_f0 = f0["name"] - break - if used_f0 == "": - used_f0 = "harvest" # Default Harvest if used_f0 is empty somehow - return used_f0 - - def set_values(self, values): - if len(values["pth_path"].strip()) == 0: - sg.popup(i18n("Select the pth file")) - return False - if len(values["index_path"].strip()) == 0: - sg.popup(i18n("Select the index file")) - return False - pattern = re.compile("[^\x00-\x7F]+") - if pattern.findall(values["hubert_path"]): - sg.popup(i18n("The hubert model path must not contain Chinese characters")) - return False - if pattern.findall(values["pth_path"]): - sg.popup(i18n("The pth file path must not contain Chinese characters.")) - return False - if pattern.findall(values["index_path"]): - sg.popup(i18n("The index file path must not contain Chinese characters.")) - return False - self.set_devices(values["sg_input_device"], values["sg_output_device"]) - self.config.hubert_path = os.path.join(current_dir, "hubert_base.pt") - self.config.pth_path = values["pth_path"] - self.config.index_path = values["index_path"] - self.config.npy_path = values["npy_path"] - self.config.f0_method = self.get_f0_method_from_radios(values) - self.config.threhold = values["threhold"] - self.config.pitch = values["pitch"] - self.config.block_time = values["block_time"] - self.config.crossfade_time = values["crossfade_length"] - self.config.extra_time = values["extra_time"] - self.config.I_noise_reduce = values["I_noise_reduce"] - self.config.O_noise_reduce = values["O_noise_reduce"] - self.config.index_rate = values["index_rate"] - return True - - def start_vc(self): - torch.cuda.empty_cache() - self.flag_vc = True - self.block_frame = int(self.config.block_time * self.config.samplerate) - self.crossfade_frame = int(self.config.crossfade_time * self.config.samplerate) - self.sola_search_frame = int(0.012 * self.config.samplerate) - self.delay_frame = int(0.01 * self.config.samplerate) # 往前预留0.02s - self.extra_frame = int(self.config.extra_time * self.config.samplerate) - self.rvc = None - self.rvc = RVC( - self.config.pitch, - self.config.f0_method, - self.config.hubert_path, - self.config.pth_path, - self.config.index_path, - self.config.npy_path, - self.config.index_rate, - ) - self.input_wav: np.ndarray = np.zeros( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame, - dtype="float32", - ) - self.output_wav: torch.Tensor = torch.zeros( - self.block_frame, device=device, dtype=torch.float32 - ) - self.sola_buffer: torch.Tensor = torch.zeros( - self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_in_window: torch.Tensor = torch.linspace( - 0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_out_window: torch.Tensor = 1 - self.fade_in_window - self.resampler1 = tat.Resample( - orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32 - ) - self.resampler2 = tat.Resample( - orig_freq=self.rvc.tgt_sr, - new_freq=self.config.samplerate, - dtype=torch.float32, - ) - thread_vc = threading.Thread(target=self.soundinput) - thread_vc.start() - - def soundinput(self): - """ - 接受音频输入 - """ - with sd.Stream( - channels=2, - callback=self.audio_callback, - blocksize=self.block_frame, - samplerate=self.config.samplerate, - dtype="float32", - ): - while self.flag_vc: - time.sleep(self.config.block_time) - print("Audio block passed.") - print("ENDing VC") - - def audio_callback( - self, indata: np.ndarray, outdata: np.ndarray, frames, times, status - ): - 
""" - 音频处理 - """ - start_time = time.perf_counter() - indata = librosa.to_mono(indata.T) - if self.config.I_noise_reduce: - indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate) - - """noise gate""" - frame_length = 2048 - hop_length = 1024 - rms = librosa.feature.rms( - y=indata, frame_length=frame_length, hop_length=hop_length - ) - db_threhold = librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold - # print(rms.shape,db.shape,db) - for i in range(db_threhold.shape[0]): - if db_threhold[i]: - indata[i * hop_length : (i + 1) * hop_length] = 0 - self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata) - - # infer - print("input_wav:" + str(self.input_wav.shape)) - # print('infered_wav:'+str(infer_wav.shape)) - infer_wav: torch.Tensor = self.resampler2( - self.rvc.infer(self.resampler1(torch.from_numpy(self.input_wav))) - )[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].to( - device - ) - print("infer_wav:" + str(infer_wav.shape)) - - # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC - cor_nom = F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame], - self.sola_buffer[None, None, :], - ) - cor_den = torch.sqrt( - F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame] - ** 2, - torch.ones(1, 1, self.crossfade_frame, device=device), - ) - + 1e-8 - ) - sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0]) - print("sola offset: " + str(int(sola_offset))) - - # crossfade - self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame] - self.output_wav[: self.crossfade_frame] *= self.fade_in_window - self.output_wav[: self.crossfade_frame] += self.sola_buffer[:] - if sola_offset < self.sola_search_frame: - self.sola_buffer[:] = ( - infer_wav[ - -self.sola_search_frame - - self.crossfade_frame - + sola_offset : -self.sola_search_frame - + sola_offset - ] - * self.fade_out_window - ) - else: - self.sola_buffer[:] = ( - infer_wav[-self.crossfade_frame :] * self.fade_out_window - ) - - if self.config.O_noise_reduce: - outdata[:] = np.tile( - nr.reduce_noise( - y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate - ), - (2, 1), - ).T - else: - outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy() - total_time = time.perf_counter() - start_time - self.window["infer_time"].update(int(total_time * 1000)) - print("infer time:" + str(total_time)) - print("f0_method: " + str(self.config.f0_method)) - - def get_devices(self, update: bool = True): - """获取设备列表""" - if update: - sd._terminate() - sd._initialize() - devices = sd.query_devices() - hostapis = sd.query_hostapis() - for hostapi in hostapis: - for device_idx in hostapi["devices"]: - devices[device_idx]["hostapi_name"] = hostapi["name"] - input_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_output_channels"] > 0 - ] - input_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_output_channels"] > 0 - ] - return ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) - - def set_devices(self, input_device, output_device): - """设置输出设备""" - ( - input_devices, - output_devices, - input_device_indices, - output_device_indices, - ) = 
self.get_devices() - sd.default.device[0] = input_device_indices[input_devices.index(input_device)] - sd.default.device[1] = output_device_indices[ - output_devices.index(output_device) - ] - print("input device:" + str(sd.default.device[0]) + ":" + str(input_device)) - print("output device:" + str(sd.default.device[1]) + ":" + str(output_device)) - - -gui = GUI() diff --git a/spaces/EuroPython2022/clickbaitonator/fudge/main.py b/spaces/EuroPython2022/clickbaitonator/fudge/main.py deleted file mode 100644 index e8c2299b2449b6dd07d26c7ae678732b1dabca88..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/clickbaitonator/fudge/main.py +++ /dev/null @@ -1,192 +0,0 @@ -import os -import random -import time -import pickle -import math -from argparse import ArgumentParser - -from tqdm import tqdm -import numpy as np -import torch -import torch.nn as nn - -from data import Dataset -from model import Model -from util import save_checkpoint, ProgressMeter, AverageMeter, num_params, pad_mask -from constants import * - - -def train(model, dataset, optimizer, criterion, epoch, args, data_start_index): - model.train() - if data_start_index == 0: - dataset.shuffle('train', seed=epoch + args.seed) - if args.epoch_max_len is not None: - data_end_index = min(data_start_index + args.epoch_max_len, len(dataset.splits['train'])) - loader = dataset.loader('train', num_workers=args.num_workers, indices=list(range(data_start_index, data_end_index))) - data_start_index = data_end_index if data_end_index < len(dataset.splits['train']) else 0 - else: - loader = dataset.loader('train', num_workers=args.num_workers) - loss_meter = AverageMeter('loss', ':6.4f') - total_length = len(loader) - progress = ProgressMeter(total_length, [loss_meter], prefix='Training: ') - for batch_num, batch in enumerate(tqdm(loader, total=len(loader))): - batch = [tensor.to(args.device) for tensor in batch] - inputs, lengths, future_words, log_probs, labels, classification_targets, syllables_to_go, future_word_num_syllables, rhyme_group_index = batch - if args.task not in ['formality', 'iambic']: - if not args.debug and len(inputs) != args.batch_size: # it'll screw up the bias...? - continue - scores = model(inputs, lengths, future_words, log_probs, syllables_to_go, future_word_num_syllables, rhyme_group_index, run_classifier=True) - if args.task == 'formality': # we're learning for all positions at once. 
scores are batch x seq - expanded_labels = classification_targets.unsqueeze(1).expand(-1, scores.shape[1]) # batch x seq - length_mask = pad_mask(lengths).permute(1, 0) # batch x seq - loss = criterion(scores.flatten()[length_mask.flatten()==1], expanded_labels.flatten().float()[length_mask.flatten()==1]) - elif args.task in ['iambic', 'newline']: - use_indices = classification_targets.flatten() != -1 - loss = criterion(scores.flatten()[use_indices], classification_targets.flatten().float()[use_indices]) - else: # topic, rhyme - loss = criterion(scores.flatten(), labels.flatten().float()) - optimizer.zero_grad() - loss.backward() - optimizer.step() - loss_meter.update(loss.detach(), len(labels)) - if batch_num % args.train_print_freq == 0: - progress.display(batch_num) - progress.display(total_length) - return data_start_index - - -def validate(model, dataset, criterion, epoch, args): - model.eval() - random.seed(0) - loader = dataset.loader('val', num_workers=args.num_workers) - loss_meter = AverageMeter('loss', ':6.4f') - total_length = len(loader) - progress = ProgressMeter(total_length, [loss_meter], prefix='Validation: ') - with torch.no_grad(): - for batch_num, batch in enumerate(tqdm(loader, total=len(loader))): - batch = [tensor.to(args.device) for tensor in batch] - inputs, lengths, future_words, log_probs, labels, classification_targets, syllables_to_go, future_word_num_syllables, rhyme_group_index = batch - if args.task not in ['formality', 'iambic']: # topic predictor - if not args.debug and len(inputs) != args.batch_size: - continue - scores = model(inputs, lengths, future_words, log_probs, syllables_to_go, future_word_num_syllables, rhyme_group_index, run_classifier=True) - if args.task == 'formality': # we're learning for all positions at once. 
scores are batch x seq - expanded_labels = classification_targets.unsqueeze(1).expand(-1, scores.shape[1]) # batch x seq - length_mask = pad_mask(lengths).permute(1, 0) # batch x seq - loss = criterion(scores.flatten()[length_mask.flatten()==1], expanded_labels.flatten().float()[length_mask.flatten()==1]) - elif args.task in ['iambic', 'newline']: - use_indices = classification_targets.flatten() != -1 - loss = criterion(scores.flatten()[use_indices], classification_targets.flatten().float()[use_indices]) - else: # topic, rhyme - loss = criterion(scores.flatten(), labels.flatten().float()) - loss_meter.update(loss.detach(), len(labels)) - if batch_num % args.train_print_freq == 0: - progress.display(batch_num) - progress.display(total_length) - return loss_meter.avg - - -def main(args): - dataset = Dataset(args) - os.makedirs(args.save_dir, exist_ok=True) - with open(os.path.join(args.save_dir, 'dataset_info'), 'wb') as wf: - pickle.dump(dataset.dataset_info, wf) - if args.task == 'rhyme': - with open(os.path.join(args.save_dir, 'rhyme_info'), 'wb') as wf: - pickle.dump(dataset.rhyme_info, wf) - if args.ckpt: - checkpoint = torch.load(args.ckpt, map_location=args.device) - start_epoch = checkpoint['epoch'] + 1 - best_val_metric = checkpoint['best_metric'] - model_args = checkpoint['args'] - model = Model(model_args, dataset.gpt_pad_id, len(dataset.index2word), rhyme_group_size=len(dataset.index2rhyme_group) if args.task == 'rhyme' else None) # no need to get the glove embeddings when reloading since they're saved in model ckpt anyway - model.load_state_dict(checkpoint['state_dict']) - model = model.to(args.device) - optimizer = torch.optim.Adam(model.parameters(), lr=model_args.lr) - optimizer.load_state_dict(checkpoint['optimizer']) - data_start_index = checkpoint['data_start_index'] - print("=> loaded checkpoint '{}' (epoch {})" - .format(args.ckpt, checkpoint['epoch'])) - # NOTE: just import pdb after loading the model here if you want to play with it, it's easy - # model.eval() - # import pdb; pdb.set_trace() - else: - model = Model(args, dataset.gpt_pad_id, len(dataset.index2word), rhyme_group_size=len(dataset.index2rhyme_group) if args.task == 'rhyme' else None, glove_embeddings=dataset.glove_embeddings) - model = model.to(args.device) - optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) - best_val_metric = 1e8 # lower is better for BCE - data_start_index = 0 - print('num params', num_params(model)) - criterion = nn.BCEWithLogitsLoss().to(args.device) - - if args.evaluate: - epoch = 0 - validate(model, dataset, criterion, epoch, args) - return - for epoch in range(args.epochs): - print("TRAINING: Epoch {} at {}".format(epoch, time.ctime())) - data_start_index = train(model, dataset, optimizer, criterion, epoch, args, data_start_index) - if epoch % args.validation_freq == 0: - print("VALIDATION: Epoch {} at {}".format(epoch, time.ctime())) - metric = validate(model, dataset, criterion, epoch, args) - - if not args.debug: - if metric < best_val_metric: - print('new best val metric', metric) - best_val_metric = metric - save_checkpoint({ - 'epoch': epoch, - 'state_dict': model.state_dict(), - 'best_metric': best_val_metric, - 'optimizer': optimizer.state_dict(), - 'data_start_index': data_start_index, - 'args': args - }, os.path.join(args.save_dir, 'model_best.pth.tar')) - save_checkpoint({ - 'epoch': epoch, - 'state_dict': model.state_dict(), - 'best_metric': metric, - 'optimizer': optimizer.state_dict(), - 'data_start_index': data_start_index, - 'args': args - }, 
os.path.join(args.save_dir, 'model_epoch' + str(epoch) + '.pth.tar')) - - -if __name__=='__main__': - parser = ArgumentParser() - - # DATA - parser.add_argument('--task', type=str, required=True, choices=['iambic', 'rhyme', 'newline', 'topic', 'formality', 'clickbait']) - parser.add_argument('--data_dir', type=str, required=True) - parser.add_argument('--glove_file', type=str, help='glove embedding init, for topic task') - - # SAVE/LOAD - parser.add_argument('--save_dir', type=str, required=True, help='where to save ckpts') - parser.add_argument('--ckpt', type=str, default=None, help='load ckpt from file if given') - parser.add_argument('--dataset_info', type=str, help='saved dataset info') - parser.add_argument('--rhyme_info', type=str, help='saved dataset rhyme info, for a ckpt with task==rhyme') - - # TRAINING - parser.add_argument('--batch_size', type=int, default=128) - parser.add_argument('--epochs', type=int, default=100) - parser.add_argument('--epoch_max_len', type=int, default=None, help='max batches per epoch if set, for more frequent validation') - parser.add_argument('--validation_freq', type=int, default=1, help='validate every X epochs') - parser.add_argument('--lr', type=float, default=1e-3, help='Adam learning rate') - parser.add_argument('--seed', type=int, default=1, help='random seed') - parser.add_argument('--device', type=str, default='cuda', choices=['cpu', 'cuda']) - parser.add_argument('--num_workers', type=int, default=20, help='num workers for data loader') - parser.add_argument('--evaluate', action='store_true', default=False) - parser.add_argument('--debug', action='store_true', default=False) - - # PRINTING - parser.add_argument('--train_print_freq', type=int, default=100, help='how often to print metrics (every X batches)') - - args = parser.parse_args() - - random.seed(args.seed) - np.random.seed(args.seed) - torch.manual_seed(args.seed) - if args.evaluate: - assert args.ckpt is not None - - main(args) \ No newline at end of file diff --git a/spaces/Felladrin/Web-LLM-Mistral-7B-OpenOrca/src/index.ts b/spaces/Felladrin/Web-LLM-Mistral-7B-OpenOrca/src/index.ts deleted file mode 100644 index 9ba0fe30a092c73c8321a6bb2502750471b133f2..0000000000000000000000000000000000000000 --- a/spaces/Felladrin/Web-LLM-Mistral-7B-OpenOrca/src/index.ts +++ /dev/null @@ -1,279 +0,0 @@ -import { - ChatInterface, - ChatModule, - ChatRestModule, - ChatWorkerClient, -} from "@mlc-ai/web-llm"; - -function getElementAndCheck(id: string): HTMLElement { - const element = document.getElementById(id); - if (element == null) { - throw Error("Cannot find element " + id); - } - return element; -} - -const appConfig = { - model_list: [ - { - model_url: - "https://huggingface.co/Felladrin/mlc-chat-Mistral-7B-OpenOrca-q4f32_1/resolve/main/params/", - local_id: "Mistral-7B-OpenOrca-q4f32_1", - }, - ], - model_lib_map: { - "Mistral-7B-OpenOrca-q4f32_1": - "https://huggingface.co/Felladrin/mlc-chat-Mistral-7B-OpenOrca-q4f32_1/resolve/main/Mistral-7B-OpenOrca-q4f32_1-webgpu.wasm", - }, - use_web_worker: true, -}; - -class ChatUI { - private uiChat: HTMLElement; - private uiChatInput: HTMLInputElement; - private uiChatInfoLabel: HTMLLabelElement; - private chat: ChatInterface; - private localChat: ChatInterface; - private config = appConfig; - private selectedModel: string; - private chatLoaded = false; - private requestInProgress = false; - // We use a request chain to ensure that - // all requests send to chat are sequentialized - private chatRequestChain: Promise = Promise.resolve(); - - 
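  // Illustrative sketch (not from the original file): the request-chain pattern the
  // comments above describe. Each queued task is chained onto the previous promise,
  // so generate/reset requests run strictly one at a time; the names below are
  // hypothetical.
  //
  //   let chain: Promise<void> = Promise.resolve();
  //   const enqueue = (task: () => Promise<void>) => {
  //     chain = chain.then(task); // the next task starts only after the previous one settles
  //   };
  //   enqueue(async () => { /* generate */ });
  //   enqueue(async () => { /* reset; guaranteed to run after generate finishes */ });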
constructor(chat: ChatInterface, localChat: ChatInterface) { - // use web worker to run chat generation in background - this.chat = chat; - this.localChat = localChat; - // get the elements - this.uiChat = getElementAndCheck("chatui-chat"); - this.uiChatInput = getElementAndCheck("chatui-input") as HTMLInputElement; - this.uiChatInfoLabel = getElementAndCheck( - "chatui-info-label" - ) as HTMLLabelElement; - // register event handlers - getElementAndCheck("chatui-reset-btn").onclick = () => { - this.onReset(); - }; - getElementAndCheck("chatui-send-btn").onclick = () => { - this.onGenerate(); - }; - // TODO: find other alternative triggers - getElementAndCheck("chatui-input").onkeypress = (event) => { - if (event.keyCode === 13) { - this.onGenerate(); - } - }; - - const modelSelector = getElementAndCheck( - "chatui-select" - ) as HTMLSelectElement; - for (let i = 0; i < this.config.model_list.length; ++i) { - const item = this.config.model_list[i]; - const opt = document.createElement("option"); - opt.value = item.local_id; - opt.innerHTML = item.local_id; - opt.selected = i == 0; - modelSelector.appendChild(opt); - } - // Append local server option to the model selector - const localServerOpt = document.createElement("option"); - localServerOpt.value = "Local Server"; - localServerOpt.innerHTML = "Local Server"; - modelSelector.append(localServerOpt); - this.selectedModel = modelSelector.value; - modelSelector.onchange = () => { - this.onSelectChange(modelSelector); - }; - } - /** - * Push a task to the execution queue. - * - * @param task The task to be executed; - */ - private pushTask(task: () => Promise) { - const lastEvent = this.chatRequestChain; - this.chatRequestChain = lastEvent.then(task); - } - // Event handlers - // all event handler pushes the tasks to a queue - // that get executed sequentially - // the tasks previous tasks, which causes them to early stop - // can be interrupted by chat.interruptGenerate - private async onGenerate() { - if (this.requestInProgress) { - return; - } - this.pushTask(async () => { - await this.asyncGenerate(); - }); - } - - private async onSelectChange(modelSelector: HTMLSelectElement) { - if (this.requestInProgress) { - // interrupt previous generation if any - this.chat.interruptGenerate(); - } - // try reset after previous requests finishes - this.pushTask(async () => { - await this.chat.resetChat(); - this.resetChatHistory(); - await this.unloadChat(); - this.selectedModel = modelSelector.value; - await this.asyncInitChat(); - }); - } - - private async onReset() { - if (this.requestInProgress) { - // interrupt previous generation if any - this.chat.interruptGenerate(); - } - // try reset after previous requests finishes - this.pushTask(async () => { - await this.chat.resetChat(); - this.resetChatHistory(); - }); - } - - // Internal helper functions - private appendMessage(kind, text) { - if (kind == "init") { - text = "[System Initalize] " + text; - } - if (this.uiChat === undefined) { - throw Error("cannot find ui chat"); - } - const msg = ` -
-      <div class="msg ${kind}-msg">
-        <div class="msg-bubble">
-          <div class="msg-text">${text}</div>
-        </div>
-      </div>
    - `; - this.uiChat.insertAdjacentHTML("beforeend", msg); - this.uiChat.scrollTo(0, this.uiChat.scrollHeight); - } - - private updateLastMessage(kind, text) { - if (kind == "init") { - text = "[System Initalize] " + text; - } - if (this.uiChat === undefined) { - throw Error("cannot find ui chat"); - } - const matches = this.uiChat.getElementsByClassName(`msg ${kind}-msg`); - if (matches.length == 0) throw Error(`${kind} message do not exist`); - const msg = matches[matches.length - 1]; - const msgText = msg.getElementsByClassName("msg-text"); - if (msgText.length != 1) throw Error("Expect msg-text"); - if (msgText[0].innerHTML == text) return; - const list = text.split("\n").map((t) => { - const item = document.createElement("div"); - item.textContent = t; - return item; - }); - msgText[0].innerHTML = ""; - list.forEach((item) => msgText[0].append(item)); - this.uiChat.scrollTo(0, this.uiChat.scrollHeight); - } - - private resetChatHistory() { - const clearTags = ["left", "right", "init", "error"]; - for (const tag of clearTags) { - // need to unpack to list so the iterator don't get affected by mutation - const matches = [...this.uiChat.getElementsByClassName(`msg ${tag}-msg`)]; - for (const item of matches) { - this.uiChat.removeChild(item); - } - } - if (this.uiChatInfoLabel !== undefined) { - this.uiChatInfoLabel.innerHTML = ""; - } - } - - private async asyncInitChat() { - if (this.chatLoaded) return; - this.requestInProgress = true; - this.appendMessage("init", ""); - const initProgressCallback = (report) => { - this.updateLastMessage("init", report.text); - }; - this.chat.setInitProgressCallback(initProgressCallback); - - try { - if (this.selectedModel != "Local Server") { - await this.chat.reload(this.selectedModel, undefined, this.config); - } - } catch (err) { - this.appendMessage("error", "Init error, " + err.toString()); - console.log(err.stack); - this.unloadChat(); - this.requestInProgress = false; - return; - } - this.requestInProgress = false; - this.chatLoaded = true; - } - - private async unloadChat() { - await this.chat.unload(); - this.chatLoaded = false; - } - - /** - * Run generate - */ - private async asyncGenerate() { - await this.asyncInitChat(); - this.requestInProgress = true; - const prompt = this.uiChatInput.value; - if (prompt == "") { - this.requestInProgress = false; - return; - } - - this.appendMessage("right", prompt); - this.uiChatInput.value = ""; - this.uiChatInput.setAttribute("placeholder", "Generating..."); - - this.appendMessage("left", ""); - const callbackUpdateResponse = (step, msg) => { - if (msg.length === 0) return this.chat.interruptGenerate(); - this.updateLastMessage("left", msg); - }; - - try { - if (this.selectedModel == "Local Server") { - await this.localChat.generate(prompt, callbackUpdateResponse); - this.uiChatInfoLabel.innerHTML = - await this.localChat.runtimeStatsText(); - } else { - await this.chat.generate(prompt, callbackUpdateResponse); - this.uiChatInfoLabel.innerHTML = await this.chat.runtimeStatsText(); - } - } catch (err) { - this.appendMessage("error", "Generate error, " + err.toString()); - console.log(err.stack); - await this.unloadChat(); - } - this.uiChatInput.setAttribute("placeholder", "Enter your message..."); - this.requestInProgress = false; - } -} - -const useWebWorker = appConfig.use_web_worker; -let chat: ChatInterface; -let localChat: ChatInterface; - -if (useWebWorker) { - chat = new ChatWorkerClient( - new Worker(new URL("./worker.ts", import.meta.url), { type: "module" }) - ); - localChat = new 
ChatRestModule(); -} else { - chat = new ChatModule(); - localChat = new ChatRestModule(); -} -new ChatUI(chat, localChat); diff --git a/spaces/Francesco/torch-cam-transformers/app.py b/spaces/Francesco/torch-cam-transformers/app.py deleted file mode 100644 index 6c5bf7ea8344430235dfcc553c112be801348a50..0000000000000000000000000000000000000000 --- a/spaces/Francesco/torch-cam-transformers/app.py +++ /dev/null @@ -1,14 +0,0 @@ -import streamlit as st - -import sys - - -def main(): - # Wide mode - st.set_page_config(layout="wide") - - # Designing the interface - st.title(sys.version) - - -main() diff --git a/spaces/FrankZxShen/so-vits-svc-models-pcr/diffusion/data_loaders.py b/spaces/FrankZxShen/so-vits-svc-models-pcr/diffusion/data_loaders.py deleted file mode 100644 index bf18572329019d7a8f1df01799eda207c16dd7ff..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/so-vits-svc-models-pcr/diffusion/data_loaders.py +++ /dev/null @@ -1,284 +0,0 @@ -import os -import random -import re -import numpy as np -import librosa -import torch -import random -from utils import repeat_expand_2d -from tqdm import tqdm -from torch.utils.data import Dataset - -def traverse_dir( - root_dir, - extensions, - amount=None, - str_include=None, - str_exclude=None, - is_pure=False, - is_sort=False, - is_ext=True): - - file_list = [] - cnt = 0 - for root, _, files in os.walk(root_dir): - for file in files: - if any([file.endswith(f".{ext}") for ext in extensions]): - # path - mix_path = os.path.join(root, file) - pure_path = mix_path[len(root_dir)+1:] if is_pure else mix_path - - # amount - if (amount is not None) and (cnt == amount): - if is_sort: - file_list.sort() - return file_list - - # check string - if (str_include is not None) and (str_include not in pure_path): - continue - if (str_exclude is not None) and (str_exclude in pure_path): - continue - - if not is_ext: - ext = pure_path.split('.')[-1] - pure_path = pure_path[:-(len(ext)+1)] - file_list.append(pure_path) - cnt += 1 - if is_sort: - file_list.sort() - return file_list - - -def get_data_loaders(args, whole_audio=False): - data_train = AudioDataset( - filelists = args.data.training_files, - waveform_sec=args.data.duration, - hop_size=args.data.block_size, - sample_rate=args.data.sampling_rate, - load_all_data=args.train.cache_all_data, - whole_audio=whole_audio, - extensions=args.data.extensions, - n_spk=args.model.n_spk, - spk=args.spk, - device=args.train.cache_device, - fp16=args.train.cache_fp16, - use_aug=True) - loader_train = torch.utils.data.DataLoader( - data_train , - batch_size=args.train.batch_size if not whole_audio else 1, - shuffle=True, - num_workers=args.train.num_workers if args.train.cache_device=='cpu' else 0, - persistent_workers=(args.train.num_workers > 0) if args.train.cache_device=='cpu' else False, - pin_memory=True if args.train.cache_device=='cpu' else False - ) - data_valid = AudioDataset( - filelists = args.data.validation_files, - waveform_sec=args.data.duration, - hop_size=args.data.block_size, - sample_rate=args.data.sampling_rate, - load_all_data=args.train.cache_all_data, - whole_audio=True, - spk=args.spk, - extensions=args.data.extensions, - n_spk=args.model.n_spk) - loader_valid = torch.utils.data.DataLoader( - data_valid, - batch_size=1, - shuffle=False, - num_workers=0, - pin_memory=True - ) - return loader_train, loader_valid - - -class AudioDataset(Dataset): - def __init__( - self, - filelists, - waveform_sec, - hop_size, - sample_rate, - spk, - load_all_data=True, - whole_audio=False, - 
extensions=['wav'], - n_spk=1, - device='cpu', - fp16=False, - use_aug=False, - ): - super().__init__() - - self.waveform_sec = waveform_sec - self.sample_rate = sample_rate - self.hop_size = hop_size - self.filelists = filelists - self.whole_audio = whole_audio - self.use_aug = use_aug - self.data_buffer={} - self.pitch_aug_dict = {} - # np.load(os.path.join(self.path_root, 'pitch_aug_dict.npy'), allow_pickle=True).item() - if load_all_data: - print('Load all the data filelists:', filelists) - else: - print('Load the f0, volume data filelists:', filelists) - with open(filelists,"r") as f: - self.paths = f.read().splitlines() - for name_ext in tqdm(self.paths, total=len(self.paths)): - name = os.path.splitext(name_ext)[0] - path_audio = name_ext - duration = librosa.get_duration(filename = path_audio, sr = self.sample_rate) - - path_f0 = name_ext + ".f0.npy" - f0,_ = np.load(path_f0,allow_pickle=True) - f0 = torch.from_numpy(np.array(f0,dtype=float)).float().unsqueeze(-1).to(device) - - path_volume = name_ext + ".vol.npy" - volume = np.load(path_volume) - volume = torch.from_numpy(volume).float().unsqueeze(-1).to(device) - - path_augvol = name_ext + ".aug_vol.npy" - aug_vol = np.load(path_augvol) - aug_vol = torch.from_numpy(aug_vol).float().unsqueeze(-1).to(device) - - if n_spk is not None and n_spk > 1: - spk_name = name_ext.split("/")[-2] - spk_id = spk[spk_name] if spk_name in spk else 0 - if spk_id < 0 or spk_id >= n_spk: - raise ValueError(' [x] Muiti-speaker traing error : spk_id must be a positive integer from 0 to n_spk-1 ') - else: - spk_id = 0 - spk_id = torch.LongTensor(np.array([spk_id])).to(device) - - if load_all_data: - ''' - audio, sr = librosa.load(path_audio, sr=self.sample_rate) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio) - audio = torch.from_numpy(audio).to(device) - ''' - path_mel = name_ext + ".mel.npy" - mel = np.load(path_mel) - mel = torch.from_numpy(mel).to(device) - - path_augmel = name_ext + ".aug_mel.npy" - aug_mel,keyshift = np.load(path_augmel, allow_pickle=True) - aug_mel = np.array(aug_mel,dtype=float) - aug_mel = torch.from_numpy(aug_mel).to(device) - self.pitch_aug_dict[name_ext] = keyshift - - path_units = name_ext + ".soft.pt" - units = torch.load(path_units).to(device) - units = units[0] - units = repeat_expand_2d(units,f0.size(0)).transpose(0,1) - - if fp16: - mel = mel.half() - aug_mel = aug_mel.half() - units = units.half() - - self.data_buffer[name_ext] = { - 'duration': duration, - 'mel': mel, - 'aug_mel': aug_mel, - 'units': units, - 'f0': f0, - 'volume': volume, - 'aug_vol': aug_vol, - 'spk_id': spk_id - } - else: - path_augmel = name_ext + ".aug_mel.npy" - aug_mel,keyshift = np.load(path_augmel, allow_pickle=True) - self.pitch_aug_dict[name_ext] = keyshift - self.data_buffer[name_ext] = { - 'duration': duration, - 'f0': f0, - 'volume': volume, - 'aug_vol': aug_vol, - 'spk_id': spk_id - } - - - def __getitem__(self, file_idx): - name_ext = self.paths[file_idx] - data_buffer = self.data_buffer[name_ext] - # check duration. 
if too short, then skip - if data_buffer['duration'] < (self.waveform_sec + 0.1): - return self.__getitem__( (file_idx + 1) % len(self.paths)) - - # get item - return self.get_data(name_ext, data_buffer) - - def get_data(self, name_ext, data_buffer): - name = os.path.splitext(name_ext)[0] - frame_resolution = self.hop_size / self.sample_rate - duration = data_buffer['duration'] - waveform_sec = duration if self.whole_audio else self.waveform_sec - - # load audio - idx_from = 0 if self.whole_audio else random.uniform(0, duration - waveform_sec - 0.1) - start_frame = int(idx_from / frame_resolution) - units_frame_len = int(waveform_sec / frame_resolution) - aug_flag = random.choice([True, False]) and self.use_aug - ''' - audio = data_buffer.get('audio') - if audio is None: - path_audio = os.path.join(self.path_root, 'audio', name) + '.wav' - audio, sr = librosa.load( - path_audio, - sr = self.sample_rate, - offset = start_frame * frame_resolution, - duration = waveform_sec) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio) - # clip audio into N seconds - audio = audio[ : audio.shape[-1] // self.hop_size * self.hop_size] - audio = torch.from_numpy(audio).float() - else: - audio = audio[start_frame * self.hop_size : (start_frame + units_frame_len) * self.hop_size] - ''' - # load mel - mel_key = 'aug_mel' if aug_flag else 'mel' - mel = data_buffer.get(mel_key) - if mel is None: - mel = name_ext + ".mel.npy" - mel = np.load(mel) - mel = mel[start_frame : start_frame + units_frame_len] - mel = torch.from_numpy(mel).float() - else: - mel = mel[start_frame : start_frame + units_frame_len] - - # load f0 - f0 = data_buffer.get('f0') - aug_shift = 0 - if aug_flag: - aug_shift = self.pitch_aug_dict[name_ext] - f0_frames = 2 ** (aug_shift / 12) * f0[start_frame : start_frame + units_frame_len] - - # load units - units = data_buffer.get('units') - if units is None: - path_units = name_ext + ".soft.pt" - units = torch.load(path_units) - units = units[0] - units = repeat_expand_2d(units,f0.size(0)).transpose(0,1) - - units = units[start_frame : start_frame + units_frame_len] - - # load volume - vol_key = 'aug_vol' if aug_flag else 'volume' - volume = data_buffer.get(vol_key) - volume_frames = volume[start_frame : start_frame + units_frame_len] - - # load spk_id - spk_id = data_buffer.get('spk_id') - - # load shift - aug_shift = torch.from_numpy(np.array([[aug_shift]])).float() - - return dict(mel=mel, f0=f0_frames, volume=volume_frames, units=units, spk_id=spk_id, aug_shift=aug_shift, name=name, name_ext=name_ext) - - def __len__(self): - return len(self.paths) \ No newline at end of file diff --git a/spaces/FrankZxShen/so-vits-svc-models-pcr/vencoder/whisper/audio.py b/spaces/FrankZxShen/so-vits-svc-models-pcr/vencoder/whisper/audio.py deleted file mode 100644 index 3bdb70ba9357e95ff05853dcc06437c3401ef3be..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/so-vits-svc-models-pcr/vencoder/whisper/audio.py +++ /dev/null @@ -1,125 +0,0 @@ -import os -from functools import lru_cache -from typing import Union - -import ffmpeg -import numpy as np -import torch -import torch.nn.functional as F - -from .utils import exact_div - -from librosa.filters import mel as librosa_mel_fn - -# hard-coded audio hyperparameters -SAMPLE_RATE = 16000 -N_FFT = 400 -N_MELS = 80 -HOP_LENGTH = 160 -CHUNK_LENGTH = 30 -N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk -N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input - - -def 
load_audio(file: str, sr: int = SAMPLE_RATE): - """ - Open an audio file and read as mono waveform, resampling as necessary - - Parameters - ---------- - file: str - The audio file to open - - sr: int - The sample rate to resample the audio if necessary - - Returns - ------- - A NumPy array containing the audio waveform, in float32 dtype. - """ - try: - # This launches a subprocess to decode audio while down-mixing and resampling as necessary. - # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. - out, _ = ( - ffmpeg.input(file, threads=0) - .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr) - .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True) - ) - except ffmpeg.Error as e: - raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e - - return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0 - - -def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1): - """ - Pad or trim the audio array to N_SAMPLES, as expected by the encoder. - """ - if torch.is_tensor(array): - if array.shape[axis] > length: - array = array.index_select(dim=axis, index=torch.arange(length, device=array.device)) - - if array.shape[axis] < length: - pad_widths = [(0, 0)] * array.ndim - pad_widths[axis] = (0, length - array.shape[axis]) - array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes]) - else: - if array.shape[axis] > length: - array = array.take(indices=range(length), axis=axis) - - if array.shape[axis] < length: - pad_widths = [(0, 0)] * array.ndim - pad_widths[axis] = (0, length - array.shape[axis]) - array = np.pad(array, pad_widths) - - return array - - -@lru_cache(maxsize=None) -def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor: - """ - load the mel filterbank matrix for projecting STFT into a Mel spectrogram. 
- Allows decoupling librosa dependency; saved using: - - np.savez_compressed( - "mel_filters.npz", - mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80), - ) - """ - assert n_mels == 80, f"Unsupported n_mels: {n_mels}" - return torch.from_numpy(librosa_mel_fn(sr=SAMPLE_RATE,n_fft=N_FFT,n_mels=n_mels)).to(device) - - -def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS): - """ - Compute the log-Mel spectrogram of - - Parameters - ---------- - audio: Union[str, np.ndarray, torch.Tensor], shape = (*) - The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz - - n_mels: int - The number of Mel-frequency filters, only 80 is supported - - Returns - ------- - torch.Tensor, shape = (80, n_frames) - A Tensor that contains the Mel spectrogram - """ - if not torch.is_tensor(audio): - if isinstance(audio, str): - audio = load_audio(audio) - audio = torch.from_numpy(audio) - - window = torch.hann_window(N_FFT).to(audio.device) - stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True) - magnitudes = stft[..., :-1].abs() ** 2 - - filters = mel_filters(audio.device, n_mels) - mel_spec = filters @ magnitudes - - log_spec = torch.clamp(mel_spec, min=1e-10).log10() - log_spec = torch.maximum(log_spec, log_spec.max() - 8.0) - log_spec = (log_spec + 4.0) / 4.0 - return log_spec diff --git a/spaces/FrankZxShen/vits-fast-fineturning-models-ba/text/english.py b/spaces/FrankZxShen/vits-fast-fineturning-models-ba/text/english.py deleted file mode 100644 index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/vits-fast-fineturning-models-ba/text/english.py +++ /dev/null @@ -1,188 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - - -# Regular expression matching whitespace: - - -import re -import inflect -from unidecode import unidecode -import eng_to_ipa as ipa -_inflect = inflect.engine() -_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') -_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') -_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') -_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') -_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') -_number_re = re.compile(r'[0-9]+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -# List of (ipa, lazy ipa) pairs: -_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('æ', 'e'), - ('ɑ', 'a'), - ('ɔ', 'o'), - ('ð', 'z'), - ('θ', 's'), - ('ɛ', 'e'), - ('ɪ', 'i'), - ('ʊ', 'u'), - ('ʒ', 'ʥ'), - ('ʤ', 'ʥ'), - ('ˈ', '↓'), -]] - -# List of (ipa, lazy ipa2) pairs: -_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ð', 'z'), - ('θ', 's'), - ('ʒ', 'ʑ'), - ('ʤ', 'dʑ'), - ('ˈ', '↓'), -]] - -# List of (ipa, ipa2) pairs -_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ʤ', 'dʒ'), - ('ʧ', 'tʃ') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def collapse_whitespace(text): - return re.sub(r'\s+', ' ', text) - - -def _remove_commas(m): - return m.group(1).replace(',', '') - - -def _expand_decimal_point(m): - return m.group(1).replace('.', ' point ') - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split('.') - if len(parts) > 2: - return match + ' dollars' # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) - elif dollars: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - return '%s %s' % (dollars, dollar_unit) - elif cents: - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s' % (cents, cent_unit) - else: - return 'zero dollars' - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return 'two thousand' - elif num > 2000 and num < 2010: - return 'two thousand ' + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + ' hundred' - else: - return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') - else: - return _inflect.number_to_words(num, andword='') - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r'\1 pounds', text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = re.sub(_decimal_number_re, _expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text - - -def mark_dark_l(text): - return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) - - -def english_to_ipa(text): - text = unidecode(text).lower() - text = expand_abbreviations(text) - text = normalize_numbers(text) - phonemes = ipa.convert(text) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_to_lazy_ipa(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def english_to_ipa2(text): - text = english_to_ipa(text) - text = 
mark_dark_l(text) - for regex, replacement in _ipa_to_ipa2: - text = re.sub(regex, replacement, text) - return text.replace('...', '…') - - -def english_to_lazy_ipa2(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa2: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py deleted file mode 100644 index a02a814fe2f08b464454e8eb6e1c88004ab804f6..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py +++ /dev/null @@ -1,27 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -model = dict( - pretrained='torchvision://resnet101', - backbone=dict(depth=101), - bbox_head=dict( - with_deform=True, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py deleted file mode 100644 index 4874121fd01e4024bfde445f451b7368c6834511..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_480x480_40k_pascal_context_59.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 89d5e1ae0f3ef44626f3b5534c504cbce7389a32..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py deleted file mode 100644 index 12f6d402a3c4a113d4c37be062790fa435b72104..0000000000000000000000000000000000000000 --- 
a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Evaluation with objective metrics for the pretrained AudioGen models. -This grid takes signature from the training grid and runs evaluation-only stage. - -When running the grid for the first time, please use: -REGEN=1 dora grid audiogen.audiogen_pretrained_16khz_eval -and re-use the REGEN=1 option when the grid is changed to force regenerating it. - -Note that you need the proper metrics external libraries setup to use all -the objective metrics activated in this grid. Refer to the README for more information. -""" - -import os - -from ..musicgen._explorers import GenerationEvalExplorer -from ...environment import AudioCraftEnvironment -from ... import train - - -def eval(launcher, batch_size: int = 32): - opts = { - 'dset': 'audio/audiocaps_16khz', - 'solver/audiogen/evaluation': 'objective_eval', - 'execute_only': 'evaluate', - '+dataset.evaluate.batch_size': batch_size, - '+metrics.fad.tf.batch_size': 32, - } - # binary for FAD computation: replace this path with your own path - metrics_opts = { - 'metrics.fad.tf.bin': '/data/home/jadecopet/local/usr/opt/google-research' - } - opt1 = {'generate.lm.use_sampling': True, 'generate.lm.top_k': 250, 'generate.lm.top_p': 0.} - opt2 = {'transformer_lm.two_step_cfg': True} - - sub = launcher.bind(opts) - sub.bind_(metrics_opts) - - # base objective metrics - sub(opt1, opt2) - - -@GenerationEvalExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=4, partition=partitions) - - if 'REGEN' not in os.environ: - folder = train.main.dora.dir / 'grids' / __name__.split('.', 2)[-1] - with launcher.job_array(): - for sig in folder.iterdir(): - if not sig.is_symlink(): - continue - xp = train.main.get_xp_from_sig(sig.name) - launcher(xp.argv) - return - - audiogen_base = launcher.bind(solver="audiogen/audiogen_base_16khz") - audiogen_base.bind_({'autocast': False, 'fsdp.use': True}) - - audiogen_base_medium = audiogen_base.bind({'continue_from': '//pretrained/facebook/audiogen-medium'}) - audiogen_base_medium.bind_({'model/lm/model_scale': 'medium'}) - eval(audiogen_base_medium, batch_size=128) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/compression/debug.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/compression/debug.py deleted file mode 100644 index 5612ff5688d85fede0e605b244919e8081cb1da9..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/compression/debug.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Grid search file, simply list all the exp you want in `explorer`. -Any new exp added there will be scheduled. -You can cancel and experiment by commenting its line. - -This grid is a minimal example for debugging compression task -and how to override parameters directly in a grid. 
-Learn more about dora grids: https://github.com/facebookresearch/dora -""" - -from ._explorers import CompressionExplorer -from ...environment import AudioCraftEnvironment - - -@CompressionExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=2, partition=partitions) - launcher.bind_(solver='compression/debug') - - with launcher.job_array(): - # base debug task using config from solver=compression/debug - launcher() - # we can override parameters in the grid to launch additional xps - launcher({'rvq.bins': 2048, 'rvq.n_q': 4}) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/visqol.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/visqol.py deleted file mode 100644 index 44f4b0a2c3c6c726857db8386491823dd85dde51..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/visqol.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import csv -import json -import logging -from pathlib import Path -import tempfile -import typing as tp -import subprocess -import shutil - -import torch -import torchaudio - -logger = logging.getLogger(__name__) - - -class ViSQOL: - """ViSQOL wrapper to run ViSQOL from Python using a pre-installed binary. - - To learn more about ViSQOL and how to build ViSQOL binary using bazel, please refer to the - instructions available in the open source repository: https://github.com/google/visqol - - ViSQOL is capable of running in two modes: - - Audio Mode: - When running in audio mode, input signals must have a 48kHz sample rate. Input should be resampled to 48kHz. - Input signals can be multi-channel, but they will be down-mixed to mono for performing the comparison. - Audio mode uses support vector regression, with the maximum range at ~4.75. - - Speech Mode: - When running in speech mode, ViSQOL uses a wideband model. It therefore expects input sample rates of 16kHz. - Input should be resampled to 16kHz. - As part of the speech mode processing, a root mean square implementation for voice activity detection - is performed on the reference signal to determine what parts of the signal have voice activity and - should therefore be included in the comparison. The signal is normalized before performing the voice - activity detection. - Input signals can be multi-channel, but they will be down-mixed to mono for performing the comparison. - Speech mode is scaled to have a maximum MOS of 5.0 to match previous version behavior. - - For more details, check the guidelines: https://github.com/google/visqol#general-guidelines-for-input - - Args: - visqol_bin (str): Path to the ViSQOL binary. - mode (str): ViSQOL computation mode, expecting "audio" or "speech". - model (str): Name of the model to use for similarity to quality model. - debug (bool): Whether to also get debug metrics from ViSQOL or not. 
- """ - SAMPLE_RATES_MODES = {"audio": 48_000, "speech": 16_000} - ALLOWED_SAMPLE_RATES = frozenset(SAMPLE_RATES_MODES.values()) - - def __init__(self, bin: tp.Union[Path, str], mode: str = "audio", - model: str = "libsvm_nu_svr_model.txt", debug: bool = False): - assert bin is not None and Path(bin).exists(), f"Could not find ViSQOL binary in specified path: {bin}" - self.visqol_bin = str(bin) - self.visqol_mode = mode - self.target_sr = self._get_target_sr(self.visqol_mode) - self.model = model - self.debug = debug - assert Path(self.visqol_model).exists(), \ - f"Could not find the specified model in ViSQOL install: {self.visqol_model}" - - def _get_target_sr(self, mode: str) -> int: - # returns target sampling rate for the corresponding ViSQOL mode. - if mode not in ViSQOL.SAMPLE_RATES_MODES: - raise ValueError( - f"Unsupported mode! Allowed are: {', '.join(ViSQOL.SAMPLE_RATES_MODES.keys())}" - ) - return ViSQOL.SAMPLE_RATES_MODES[mode] - - def _prepare_files( - self, ref_sig: torch.Tensor, deg_sig: torch.Tensor, sr: int, target_sr: int, pad_with_silence: bool = False - ): - # prepare files for ViSQOL evaluation. - assert target_sr in ViSQOL.ALLOWED_SAMPLE_RATES - assert len(ref_sig) == len(deg_sig), ( - "Expects same number of ref and degraded inputs", - f" but ref len {len(ref_sig)} != deg len {len(deg_sig)}" - ) - # resample audio if needed - if sr != target_sr: - transform = torchaudio.transforms.Resample(sr, target_sr) - pad = int(0.5 * target_sr) - rs_ref = [] - rs_deg = [] - for i in range(len(ref_sig)): - rs_ref_i = transform(ref_sig[i]) - rs_deg_i = transform(deg_sig[i]) - if pad_with_silence: - rs_ref_i = torch.nn.functional.pad(rs_ref_i, (pad, pad), mode='constant', value=0) - rs_deg_i = torch.nn.functional.pad(rs_deg_i, (pad, pad), mode='constant', value=0) - rs_ref.append(rs_ref_i) - rs_deg.append(rs_deg_i) - ref_sig = torch.stack(rs_ref) - deg_sig = torch.stack(rs_deg) - # save audio chunks to tmp dir and create csv - tmp_dir = Path(tempfile.mkdtemp()) - try: - tmp_input_csv_path = tmp_dir / "input.csv" - tmp_results_csv_path = tmp_dir / "results.csv" - tmp_debug_json_path = tmp_dir / "debug.json" - with open(tmp_input_csv_path, "w") as csv_file: - csv_writer = csv.writer(csv_file) - csv_writer.writerow(["reference", "degraded"]) - for i in range(len(ref_sig)): - tmp_ref_filename = tmp_dir / f"ref_{i}.wav" - tmp_deg_filename = tmp_dir / f"deg_{i}.wav" - torchaudio.save( - tmp_ref_filename, - torch.clamp(ref_sig[i], min=-0.99, max=0.99), - sample_rate=target_sr, - bits_per_sample=16, - encoding="PCM_S" - ) - torchaudio.save( - tmp_deg_filename, - torch.clamp(deg_sig[i], min=-0.99, max=0.99), - sample_rate=target_sr, - bits_per_sample=16, - encoding="PCM_S" - ) - csv_writer.writerow([str(tmp_ref_filename), str(tmp_deg_filename)]) - return tmp_dir, tmp_input_csv_path, tmp_results_csv_path, tmp_debug_json_path - except Exception as e: - logger.error("Exception occurred when preparing files for ViSQOL: %s", e) - return tmp_dir, None, None, None - - def _flush_files(self, tmp_dir: tp.Union[Path, str]): - # flush tmp files used to compute ViSQOL. - shutil.rmtree(str(tmp_dir)) - - def _collect_moslqo_score(self, results_csv_path: tp.Union[Path, str]) -> float: - # collect results for each evaluated pair and return averaged moslqo score. 
- with open(results_csv_path, "r") as csv_file: - reader = csv.DictReader(csv_file) - moslqo_scores = [float(row["moslqo"]) for row in reader] - if len(moslqo_scores) > 0: - return sum(moslqo_scores) / len(moslqo_scores) - else: - return 0.0 - - def _collect_debug_data(self, debug_json_path: tp.Union[Path, str]) -> dict: - # collect debug data for the visqol inference. - with open(debug_json_path, "r") as f: - data = json.load(f) - return data - - @property - def visqol_model(self): - return f'{self.visqol_bin}/model/{self.model}' - - def _run_visqol( - self, - input_csv_path: tp.Union[Path, str], - results_csv_path: tp.Union[Path, str], - debug_csv_path: tp.Optional[tp.Union[Path, str]], - ): - input_csv_path = str(input_csv_path) - results_csv_path = str(results_csv_path) - # only stringify the debug path when one was actually provided, so the None check below stays meaningful - debug_csv_path = str(debug_csv_path) if debug_csv_path is not None else None - cmd = [ - f'{self.visqol_bin}/bazel-bin/visqol', - '--batch_input_csv', f'{input_csv_path}', - '--results_csv', f'{results_csv_path}' - ] - if debug_csv_path is not None: - cmd += ['--output_debug', f'{debug_csv_path}'] - if self.visqol_mode == "speech": - cmd += ['--use_speech_mode'] - cmd += ['--similarity_to_quality_model', f'{self.visqol_model}'] - result = subprocess.run(cmd, capture_output=True) - if result.returncode: - logger.error("Error with visqol: \n %s \n %s", result.stdout.decode(), result.stderr.decode()) - raise RuntimeError("Error while executing visqol") - result.check_returncode() - - def __call__( - self, - ref_sig: torch.Tensor, - deg_sig: torch.Tensor, - sr: int, - pad_with_silence: bool = False, - ): - """Calculate the ViSQOL metric for a pair of audio signals at a given sample rate. - Args: - ref_sig (torch.Tensor): Reference signals as [B, C, T]. - deg_sig (torch.Tensor): Degraded signals as [B, C, T]. - sr (int): Sample rate of the two audio signals. - pad_with_silence (bool): Whether to pad the file with silences as recommended - in visqol guidelines (see: https://github.com/google/visqol#general-guidelines-for-input). - Returns: - float: The ViSQOL score or mean score for the batch.
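A minimal usage sketch for this wrapper, under stated assumptions: the binary path, sample rate, and tensor shapes below are placeholders, and it assumes a locally built ViSQOL checkout (with `bazel-bin/visqol` and the bundled `model/` directory) plus the `audiocraft` package layout this file sits in.

```python
import torch
# Import path assumed from this file's location in the audiocraft package.
from audiocraft.metrics.visqol import ViSQOL

# Hypothetical location of a locally built ViSQOL checkout.
visqol = ViSQOL(bin="/path/to/visqol", mode="audio", debug=False)

# A batch of 4 mono, 3-second signals at 32 kHz; the wrapper resamples to 48 kHz in audio mode.
ref = torch.randn(4, 1, 3 * 32000)
deg = ref + 0.01 * torch.randn_like(ref)

# Returns the MOS-LQO score averaged over the batch (roughly up to ~4.75 in audio mode).
score = visqol(ref, deg, sr=32000, pad_with_silence=True)
print(score)
```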
- """ - logger.debug(f"Calculating visqol with mode={self.visqol_mode} on {len(ref_sig)} samples") - tmp_dir, input_csv, results_csv, debug_json = self._prepare_files( - ref_sig, deg_sig, sr, self.target_sr, pad_with_silence - ) - try: - if input_csv and results_csv: - self._run_visqol( - input_csv, - results_csv, - debug_json if self.debug else None, - ) - mosqol = self._collect_moslqo_score(results_csv) - return mosqol - else: - raise RuntimeError("Something unexpected happened when running VISQOL!") - except Exception as e: - logger.error("Exception occurred when running ViSQOL: %s", e) - finally: - self._flush_files(tmp_dir) diff --git a/spaces/GuXiaoBei/wechat-chatbot/docker/sample-chatgpt-on-wechat/Makefile b/spaces/GuXiaoBei/wechat-chatbot/docker/sample-chatgpt-on-wechat/Makefile deleted file mode 100644 index 31b5f817b41d7d95055d9aa5d5e4f973abee0b45..0000000000000000000000000000000000000000 --- a/spaces/GuXiaoBei/wechat-chatbot/docker/sample-chatgpt-on-wechat/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -IMG:=`cat Name` -MOUNT:= -PORT_MAP:= -DOTENV:=.env -CONTAINER_NAME:=sample-chatgpt-on-wechat - -echo: - echo $(IMG) - -run_d: - docker rm $(CONTAINER_NAME) || echo - docker run -dt --name $(CONTAINER_NAME) $(PORT_MAP) \ - --env-file=$(DOTENV) \ - $(MOUNT) $(IMG) - -run_i: - docker rm $(CONTAINER_NAME) || echo - docker run -it --name $(CONTAINER_NAME) $(PORT_MAP) \ - --env-file=$(DOTENV) \ - $(MOUNT) $(IMG) - -stop: - docker stop $(CONTAINER_NAME) - -rm: stop - docker rm $(CONTAINER_NAME) diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/BBSNet/ResNet.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/BBSNet/ResNet.py deleted file mode 100644 index 84e51fd71fca78131a3be3d0d35d808c07bb3a95..0000000000000000000000000000000000000000 --- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/BBSNet/ResNet.py +++ /dev/null @@ -1,145 +0,0 @@ -import torch.nn as nn -import math - - -def conv3x3(in_planes, out_planes, stride=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, - padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) 
- out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class ResNet50(nn.Module): - def __init__(self,mode='rgb'): - self.inplanes = 64 - super(ResNet50, self).__init__() - if(mode=='rgb'): - self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, - bias=False) - elif(mode=='rgbd'): - self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, - bias=False) - elif(mode=="share"): - self.conv1=nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, - bias=False) - self.conv1_d=nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, - bias=False) - else: - raise ValueError(f"Unknown ResNet50 mode: {mode}, expected 'rgb', 'rgbd' or 'share'") - self.bn1 = nn.BatchNorm2d(64) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(Bottleneck, 64, 3) - self.layer2 = self._make_layer(Bottleneck, 128, 4, stride=2) - self.layer3_1 = self._make_layer(Bottleneck, 256, 6, stride=2) - self.layer4_1 = self._make_layer(Bottleneck, 512, 3, stride=2) - - self.inplanes = 512 - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, planes, blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x1 = self.layer3_1(x) - x1 = self.layer4_1(x1) - - return x1, x1 diff --git a/spaces/Hallucinate/demo/ldm/models/autoencoder.py b/spaces/Hallucinate/demo/ldm/models/autoencoder.py deleted file mode 100644 index 6a9c4f45498561953b8085981609b2a3298a5473..0000000000000000000000000000000000000000 --- a/spaces/Hallucinate/demo/ldm/models/autoencoder.py +++ /dev/null @@ -1,443 +0,0 @@ -import torch -import pytorch_lightning as pl -import torch.nn.functional as F -from contextlib import contextmanager - -from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer - -from ldm.modules.diffusionmodules.model import Encoder, Decoder -from ldm.modules.distributions.distributions import DiagonalGaussianDistribution - -from ldm.util import instantiate_from_config - -# helpers used further down in this file (EMA weights, version check, LR schedule, batch resizing) -import numpy as np -from packaging import version -from torch.optim.lr_scheduler import LambdaLR -from ldm.modules.ema import LitEma - - -class VQModel(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - n_embed, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - batch_resize_range=None, - scheduler_config=None, - lr_g_factor=1.0, - remap=None, - sane_index_shape=False, # tell vector quantizer to return indices as bhw - use_ema=False - ): - super().__init__() - self.embed_dim = embed_dim - self.n_embed = n_embed - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - self.quantize = VectorQuantizer(n_embed, embed_dim,
beta=0.25, - remap=remap, - sane_index_shape=sane_index_shape) - self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - self.batch_resize_range = batch_resize_range - if self.batch_resize_range is not None: - print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") - - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - self.scheduler_config = scheduler_config - self.lr_g_factor = lr_g_factor - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.parameters()) - self.model_ema.copy_to(self) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - print(f"Unexpected Keys: {unexpected}") - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self) - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - quant, emb_loss, info = self.quantize(h) - return quant, emb_loss, info - - def encode_to_prequant(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, quant): - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - def decode_code(self, code_b): - quant_b = self.quantize.embed_code(code_b) - dec = self.decode(quant_b) - return dec - - def forward(self, input, return_pred_indices=False): - quant, diff, (_,_,ind) = self.encode(input) - dec = self.decode(quant) - if return_pred_indices: - return dec, diff, ind - return dec, diff - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - if self.batch_resize_range is not None: - lower_size = self.batch_resize_range[0] - upper_size = self.batch_resize_range[1] - if self.global_step <= 4: - # do the first few batches with max size to avoid later oom - new_resize = upper_size - else: - new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) - if new_resize != x.shape[2]: - x = F.interpolate(x, size=new_resize, mode="bicubic") - x = x.detach() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - # https://github.com/pytorch/pytorch/issues/37142 - # try not to fool the heuristics - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - - if optimizer_idx == 0: - # autoencode - aeloss, log_dict_ae = self.loss(qloss, x, xrec, 
optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train", - predicted_indices=ind) - - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return aeloss - - if optimizer_idx == 1: - # discriminator - discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return discloss - - def validation_step(self, batch, batch_idx): - log_dict = self._validation_step(batch, batch_idx) - with self.ema_scope(): - log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") - return log_dict - - def _validation_step(self, batch, batch_idx, suffix=""): - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - - discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] - self.log(f"val{suffix}/rec_loss", rec_loss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - self.log(f"val{suffix}/aeloss", aeloss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - if version.parse(pl.__version__) >= version.parse('1.4.0'): - del log_dict_ae[f"val{suffix}/rec_loss"] - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr_d = self.learning_rate - lr_g = self.lr_g_factor*self.learning_rate - print("lr_d", lr_d) - print("lr_g", lr_g) - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quantize.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr_g, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr_d, betas=(0.5, 0.9)) - - if self.scheduler_config is not None: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - { - 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - ] - return [opt_ae, opt_disc], scheduler - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if only_inputs: - log["inputs"] = x - return log - xrec, _ = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["inputs"] = x - log["reconstructions"] = xrec - if plot_ema: - with self.ema_scope(): - xrec_ema, _ = self(x) - if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) - log["reconstructions_ema"] = xrec_ema - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 
1. - return x - - -class VQModelInterface(VQModel): - def __init__(self, embed_dim, *args, **kwargs): - super().__init__(embed_dim=embed_dim, *args, **kwargs) - self.embed_dim = embed_dim - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, h, force_not_quantize=False): - # also go through quantization layer - if not force_not_quantize: - quant, emb_loss, info = self.quantize(h) - else: - quant = h - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - -class AutoencoderKL(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - ): - super().__init__() - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - assert ddconfig["double_z"] - self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - self.embed_dim = embed_dim - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - self.load_state_dict(sd, strict=False) - print(f"Restored from {path}") - - def encode(self, x): - h = self.encoder(x) - moments = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(moments) - return posterior - - def decode(self, z): - z = self.post_quant_conv(z) - dec = self.decoder(z) - return dec - - def forward(self, input, sample_posterior=True): - posterior = self.encode(input) - if sample_posterior: - z = posterior.sample() - else: - z = posterior.mode() - dec = self.decode(z) - return dec, posterior - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - - if optimizer_idx == 0: - # train encoder+decoder+logvar - aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return aeloss - - if optimizer_idx == 1: - # train the discriminator - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - - self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return discloss - - def validation_step(self, batch, batch_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - aeloss, log_dict_ae = self.loss(inputs, 
reconstructions, posterior, 0, self.global_step, - last_layer=self.get_last_layer(), split="val") - - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, - last_layer=self.get_last_layer(), split="val") - - self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr = self.learning_rate - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr, betas=(0.5, 0.9)) - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - @torch.no_grad() - def log_images(self, batch, only_inputs=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if not only_inputs: - xrec, posterior = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["samples"] = self.decode(torch.randn_like(posterior.sample())) - log["reconstructions"] = xrec - log["inputs"] = x - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. - return x - - -class IdentityFirstStage(torch.nn.Module): - def __init__(self, *args, vq_interface=False, **kwargs): - self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff - super().__init__() - - def encode(self, x, *args, **kwargs): - return x - - def decode(self, x, *args, **kwargs): - return x - - def quantize(self, x, *args, **kwargs): - if self.vq_interface: - return x, None, [None, None, None] - return x - - def forward(self, x, *args, **kwargs): - return x diff --git a/spaces/HaloMaster/chinesesummary/fengshen/models/auto/configuration_auto.py b/spaces/HaloMaster/chinesesummary/fengshen/models/auto/configuration_auto.py deleted file mode 100644 index 81676226e57ca519273b98328a1afe6961c37ce3..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/models/auto/configuration_auto.py +++ /dev/null @@ -1,403 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The IDEA Authors. All rights reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" Auto Config class.""" -import importlib -import re -import warnings -from collections import OrderedDict -from typing import List, Union - -from transformers.configuration_utils import PretrainedConfig -from transformers.file_utils import CONFIG_NAME -from transformers.utils import logging -from .dynamic import get_class_from_dynamic_module - - -logger = logging.get_logger(__name__) - -CONFIG_MAPPING_NAMES = OrderedDict( - [ - # Add configs here - ("roformer", "RoFormerConfig"), - ("longformer", "LongformerConfig"), - ] -) - -CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict( - [ - # Add archive maps here - ("roformer", "ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("longformer", "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ] -) - -MODEL_NAMES_MAPPING = OrderedDict( - [ - # Add full (and cased) model names here - ("roformer", "Roformer"), - ("longformer", "Longformer"), - ] -) - -SPECIAL_MODEL_TYPE_TO_MODULE_NAME = OrderedDict([("openai-gpt", "openai")]) - - -def model_type_to_module_name(key): - """Converts a config key to the corresponding module.""" - # Special treatment - if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME: - return SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key] - - return key.replace("-", "_") - - -def config_class_to_model_type(config): - """Converts a config class name to the corresponding model type""" - for key, cls in CONFIG_MAPPING_NAMES.items(): - if cls == config: - return key - return None - - -class _LazyConfigMapping(OrderedDict): - """ - A dictionary that lazily load its values when they are requested. - """ - - def __init__(self, mapping): - self._mapping = mapping - self._extra_content = {} - self._modules = {} - - def __getitem__(self, key): - if key in self._extra_content: - return self._extra_content[key] - if key not in self._mapping: - raise KeyError(key) - value = self._mapping[key] - module_name = model_type_to_module_name(key) - if module_name not in self._modules: - self._modules[module_name] = importlib.import_module(f".{module_name}", "fengshen.models") - - return getattr(self._modules[module_name], value) - - def keys(self): - return list(self._mapping.keys()) + list(self._extra_content.keys()) - - def values(self): - return [self[k] for k in self._mapping.keys()] + list(self._extra_content.values()) - - def items(self): - return [(k, self[k]) for k in self._mapping.keys()] + list(self._extra_content.items()) - - def __iter__(self): - return iter(list(self._mapping.keys()) + list(self._extra_content.keys())) - - def __contains__(self, item): - return item in self._mapping or item in self._extra_content - - def register(self, key, value): - """ - Register a new configuration in this mapping. - """ - if key in self._mapping.keys(): - raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.") - self._extra_content[key] = value - - -CONFIG_MAPPING = _LazyConfigMapping(CONFIG_MAPPING_NAMES) - - -class _LazyLoadAllMappings(OrderedDict): - """ - A mapping that will load all pairs of key values at the first access (either by indexing, requestions keys, values, - etc.) - - Args: - mapping: The mapping to load. - """ - - def __init__(self, mapping): - self._mapping = mapping - self._initialized = False - self._data = {} - - def _initialize(self): - if self._initialized: - return - warnings.warn( - "ALL_PRETRAINED_CONFIG_ARCHIVE_MAP is deprecated and will be removed in v5 of Transformers. " - "It does not contain all available model checkpoints, far from it. 
Checkout hf.co/models for that.", - FutureWarning, - ) - - for model_type, map_name in self._mapping.items(): - module_name = model_type_to_module_name(model_type) - module = importlib.import_module(f".{module_name}", "transformers.models") - mapping = getattr(module, map_name) - self._data.update(mapping) - - self._initialized = True - - def __getitem__(self, key): - self._initialize() - return self._data[key] - - def keys(self): - self._initialize() - return self._data.keys() - - def values(self): - self._initialize() - return self._data.values() - - def items(self): - self._initialize() - return self._data.keys() - - def __iter__(self): - self._initialize() - return iter(self._data) - - def __contains__(self, item): - self._initialize() - return item in self._data - - -ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = _LazyLoadAllMappings(CONFIG_ARCHIVE_MAP_MAPPING_NAMES) - - -def _get_class_name(model_class: Union[str, List[str]]): - if isinstance(model_class, (list, tuple)): - return " or ".join([f"[`{c}`]" for c in model_class if c is not None]) - return f"[`{model_class}`]" - - -def _list_model_options(indent, config_to_class=None, use_model_types=True): - if config_to_class is None and not use_model_types: - raise ValueError("Using `use_model_types=False` requires a `config_to_class` dictionary.") - if use_model_types: - if config_to_class is None: - model_type_to_name = {model_type: f"[`{config}`]" for model_type, config in CONFIG_MAPPING_NAMES.items()} - else: - model_type_to_name = { - model_type: _get_class_name(model_class) - for model_type, model_class in config_to_class.items() - if model_type in MODEL_NAMES_MAPPING - } - lines = [ - f"{indent}- **{model_type}** -- {model_type_to_name[model_type]} ({MODEL_NAMES_MAPPING[model_type]} model)" - for model_type in sorted(model_type_to_name.keys()) - ] - else: - config_to_name = { - CONFIG_MAPPING_NAMES[config]: _get_class_name(clas) - for config, clas in config_to_class.items() - if config in CONFIG_MAPPING_NAMES - } - config_to_model_name = { - config: MODEL_NAMES_MAPPING[model_type] for model_type, config in CONFIG_MAPPING_NAMES.items() - } - lines = [ - f"{indent}- [`{config_name}`] configuration class: {config_to_name[config_name]} ({config_to_model_name[config_name]} model)" - for config_name in sorted(config_to_name.keys()) - ] - return "\n".join(lines) - - -def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True): - def docstring_decorator(fn): - docstrings = fn.__doc__ - lines = docstrings.split("\n") - i = 0 - while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None: - i += 1 - if i < len(lines): - indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0] - if use_model_types: - indent = f"{indent} " - lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types) - docstrings = "\n".join(lines) - else: - raise ValueError( - f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current docstring is:\n{docstrings}" - ) - fn.__doc__ = docstrings - return fn - - return docstring_decorator - - -class AutoConfig: - r""" - This is a generic configuration class that will be instantiated as one of the configuration classes of the library - when created with the [`~AutoConfig.from_pretrained`] class method. - - This class cannot be instantiated directly using `__init__()` (throws an error). 
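A short sketch of how this AutoConfig is meant to be used; the import path is assumed from this file's location in the fengshen package, and the model name, sizes, and checkpoint path below are placeholders.

```python
from transformers import PretrainedConfig
# Import path assumed from this file's location in the fengshen package.
from fengshen.models.auto.configuration_auto import AutoConfig, CONFIG_MAPPING

class MyTinyConfig(PretrainedConfig):
    model_type = "my-tiny-model"

    def __init__(self, hidden_size=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size

# Register the custom config so the lazy mapping can resolve it.
AutoConfig.register("my-tiny-model", MyTinyConfig)
assert "my-tiny-model" in CONFIG_MAPPING

# Dispatch by model type...
config = AutoConfig.for_model("my-tiny-model", hidden_size=256)
print(config.hidden_size)  # 256

# ...or by checkpoint: AutoConfig.from_pretrained("./some_checkpoint_dir/") picks the
# concrete config class from the `model_type` key of its config.json (placeholder path).
```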
- """ - - def __init__(self): - raise EnvironmentError( - "AutoConfig is designed to be instantiated " - "using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method." - ) - - @classmethod - def for_model(cls, model_type: str, *args, **kwargs): - if model_type in CONFIG_MAPPING: - config_class = CONFIG_MAPPING[model_type] - return config_class(*args, **kwargs) - raise ValueError( - f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}" - ) - - @classmethod - @replace_list_option_in_docstrings() - def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): - r""" - Instantiate one of the configuration classes of the library from a pretrained model configuration. - - The configuration class to instantiate is selected based on the `model_type` property of the config object that - is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: - - List options - - Args: - pretrained_model_name_or_path (`str` or `os.PathLike`): - Can be either: - - - A string, the *model id* of a pretrained model configuration hosted inside a model repo on - huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or - namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - - A path to a *directory* containing a configuration file saved using the - [`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method, - e.g., `./my_model_directory/`. - - A path or url to a saved configuration JSON *file*, e.g., - `./my_model_directory/configuration.json`. - cache_dir (`str` or `os.PathLike`, *optional*): - Path to a directory in which a downloaded pretrained model configuration should be cached if the - standard cache should not be used. - force_download (`bool`, *optional*, defaults to `False`): - Whether or not to force the (re-)download the model weights and configuration files and override the - cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to delete incompletely received files. Will attempt to resume the download if such a - file exists. - proxies (`Dict[str, str]`, *optional*): - A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', - 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. - revision(`str`, *optional*, defaults to `"main"`): - The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a - git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any - identifier allowed by git. - return_unused_kwargs (`bool`, *optional*, defaults to `False`): - If `False`, then this function returns just the final configuration object. - - If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a - dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the - part of `kwargs` which has not been used to update `config` and is otherwise ignored. - trust_remote_code (`bool`, *optional*, defaults to `False`): - Whether or not to allow for custom models defined on the Hub in their own modeling files. This option - should only be set to `True` for repositories you trust and in which you have read the code, as it will - execute code present on the Hub on your local machine. 
- kwargs(additional keyword arguments, *optional*): - The values in kwargs of any keys which are configuration attributes will be used to override the loaded - values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled - by the `return_unused_kwargs` keyword parameter. - - Examples: - - ```python - >>> from transformers import AutoConfig - - >>> # Download configuration from huggingface.co and cache. - >>> config = AutoConfig.from_pretrained("bert-base-uncased") - - >>> # Download configuration from huggingface.co (user-uploaded) and cache. - >>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased") - - >>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*). - >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/") - - >>> # Load a specific configuration file. - >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json") - - >>> # Change some config attributes when loading a pretrained config. - >>> config = AutoConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False) - >>> config.output_attentions - True - - >>> config, unused_kwargs = AutoConfig.from_pretrained( - ... "bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True - ... ) - >>> config.output_attentions - True - - >>> config.unused_kwargs - {'foo': False} - ```""" - kwargs["_from_auto"] = True - kwargs["name_or_path"] = pretrained_model_name_or_path - trust_remote_code = kwargs.pop("trust_remote_code", False) - config_dict, _ = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) - if "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"]: - if not trust_remote_code: - raise ValueError( - f"Loading {pretrained_model_name_or_path} requires you to execute the configuration file in that repo " - "on your local machine. Make sure you have read the code there to avoid malicious use, then set " - "the option `trust_remote_code=True` to remove this error." - ) - if kwargs.get("revision", None) is None: - logger.warn( - "Explicitly passing a `revision` is encouraged when loading a configuration with custom code to " - "ensure no malicious code has been contributed in a newer revision." - ) - class_ref = config_dict["auto_map"]["AutoConfig"] - module_file, class_name = class_ref.split(".") - config_class = get_class_from_dynamic_module( - pretrained_model_name_or_path, module_file + ".py", class_name, **kwargs - ) - return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs) - elif "model_type" in config_dict: - config_class = CONFIG_MAPPING[config_dict["model_type"]] - return config_class.from_dict(config_dict, **kwargs) - else: - # Fallback: use pattern matching on the string. - for pattern, config_class in CONFIG_MAPPING.items(): - if pattern in str(pretrained_model_name_or_path): - return config_class.from_dict(config_dict, **kwargs) - - raise ValueError( - f"Unrecognized model in {pretrained_model_name_or_path}. " - f"Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings " - f"in its name: {', '.join(CONFIG_MAPPING.keys())}" - ) - - @staticmethod - def register(model_type, config): - """ - Register a new configuration for this class. - - Args: - model_type (`str`): The model type like "bert" or "gpt". - config ([`PretrainedConfig`]): The config to register. 
- """ - if issubclass(config, PretrainedConfig) and config.model_type != model_type: - raise ValueError( - "The config you are passing has a `model_type` attribute that is not consistent with the model type " - f"you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they " - "match!" - ) - CONFIG_MAPPING.register(model_type, config) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/criterions/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/criterions/__init__.py deleted file mode 100644 index b6fb6e751cdedb2af4b1f6c0950557e187cd9519..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/criterions/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .scst_loss import ScstRewardCriterion -from .label_smoothed_cross_entropy import AjustLabelSmoothedCrossEntropyCriterion \ No newline at end of file diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/criterions/label_smoothed_cross_entropy.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/criterions/label_smoothed_cross_entropy.py deleted file mode 100644 index 56d63e3e1b5a036e0adf32480e2b66f371738013..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/criterions/label_smoothed_cross_entropy.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from dataclasses import dataclass, field - -import torch -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass -from omegaconf import II - - -@dataclass -class LabelSmoothedCrossEntropyCriterionConfig(FairseqDataclass): - label_smoothing: float = field( - default=0.0, - metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"}, - ) - report_accuracy: bool = field( - default=False, - metadata={"help": "report accuracy metric"}, - ) - ignore_prefix_size: int = field( - default=0, - metadata={"help": "Ignore first N tokens"}, - ) - sentence_avg: bool = II("optimization.sentence_avg") - - -def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True): - if target.dim() == lprobs.dim() - 1: - target = target.unsqueeze(-1) - nll_loss = -lprobs.gather(dim=-1, index=target) - smooth_loss = -lprobs.sum(dim=-1, keepdim=True) - if ignore_index is not None: - pad_mask = target.eq(ignore_index) - nll_loss.masked_fill_(pad_mask, 0.0) - smooth_loss.masked_fill_(pad_mask, 0.0) - else: - nll_loss = nll_loss.squeeze(-1) - smooth_loss = smooth_loss.squeeze(-1) - if reduce: - nll_loss = nll_loss.sum() - smooth_loss = smooth_loss.sum() - eps_i = epsilon / (lprobs.size(-1) - 1) - loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss - return loss, nll_loss - - -@register_criterion( - "label_smoothed_cross_entropy", dataclass=LabelSmoothedCrossEntropyCriterionConfig -) -class LabelSmoothedCrossEntropyCriterion(FairseqCriterion): - def __init__( - self, - task, - sentence_avg, - label_smoothing, - ignore_prefix_size=0, - report_accuracy=False, - ): - super().__init__(task) - self.sentence_avg = sentence_avg - self.eps = label_smoothing - self.ignore_prefix_size = ignore_prefix_size - self.report_accuracy = report_accuracy - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. 
- - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - net_output = model(**sample["net_input"]) - loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce) - sample_size = ( - sample["target"].size(0) if self.sentence_avg else sample["ntokens"] - ) - logging_output = { - "loss": loss.data, - "nll_loss": nll_loss.data, - "ntokens": sample["ntokens"], - "nsentences": sample["target"].size(0), - "sample_size": sample_size, - } - if self.report_accuracy: - n_correct, total = self.compute_accuracy(model, net_output, sample) - logging_output["n_correct"] = utils.item(n_correct.data) - logging_output["total"] = utils.item(total.data) - return loss, sample_size, logging_output - - def get_lprobs_and_target(self, model, net_output, sample): - lprobs = model.get_normalized_probs(net_output, log_probs=True) - target = model.get_targets(sample, net_output) - if self.ignore_prefix_size > 0: - if getattr(lprobs, "batch_first", False): - lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous() - target = target[:, self.ignore_prefix_size :].contiguous() - else: - lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous() - target = target[self.ignore_prefix_size :, :].contiguous() - return lprobs.view(-1, lprobs.size(-1)), target.view(-1) - - def compute_loss(self, model, net_output, sample, reduce=True): - lprobs, target = self.get_lprobs_and_target(model, net_output, sample) - loss, nll_loss = label_smoothed_nll_loss( - lprobs, - target, - self.eps, - ignore_index=self.padding_idx, - reduce=reduce, - ) - return loss, nll_loss - - def compute_accuracy(self, model, net_output, sample): - lprobs, target = self.get_lprobs_and_target(model, net_output, sample) - mask = target.ne(self.padding_idx) - n_correct = torch.sum( - lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask)) - ) - total = torch.sum(mask) - return n_correct, total - - @classmethod - def reduce_metrics(cls, logging_outputs) -> None: - """Aggregate logging outputs from data parallel training.""" - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs) - ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - - metrics.log_scalar( - "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 - ) - metrics.log_scalar( - "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3 - ) - metrics.log_derived( - "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) - ) - - total = utils.item(sum(log.get("total", 0) for log in logging_outputs)) - if total > 0: - metrics.log_scalar("total", total) - n_correct = utils.item( - sum(log.get("n_correct", 0) for log in logging_outputs) - ) - metrics.log_scalar("n_correct", n_correct) - metrics.log_derived( - "accuracy", - lambda meters: round( - meters["n_correct"].sum * 100.0 / meters["total"].sum, 3 - ) - if meters["total"].sum > 0 - else float("nan"), - ) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. 
- """ - return True diff --git a/spaces/HelloMimosa/sail-rvc-Ai_Hoshino__From_Oshi_no_Ko___RVC_v2__300_Epoch/README.md b/spaces/HelloMimosa/sail-rvc-Ai_Hoshino__From_Oshi_no_Ko___RVC_v2__300_Epoch/README.md deleted file mode 100644 index d4654cdd019b1496d4c953944c9aced8a0dcc67a..0000000000000000000000000000000000000000 --- a/spaces/HelloMimosa/sail-rvc-Ai_Hoshino__From_Oshi_no_Ko___RVC_v2__300_Epoch/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Sail-rvc-Ai Hoshino From Oshi No Ko RVC V2 300 Epoch -emoji: 📈 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Hina4867/bingo/src/components/ui/select.tsx b/spaces/Hina4867/bingo/src/components/ui/select.tsx deleted file mode 100644 index 77f12c2996f541b97663de4c9e20ab34d4ec2fac..0000000000000000000000000000000000000000 --- a/spaces/Hina4867/bingo/src/components/ui/select.tsx +++ /dev/null @@ -1,123 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SelectPrimitive from '@radix-ui/react-select' - -import { cn } from '@/lib/utils' -import { - IconArrowDown, - IconCheck, - IconChevronUpDown -} from '@/components/ui/icons' - -const Select = SelectPrimitive.Root - -const SelectGroup = SelectPrimitive.Group - -const SelectValue = SelectPrimitive.Value - -const SelectTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - {children} - - - - -)) -SelectTrigger.displayName = SelectPrimitive.Trigger.displayName - -const SelectContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, position = 'popper', ...props }, ref) => ( - - - - {children} - - - -)) -SelectContent.displayName = SelectPrimitive.Content.displayName - -const SelectLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectLabel.displayName = SelectPrimitive.Label.displayName - -const SelectItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - - - - {children} - -)) -SelectItem.displayName = SelectPrimitive.Item.displayName - -const SelectSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectSeparator.displayName = SelectPrimitive.Separator.displayName - -export { - Select, - SelectGroup, - SelectValue, - SelectTrigger, - SelectContent, - SelectLabel, - SelectItem, - SelectSeparator -} diff --git a/spaces/HugoDzz/super-godot-galaxy/build/_app/immutable/chunks/singletons.1f11d8d9.js b/spaces/HugoDzz/super-godot-galaxy/build/_app/immutable/chunks/singletons.1f11d8d9.js deleted file mode 100644 index 1d6d785958b2ff6017a2a3b5f00230cf6a9bb08e..0000000000000000000000000000000000000000 --- a/spaces/HugoDzz/super-godot-galaxy/build/_app/immutable/chunks/singletons.1f11d8d9.js +++ /dev/null @@ -1 +0,0 @@ -import{H as d,s as m}from"./index.9af7eb9c.js";const c=[];function p(e,t=d){let n;const o=new Set;function a(s){if(m(e,s)&&(e=s,n)){const u=!c.length;for(const i of o)i[1](),c.push(i,e);if(u){for(let i=0;i{o.delete(i),o.size===0&&n&&(n(),n=null)}}return{set:a,update:l,subscribe:r}}var g;const E=((g=globalThis.__sveltekit_1d38e1f)==null?void 0:g.base)??"";var k;const S=((k=globalThis.__sveltekit_1d38e1f)==null?void 
0:k.assets)??E,w="1698134506027",y="sveltekit:snapshot",I="sveltekit:scroll",x="sveltekit:index",_={tap:1,hover:2,viewport:3,eager:4,off:-1};function O(e){let t=e.baseURI;if(!t){const n=e.getElementsByTagName("base");t=n.length?n[0].href:e.URL}return t}function U(){return{x:pageXOffset,y:pageYOffset}}function f(e,t){return e.getAttribute(`data-sveltekit-${t}`)}const b={..._,"":_.hover};function v(e){let t=e.assignedSlot??e.parentNode;return(t==null?void 0:t.nodeType)===11&&(t=t.host),t}function L(e,t){for(;e&&e!==t;){if(e.nodeName.toUpperCase()==="A"&&e.hasAttribute("href"))return e;e=v(e)}}function N(e,t){let n;try{n=new URL(e instanceof SVGAElement?e.href.baseVal:e.href,document.baseURI)}catch{}const o=e instanceof SVGAElement?e.target.baseVal:e.target,a=!n||!!o||R(n,t)||(e.getAttribute("rel")||"").split(/\s+/).includes("external"),l=(n==null?void 0:n.origin)===location.origin&&e.hasAttribute("download");return{url:n,external:a,target:o,download:l}}function P(e){let t=null,n=null,o=null,a=null,l=null,r=null,s=e;for(;s&&s!==document.documentElement;)o===null&&(o=f(s,"preload-code")),a===null&&(a=f(s,"preload-data")),t===null&&(t=f(s,"keepfocus")),n===null&&(n=f(s,"noscroll")),l===null&&(l=f(s,"reload")),r===null&&(r=f(s,"replacestate")),s=v(s);return{preload_code:b[o??"off"],preload_data:b[a??"off"],keep_focus:t==="off"?!1:t===""?!0:null,noscroll:n==="off"?!1:n===""?!0:null,reload:l==="off"?!1:l===""?!0:null,replace_state:r==="off"?!1:r===""?!0:null}}function h(e){const t=p(e);let n=!0;function o(){n=!0,t.update(r=>r)}function a(r){n=!1,t.set(r)}function l(r){let s;return t.subscribe(u=>{(s===void 0||n&&u!==s)&&r(s=u)})}return{notify:o,set:a,subscribe:l}}function A(){const{set:e,subscribe:t}=p(!1);let n;async function o(){clearTimeout(n);try{const a=await fetch(`${S}/_app/version.json`,{headers:{pragma:"no-cache","cache-control":"no-cache"}});if(!a.ok)return!1;const r=(await a.json()).version!==w;return r&&(e(!0),clearTimeout(n)),r}catch{return!1}}return{subscribe:t,check:o}}function R(e,t){return e.origin!==location.origin||!e.pathname.startsWith(t)}function V(e){e.client}const Y={url:h({}),page:h({}),navigating:p(null),updated:A()};export{x as I,_ as P,I as S,y as a,N as b,P as c,U as d,E as e,L as f,O as g,V as h,R as i,Y as s}; diff --git a/spaces/ICML2022/OFA/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py b/spaces/ICML2022/OFA/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py deleted file mode 100644 index f8e2eb0f15699f1b458a8445d0c1dd6229a21f77..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -import os, sys -import subprocess -import re -from subprocess import check_call, check_output - -WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) - -if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): - print('please specify your working directory root in OS environment variable WORKDIR_ROOT. 
Exiting..."') - sys.exit(-1) - - -BLEU_REGEX = re.compile("^BLEU\\S* = (\\S+) ") -def run_eval_bleu(cmd): - output = check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip() - print(output) - bleu = -1.0 - for line in output.strip().split('\n'): - m = BLEU_REGEX.search(line) - if m is not None: - bleu = m.groups()[0] - bleu = float(bleu) - break - return bleu - -def check_data_test_bleu(raw_folder, data_lang_pairs): - not_matchings = [] - for sacrebleu_set, src_tgts in data_lang_pairs: - for src_tgt in src_tgts: - print(f'checking test bleus for: {src_tgt} at {sacrebleu_set}') - src, tgt = src_tgt.split('-') - ssrc, stgt = src[:2], tgt[:2] - if os.path.exists(f'{raw_folder}/test.{tgt}-{src}.{src}'): - # reversed direction may have different test set - test_src = f'{raw_folder}/test.{tgt}-{src}.{src}' - else: - test_src = f'{raw_folder}/test.{src}-{tgt}.{src}' - cmd1 = f'cat {test_src} | sacrebleu -t "{sacrebleu_set}" -l {stgt}-{ssrc}; [ $? -eq 0 ] || echo ""' - test_tgt = f'{raw_folder}/test.{src}-{tgt}.{tgt}' - cmd2 = f'cat {test_tgt} | sacrebleu -t "{sacrebleu_set}" -l {ssrc}-{stgt}; [ $? -eq 0 ] || echo ""' - bleu1 = run_eval_bleu(cmd1) - if bleu1 != 100.0: - not_matchings.append(f'{sacrebleu_set}:{src_tgt} source side not matching: {test_src}') - bleu2 = run_eval_bleu(cmd2) - if bleu2 != 100.0: - not_matchings.append(f'{sacrebleu_set}:{src_tgt} target side not matching: {test_tgt}') - return not_matchings - -if __name__ == "__main__": - to_data_path = f'{WORKDIR_ROOT}/iwsltv2' - not_matching = check_data_test_bleu( - f'{to_data_path}/raw', - [ - ('iwslt17', ['en_XX-ar_AR', 'en_XX-ko_KR', 'ar_AR-en_XX', 'ko_KR-en_XX']), - ('iwslt17', ['en_XX-it_IT', 'en_XX-nl_XX', 'it_IT-en_XX', 'nl_XX-en_XX']), - ('iwslt17/tst2015', ['en_XX-vi_VN', "vi_VN-en_XX"]), - ] - ) - if len(not_matching) > 0: - print('the following datasets do not have matching test datasets:\n\t', '\n\t'.join(not_matching)) - diff --git a/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/vdecoder/hifigan/nvSTFT.py b/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/vdecoder/hifigan/nvSTFT.py deleted file mode 100644 index 88597d62a505715091f9ba62d38bf0a85a31b95a..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/vdecoder/hifigan/nvSTFT.py +++ /dev/null @@ -1,111 +0,0 @@ -import math -import os -os.environ["LRU_CACHE_CAPACITY"] = "3" -import random -import torch -import torch.utils.data -import numpy as np -import librosa -from librosa.util import normalize -from librosa.filters import mel as librosa_mel_fn -from scipy.io.wavfile import read -import soundfile as sf - -def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False): - sampling_rate = None - try: - data, sampling_rate = sf.read(full_path, always_2d=True)  # read audio with soundfile.
- except Exception as ex: - print(f"'{full_path}' failed to load.\nException:") - print(ex) - if return_empty_on_exception: - return [], sampling_rate or target_sr or 32000 - else: - raise Exception(ex) - - if len(data.shape) > 1: - data = data[:, 0] - assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension) - - if np.issubdtype(data.dtype, np.integer): # if audio data is type int - max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX - else: # if audio data is type fp32 - max_mag = max(np.amax(data), -np.amin(data)) - max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32 - - data = torch.FloatTensor(data.astype(np.float32))/max_mag - - if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except - return [], sampling_rate or target_sr or 32000 - if target_sr is not None and sampling_rate != target_sr: - data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr)) - sampling_rate = target_sr - - return data, sampling_rate - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - -class STFT(): - def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5): - self.target_sr = sr - - self.n_mels = n_mels - self.n_fft = n_fft - self.win_size = win_size - self.hop_length = hop_length - self.fmin = fmin - self.fmax = fmax - self.clip_val = clip_val - self.mel_basis = {} - self.hann_window = {} - - def get_mel(self, y, center=False): - sampling_rate = self.target_sr - n_mels = self.n_mels - n_fft = self.n_fft - win_size = self.win_size - hop_length = self.hop_length - fmin = self.fmin - fmax = self.fmax - clip_val = self.clip_val - - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - if fmax not in self.mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) - self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) - self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - # print(111,spec) - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - # print(222,spec) - spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec) - # print(333,spec) - spec = dynamic_range_compression_torch(spec, clip_val=clip_val) - # print(444,spec) - return spec - - def __call__(self, audiopath): - audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr) - spect = self.get_mel(audio.unsqueeze(0)).squeeze(0) - return spect - -stft = STFT() diff --git 
a/spaces/JUNGU/emotion-ko-state/README.md b/spaces/JUNGU/emotion-ko-state/README.md deleted file mode 100644 index 48eb244083a0ac6a51b0298cd043231b60b3fde8..0000000000000000000000000000000000000000 --- a/spaces/JUNGU/emotion-ko-state/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Emotion Ko State -emoji: 🦀 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Jamkonams/AutoGPT/autogpt/memory/pinecone.py b/spaces/Jamkonams/AutoGPT/autogpt/memory/pinecone.py deleted file mode 100644 index 27fcd62482d0cf44e02fa1c339195be58cb745b0..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/autogpt/memory/pinecone.py +++ /dev/null @@ -1,75 +0,0 @@ -import pinecone -from colorama import Fore, Style - -from autogpt.llm_utils import create_embedding_with_ada -from autogpt.logs import logger -from autogpt.memory.base import MemoryProviderSingleton - - -class PineconeMemory(MemoryProviderSingleton): - def __init__(self, cfg): - pinecone_api_key = cfg.pinecone_api_key - pinecone_region = cfg.pinecone_region - pinecone.init(api_key=pinecone_api_key, environment=pinecone_region) - dimension = 1536 - metric = "cosine" - pod_type = "p1" - table_name = "auto-gpt" - # this assumes we don't start with memory. - # for now this works. - # we'll need a more complicated and robust system if we want to start with - # memory. - self.vec_num = 0 - - try: - pinecone.whoami() - except Exception as e: - logger.typewriter_log( - "FAILED TO CONNECT TO PINECONE", - Fore.RED, - Style.BRIGHT + str(e) + Style.RESET_ALL, - ) - logger.double_check( - "Please ensure you have setup and configured Pinecone properly for use." - + f"You can check out {Fore.CYAN + Style.BRIGHT}" - "https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup" - f"{Style.RESET_ALL} to ensure you've set up everything correctly." - ) - exit(1) - - if table_name not in pinecone.list_indexes(): - pinecone.create_index( - table_name, dimension=dimension, metric=metric, pod_type=pod_type - ) - self.index = pinecone.Index(table_name) - - def add(self, data): - vector = create_embedding_with_ada(data) - # no metadata here. We may wish to change that long term. - self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})]) - _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}" - self.vec_num += 1 - return _text - - def get(self, data): - return self.get_relevant(data, 1) - - def clear(self): - self.index.delete(deleteAll=True) - return "Obliviated" - - def get_relevant(self, data, num_relevant=5): - """ - Returns all the data in the memory that is relevant to the given data. - :param data: The data to compare to. - :param num_relevant: The number of relevant data to return. 
Defaults to 5 - """ - query_embedding = create_embedding_with_ada(data) - results = self.index.query( - query_embedding, top_k=num_relevant, include_metadata=True - ) - sorted_results = sorted(results.matches, key=lambda x: x.score) - return [str(item["metadata"]["raw_text"]) for item in sorted_results] - - def get_stats(self): - return self.index.describe_index_stats() diff --git a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Rabbit/con_rabbit_logreg.py b/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Rabbit/con_rabbit_logreg.py deleted file mode 100644 index 1cc3b96da1275f1d01afb4c999f9c404d09b247a..0000000000000000000000000000000000000000 --- a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Rabbit/con_rabbit_logreg.py +++ /dev/null @@ -1,37 +0,0 @@ -import cv2 -import numpy as np -from PIL import Image -import pickle -import tensorflow as tf -import os - -class rabbitsLogReg: - def __init__(self,url) -> None: - self.image = url - - def predict_image(self): - # Load the model - load_extractor = tf.keras.models.load_model("././Model/Rabbit/resnetLogreg/resnet_EXTRACTOR.h5") - - modelpath = "././Model/Rabbit/resnetLogreg/dataSaved.pkl" - - with open(modelpath, 'rb') as file: - saved_data = pickle.load(file) - animal_breed = saved_data['class_name'] - model = saved_data['logreg_model'] - - im = Image.open(self.image) - img = im.convert("RGB") - img= np.asarray(img) - image_resized= cv2.resize(img, (224,224)) - features = load_extractor.predict(np.expand_dims(image_resized, axis=0)) - - reshaped_features = features.reshape(features.shape[0],-1) - predicted_class = model.predict(reshaped_features) - pred_prob = model.predict_proba(reshaped_features)[:2] - prediction_probability = pred_prob[0][predicted_class[0]] - predicted_class - - output_class= animal_breed[predicted_class[0]] - - return [output_class, prediction_probability] diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT/readme/README_en.md b/spaces/JohnSmith9982/ChuanhuChatGPT/readme/README_en.md deleted file mode 100644 index 80af4fbbfba5d15e1cb6d1f4b67808ca76fa37d7..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT/readme/README_en.md +++ /dev/null @@ -1,140 +0,0 @@ -
    - - 简体中文 | English | 日本語 -
    - -

    川虎 Chat 🐯 Chuanhu Chat

    -
    - - Logo - - -

    -

    Lightweight and User-friendly Web-UI for LLMs including ChatGPT/ChatGLM/LLaMA

    -

    - - Tests Passing - - - GitHub Contributors - - - GitHub pull requests - -

    - Streaming / Unlimited conversations / Save history / Preset prompts / Chat with files / Web search
    - LaTeX rendering / Table rendering / Code highlighting
    - Auto dark mode / Adaptive web interface / WeChat-like theme
    - Multi-parameters tuning / Multi-API-Key support / Multi-user support
    - Compatible with GPT-4 / Local deployment for LLMs -

    - Video Tutorial - · - 2.0 Introduction - · - 3.0 Introduction & Tutorial - || - Online trial - · - One-Click deployment -

    -

    - Animation Demo -

    -

    -
    - -## Supported LLM Models - -**LLM models via API**: - -- [ChatGPT](https://chat.openai.com) ([GPT-4](https://openai.com/product/gpt-4)) -- [Google PaLM](https://developers.generativeai.google/products/palm) -- [Inspur Yuan 1.0](https://air.inspur.com/home) -- [MiniMax](https://api.minimax.chat/) -- [XMChat](https://github.com/MILVLG/xmchat) - -**LLM models via local deployment**: - -- [ChatGLM](https://github.com/THUDM/ChatGLM-6B) ([ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)) -- [LLaMA](https://github.com/facebookresearch/llama) -- [StableLM](https://github.com/Stability-AI/StableLM) -- [MOSS](https://github.com/OpenLMLab/MOSS) - -## Usage Tips - -- To better control the ChatGPT, use System Prompt. -- To use a Prompt Template, select the Prompt Template Collection file first, and then choose certain prompt from the drop-down menu. -- To try again if the response is unsatisfactory, use `🔄 Regenerate` button. -- To start a new line in the input box, press Shift + Enter keys. -- To quickly switch between input history, press and key in the input box. -- To deploy the program onto a server, set `"server_name": "0.0.0.0", "server_port" ,` in `config.json`. -- To get a public shared link, set `"share": true,` in `config.json`. Please be noted that the program must be running in order to be accessed via a public link. -- To use it in Hugging Face Spaces: It is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience. - -## Quickstart - -```shell -git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git -cd ChuanhuChatGPT -pip install -r requirements.txt -``` - -Then make a copy of `config_example.json`, rename it to `config.json`, and then fill in your API-Key and other settings in the file. - -```shell -python ChuanhuChatbot.py -``` - -A browser window will open and you will be able to chat with ChatGPT. - -> **Note** -> -> Please check our [wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) for detailed instructions. - -## Troubleshooting - -When you encounter problems, you should try manually pulling the latest changes of this project first. The steps are as follows: - -1. Download the latest code archive by clicking on `Download ZIP` on the webpage, or - ```shell - git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f - ``` -2. Try installing the dependencies again (as this project may have introduced new dependencies) - ``` - pip install -r requirements.txt - ``` - -Generally, you can solve most problems by following these steps. - -If the problem still exists, please refer to this page: [Frequently Asked Questions (FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) - -This page lists almost all the possible problems and solutions. Please read it carefully. 
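Before filing an issue, it is also worth double-checking `config.json`: the usage tips above reference several fields (`server_name`, `server_port`, `share`) without showing them together. As a minimal sketch — the port number is only an example, and a real `config.json` will also carry other settings such as your API key — a server-deployment fragment could be created like this:

```shell
# Hedged example: field names follow the usage tips above; adjust the values to your host.
cat > config.json <<'EOF'
{
  "server_name": "0.0.0.0",
  "server_port": 7860,
  "share": true
}
EOF
```

As noted in the tips above, the public link produced by `"share": true` only stays reachable while the program is running.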
- -## More Information - -More information could be found in our [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki): - -- [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization) -- [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南) -- [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目) -- [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志) -- [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可) - -## Starchart - -[![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date) - -## Contributors - - - - - -## Sponsor - -🐯 If you find this project helpful, feel free to buy me a coke or a cup of coffee~ - -Buy Me A Coffee - -image diff --git a/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/utils/asserts.py b/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/utils/asserts.py deleted file mode 100644 index 59a73cc04025762d6490fcd2945a747d963def32..0000000000000000000000000000000000000000 --- a/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/utils/asserts.py +++ /dev/null @@ -1,13 +0,0 @@ -from os import environ - - -def assert_in(file, files_to_check): - if file not in files_to_check: - raise AssertionError("{} does not exist in the list".format(str(file))) - return True - - -def assert_in_env(check_list: list): - for item in check_list: - assert_in(item, environ.keys()) - return True diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/ui/streamlit_utils.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/ui/streamlit_utils.py deleted file mode 100644 index beb6e65c61f8a16b4376494123f31178cdb88bde..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/ui/streamlit_utils.py +++ /dev/null @@ -1,13 +0,0 @@ -CUSTOM_STREAMLIT_CSS = """ -div[data-testid="stBlock"] button { - width: 100% !important; - margin-bottom: 20px !important; - border-color: #bfbfbf !important; -} -section[data-testid="stSidebar"] div { - max-width: 10rem; -} -pre code { - white-space: pre-wrap; -} -""" diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/utils/util.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/utils/util.py deleted file mode 100644 index 34bcffd6c0975377a54ae1ce89002be1dae8151d..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/utils/util.py +++ /dev/null @@ -1,50 +0,0 @@ -import matplotlib -matplotlib.use('Agg') -import time - -class Timer(): - ''' Timer for recording training time distribution. 
''' - def __init__(self): - self.prev_t = time.time() - self.clear() - - def set(self): - self.prev_t = time.time() - - def cnt(self, mode): - self.time_table[mode] += time.time()-self.prev_t - self.set() - if mode == 'bw': - self.click += 1 - - def show(self): - total_time = sum(self.time_table.values()) - self.time_table['avg'] = total_time/self.click - self.time_table['rd'] = 100*self.time_table['rd']/total_time - self.time_table['fw'] = 100*self.time_table['fw']/total_time - self.time_table['bw'] = 100*self.time_table['bw']/total_time - msg = '{avg:.3f} sec/step (rd {rd:.1f}% | fw {fw:.1f}% | bw {bw:.1f}%)'.format( - **self.time_table) - self.clear() - return msg - - def clear(self): - self.time_table = {'rd': 0, 'fw': 0, 'bw': 0} - self.click = 0 - -# Reference : https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/e2e_asr.py#L168 - -def human_format(num): - magnitude = 0 - while num >= 1000: - magnitude += 1 - num /= 1000.0 - # add more suffixes if you need them - return '{:3.1f}{}'.format(num, [' ', 'K', 'M', 'G', 'T', 'P'][magnitude]) - - -# provide easy access of attribute from dict, such abc.key -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self diff --git a/spaces/KevinQHLin/UniVTG/main/train_mr.py b/spaces/KevinQHLin/UniVTG/main/train_mr.py deleted file mode 100644 index 1a10d029f81f86733d6dab71a3aee575917b092b..0000000000000000000000000000000000000000 --- a/spaces/KevinQHLin/UniVTG/main/train_mr.py +++ /dev/null @@ -1,266 +0,0 @@ -import os -import pdb -import sys -import time -import json -import pprint -import random -import numpy as np -from tqdm import tqdm, trange -from collections import defaultdict - -import torch -import torch.nn as nn -import torch.backends.cudnn as cudnn -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter - -sys.path.append('/data/home/qinghonglin/univtg') -from main.config import BaseOptions, setup_model -from main.dataset import \ - DatasetMR, start_end_collate_mr, prepare_batch_inputs_mr -from main.inference_mr import eval_epoch, start_inference -from utils.basic_utils import set_seed, AverageMeter, dict_to_markdown -from utils.model_utils import count_parameters - -import logging -logger = logging.getLogger(__name__) -logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=logging.INFO) - -def train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer): - logger.info(f"[Epoch {epoch_i+1}]") - model.train() - criterion.train() - - # init meters - time_meters = defaultdict(AverageMeter) - loss_meters = defaultdict(AverageMeter) - - num_training_examples = len(train_loader) - timer_dataloading = time.time() - for batch_idx, batch in tqdm(enumerate(train_loader), - desc="Training Iteration", - total=num_training_examples): - time_meters["dataloading_time"].update(time.time() - timer_dataloading) - - timer_start = time.time() - model_inputs, targets = prepare_batch_inputs_mr(batch[1], opt.device, non_blocking=opt.pin_memory) - time_meters["prepare_inputs_time"].update(time.time() - timer_start) - - timer_start = time.time() - - # try: - outputs = model(**model_inputs) - loss_dict = criterion(outputs, targets) - weight_dict = criterion.weight_dict - losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) - time_meters["model_forward_time"].update(time.time() - timer_start) - - 
timer_start = time.time() - optimizer.zero_grad() - losses.backward() - - if opt.grad_clip > 0: - nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) - optimizer.step() - time_meters["model_backward_time"].update(time.time() - timer_start) - - loss_dict["loss_overall"] = float(losses) # for logging only - for k, v in loss_dict.items(): - loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v)) - - timer_dataloading = time.time() - - # print/add logs - tb_writer.add_scalar("Train/lr", float(optimizer.param_groups[0]["lr"]), epoch_i+1) - for k, v in loss_meters.items(): - tb_writer.add_scalar("Train/{}".format(k), v.avg, epoch_i+1) - - to_write = opt.train_log_txt_formatter.format( - time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), - epoch=epoch_i+1, - loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in loss_meters.items()])) - with open(opt.train_log_filepath, "a") as f: - f.write(to_write) - - logger.info("Epoch time stats:") - for name, meter in time_meters.items(): - d = {k: f"{getattr(meter, k):.4f}" for k in ["max", "min", "avg"]} - logger.info(f"{name} ==> {d}") - - -def train(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): - tb_writer = SummaryWriter(opt.tensorboard_log_dir) - tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) - opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" - opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" - - train_loader = DataLoader( - train_dataset, - collate_fn=start_end_collate_mr, - batch_size=opt.bsz, - num_workers=opt.num_workers, - shuffle=True, - pin_memory=opt.pin_memory - ) - - prev_best_score = 0. - es_cnt = 0 - if opt.start_epoch is None: - start_epoch = -1 if opt.eval_init else 0 - else: - start_epoch = opt.start_epoch - save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) - for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): - if epoch_i > -1: - train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) - lr_scheduler.step() - eval_epoch_interval = opt.eval_epoch - if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: - with torch.no_grad(): - metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ - eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) - - # log - to_write = opt.eval_log_txt_formatter.format( - time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), - epoch=epoch_i, - loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), - eval_metrics_str=json.dumps(metrics_no_nms)) - - with open(opt.eval_log_filepath, "a") as f: - f.write(to_write) - logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) - if metrics_nms is not None: - logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) - - metrics = metrics_nms if metrics_nms is not None else metrics_no_nms - for k, v in metrics["brief"].items(): - tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) - - # stop_score = metrics["brief"]["MR-full-mAP"] - # pdb.set_trace() - stop_score = metrics["brief"][opt.main_metric] - if stop_score > prev_best_score: - es_cnt = 0 - prev_best_score = stop_score - - checkpoint = { - "model": model.state_dict(), - "optimizer": optimizer.state_dict(), - "lr_scheduler": lr_scheduler.state_dict(), - "epoch": epoch_i, - "opt": opt - 
} - torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) - - best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] - for src, tgt in zip(latest_file_paths, best_file_paths): - os.renames(src, tgt) - logger.info("The checkpoint file has been updated.") - else: - es_cnt += 1 - if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop - with open(opt.train_log_filepath, "a") as f: - f.write(f"Early Stop at epoch {epoch_i}") - logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") - break - - # save ckpt - checkpoint = { - "model": model.state_dict(), - "optimizer": optimizer.state_dict(), - "lr_scheduler": lr_scheduler.state_dict(), - "epoch": epoch_i, - "opt": opt - } - torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) - - if (epoch_i + 1) % opt.save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies - checkpoint = { - "model": model.state_dict(), - "optimizer": optimizer.state_dict(), - "epoch": epoch_i, - "opt": opt - } - torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) - - if opt.debug: - break - - tb_writer.close() - - -def start_training(): - logger.info("Setup config, data and model...") - opt = BaseOptions().parse() - set_seed(opt.seed) - if opt.debug: # keep the model run deterministically - # 'cudnn.benchmark = True' enabled auto finding the best algorithm for a specific input/net config. - # Enable this only when input size is fixed. - cudnn.benchmark = False - cudnn.deterministic = True - - dataset_config = dict( - dset_name=opt.dset_name, - data_path=opt.train_path, - v_feat_dirs=opt.v_feat_dirs, - q_feat_dir=opt.t_feat_dir, - v_feat_dim=opt.v_feat_dim, - q_feat_dim=opt.t_feat_dim, - q_feat_type="last_hidden_state", - max_q_l=opt.max_q_l, - max_v_l=opt.max_v_l, - ctx_mode=opt.ctx_mode, - data_ratio=opt.data_ratio, - normalize_v=not opt.no_norm_vfeat, - normalize_t=not opt.no_norm_tfeat, - clip_len=opt.clip_length, - max_windows=opt.max_windows, - span_loss_type=opt.span_loss_type, - txt_drop_ratio=opt.txt_drop_ratio, - use_cache=opt.use_cache, - add_easy_negative=opt.add_easy_negative, - easy_negative_only=opt.easy_negative_only - ) - - dataset_config["data_path"] = opt.train_path - train_dataset = DatasetMR(**dataset_config) - - if opt.eval_path is not None: - dataset_config["data_path"] = opt.eval_path - dataset_config["txt_drop_ratio"] = 0 - dataset_config["q_feat_dir"] = opt.t_feat_dir.replace("txt_clip_asr", "txt_clip").replace("txt_clip_cap", "txt_clip") # for pretraining - # dataset_config["load_labels"] = False # uncomment to calculate eval loss - eval_dataset = DatasetMR(**dataset_config) - else: - eval_dataset = None - - if opt.lr_warmup > 0: - # total_steps = opt.n_epoch * len(train_dataset) // opt.bsz - total_steps = opt.n_epoch - warmup_steps = opt.lr_warmup if opt.lr_warmup > 1 else int(opt.lr_warmup * total_steps) - opt.lr_warmup = [warmup_steps, total_steps] - model, criterion, optimizer, lr_scheduler = setup_model(opt) - logger.info(f"Model {model}") - count_parameters(model) - logger.info("Start Training...") - train(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt) - return opt.ckpt_filepath.replace(".ckpt", "_best.ckpt"), opt.eval_split_name, opt.eval_path, opt.debug - - -if __name__ == '__main__': - best_ckpt_path, eval_split_name, eval_path, debug = start_training() - if not debug: - input_args = ["--resume", best_ckpt_path, - "--eval_split_name", eval_split_name, - "--eval_path", 
eval_path] - - import sys - sys.argv[1:] = input_args - logger.info("\n\n\nFINISHED TRAINING!!!") - logger.info("Evaluating model at {}".format(best_ckpt_path)) - logger.info("Input args {}".format(sys.argv[1:])) - start_inference() \ No newline at end of file diff --git a/spaces/Kimata/Sanskrit-TTS/indic_nlp_resources/transliterate/README.md b/spaces/Kimata/Sanskrit-TTS/indic_nlp_resources/transliterate/README.md deleted file mode 100644 index 1f55e11e80f6fc5ebbf42dade0266e3d4ee06ce4..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/indic_nlp_resources/transliterate/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# Transliteration Models for Indian languages -These are models for transliteration involving Indian languages. -The models are essentially Statistical Machine Translation systems trained using Moses over a -character-level parallel corpora of transliterations. Hence, you will need Moses to use these transliteration models. -The transliteration corpus has itself been mined in an unsupervised fashion from a translation corpus. - -Currently we have trained transliteration models for five language pairs: bn-hi, ta-hi, te-hi, en-hi and mr-hi - -Support for transliteration has been introduced in Moses from version 2.1 -So please ensure that you have minimum 2.1 version setup for Moses - -Commands to run the transliteration module using moses - -$moseshome/mosesdecoder/scripts/Transliteration/post-decoding-transliteration.pl \ ---moses-src-dir $moseshome/mosesdecoder --external-bin-dir $moseshome/tools \ ---transliteration-model-dir {path to transliteration model folder} --oov-file {path to file containing oov words, oovs are space separated with each line containing all oovs for the input line}\ - --input-file {input file to transliterated} --output-file {output file location} \ - --input-extension {input language code for eg. en} --output-extension {output language code for eg. hi} --language-model {path to language model} \ - --decoder $moseshome/mosesdecoder/bin/moses - -A sample execution of the model will be as follows: - -export moseshome={path to moses installation} -$moseshome/mosesdecoder/scripts/Transliteration/post-decoding-transliteration.pl \ ---moses-src-dir $moseshome/mosesdecoder --external-bin-dir $moseshome/tools \ ---transliteration-model-dir /home/ratish/project/nlp_resources/indic_nlp_resources/transliterate/en-hi \ ---oov-file /home/ratish/project/translit/input.oov \ - --input-file /home/ratish/project/translit/input.en \ - --output-file /home/ratish/project/translit/output.hi \ - --input-extension en --output-extension hi --language-model /home/ratish/project/translit/lm/nc.binlm.1 \ - --decoder $moseshome/mosesdecoder/bin/moses - -So far, we have seen the use of transliteration in a post-editing task for machine translation task. -In case, the models are needed for purely transliteration purpose, the input file and OOV file are the same. 
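A quick way to prepare that setup (file names follow the sample execution above; this is only a sketch, not part of the original tooling) is to copy the input file so that every line is treated as OOV:

cp input.en input.oov

The sample files below show the resulting, identical contents.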
-Sample input file: -New Delhi is capital of India -India is worlds seventh largest nation in the World - -OOV file -New Delhi is capital of India -India is worlds seventh largest nation in the World - -On running the transliteration module, the output is: -न्यू डेल्ही इस कैपिटल आफ इंडिया -इंडिया इस वर्ल्ड सेवंथ लारगेस्ट नेशन इन थे वर्ल्ड diff --git a/spaces/KyanChen/RSPrompter/mmdet/datasets/samplers/batch_sampler.py b/spaces/KyanChen/RSPrompter/mmdet/datasets/samplers/batch_sampler.py deleted file mode 100644 index 980440eb3434e48b7cad90f0577c838a05d787b4..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/datasets/samplers/batch_sampler.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Sequence - -from torch.utils.data import BatchSampler, Sampler - -from mmdet.registry import DATA_SAMPLERS - - -# TODO: maybe replace with a data_loader wrapper -@DATA_SAMPLERS.register_module() -class AspectRatioBatchSampler(BatchSampler): - """A sampler wrapper for grouping images with similar aspect ratio (< 1 or. - - >= 1) into a same batch. - - Args: - sampler (Sampler): Base sampler. - batch_size (int): Size of mini-batch. - drop_last (bool): If ``True``, the sampler will drop the last batch if - its size would be less than ``batch_size``. - """ - - def __init__(self, - sampler: Sampler, - batch_size: int, - drop_last: bool = False) -> None: - if not isinstance(sampler, Sampler): - raise TypeError('sampler should be an instance of ``Sampler``, ' - f'but got {sampler}') - if not isinstance(batch_size, int) or batch_size <= 0: - raise ValueError('batch_size should be a positive integer value, ' - f'but got batch_size={batch_size}') - self.sampler = sampler - self.batch_size = batch_size - self.drop_last = drop_last - # two groups for w < h and w >= h - self._aspect_ratio_buckets = [[] for _ in range(2)] - - def __iter__(self) -> Sequence[int]: - for idx in self.sampler: - data_info = self.sampler.dataset.get_data_info(idx) - width, height = data_info['width'], data_info['height'] - bucket_id = 0 if width < height else 1 - bucket = self._aspect_ratio_buckets[bucket_id] - bucket.append(idx) - # yield a batch of indices in the same aspect ratio group - if len(bucket) == self.batch_size: - yield bucket[:] - del bucket[:] - - # yield the rest data and reset the bucket - left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[ - 1] - self._aspect_ratio_buckets = [[] for _ in range(2)] - while len(left_data) > 0: - if len(left_data) <= self.batch_size: - if not self.drop_last: - yield left_data[:] - left_data = [] - else: - yield left_data[:self.batch_size] - left_data = left_data[self.batch_size:] - - def __len__(self) -> int: - if self.drop_last: - return len(self.sampler) // self.batch_size - else: - return (len(self.sampler) + self.batch_size - 1) // self.batch_size diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/vfnet_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/vfnet_head.py deleted file mode 100644 index 430b06d085d94760d56a7ea083eaf23bd32b1f53..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/vfnet_head.py +++ /dev/null @@ -1,722 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from typing import List, Tuple, Union - -import numpy as np -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, Scale -from mmcv.ops import DeformConv2d -from torch import Tensor - -from mmdet.registry import MODELS, TASK_UTILS -from mmdet.structures.bbox import bbox_overlaps -from mmdet.utils import (ConfigType, InstanceList, MultiConfig, - OptInstanceList, RangeType, reduce_mean) -from ..task_modules.prior_generators import MlvlPointGenerator -from ..task_modules.samplers import PseudoSampler -from ..utils import multi_apply -from .atss_head import ATSSHead -from .fcos_head import FCOSHead - -INF = 1e8 - - -@MODELS.register_module() -class VFNetHead(ATSSHead, FCOSHead): - """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object - Detector.`_. - - The VFNet predicts IoU-aware classification scores which mix the - object presence confidence and object localization accuracy as the - detection score. It is built on the FCOS architecture and uses ATSS - for defining positive/negative training examples. The VFNet is trained - with Varifocal Loss and empolys star-shaped deformable convolution to - extract features for a bbox. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple - level points. - center_sampling (bool): If true, use center sampling. Defaults to False. - center_sample_radius (float): Radius of center sampling. Defaults to 1.5. - sync_num_pos (bool): If true, synchronize the number of positive - examples across GPUs. Defaults to True - gradient_mul (float): The multiplier to gradients from bbox refinement - and recognition. Defaults to 0.1. - bbox_norm_type (str): The bbox normalization type, 'reg_denom' or - 'stride'. Defaults to reg_denom - loss_cls_fl (:obj:`ConfigDict` or dict): Config of focal loss. - use_vfl (bool): If true, use varifocal loss for training. - Defaults to True. - loss_cls (:obj:`ConfigDict` or dict): Config of varifocal loss. - loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss, - GIoU Loss. - loss_bbox (:obj:`ConfigDict` or dict): Config of localization - refinement loss, GIoU Loss. - norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and - config norm layer. Defaults to norm_cfg=dict(type='GN', - num_groups=32, requires_grad=True). - use_atss (bool): If true, use ATSS to define positive/negative - examples. Defaults to True. - anchor_generator (:obj:`ConfigDict` or dict): Config of anchor - generator for ATSS. - init_cfg (:obj:`ConfigDict` or dict or list[dict] or - list[:obj:`ConfigDict`]): Initialization config dict. 
- - Example: - >>> self = VFNetHead(11, 7) - >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] - >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats) - >>> assert len(cls_score) == len(self.scales) - """ # noqa: E501 - - def __init__(self, - num_classes: int, - in_channels: int, - regress_ranges: RangeType = ((-1, 64), (64, 128), (128, 256), - (256, 512), (512, INF)), - center_sampling: bool = False, - center_sample_radius: float = 1.5, - sync_num_pos: bool = True, - gradient_mul: float = 0.1, - bbox_norm_type: str = 'reg_denom', - loss_cls_fl: ConfigType = dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - use_vfl: bool = True, - loss_cls: ConfigType = dict( - type='VarifocalLoss', - use_sigmoid=True, - alpha=0.75, - gamma=2.0, - iou_weighted=True, - loss_weight=1.0), - loss_bbox: ConfigType = dict( - type='GIoULoss', loss_weight=1.5), - loss_bbox_refine: ConfigType = dict( - type='GIoULoss', loss_weight=2.0), - norm_cfg: ConfigType = dict( - type='GN', num_groups=32, requires_grad=True), - use_atss: bool = True, - reg_decoded_bbox: bool = True, - anchor_generator: ConfigType = dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - center_offset=0.0, - strides=[8, 16, 32, 64, 128]), - init_cfg: MultiConfig = dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='vfnet_cls', - std=0.01, - bias_prob=0.01)), - **kwargs) -> None: - # dcn base offsets, adapted from reppoints_head.py - self.num_dconv_points = 9 - self.dcn_kernel = int(np.sqrt(self.num_dconv_points)) - self.dcn_pad = int((self.dcn_kernel - 1) / 2) - dcn_base = np.arange(-self.dcn_pad, - self.dcn_pad + 1).astype(np.float64) - dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) - dcn_base_x = np.tile(dcn_base, self.dcn_kernel) - dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( - (-1)) - self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) - - super(FCOSHead, self).__init__( - num_classes=num_classes, - in_channels=in_channels, - norm_cfg=norm_cfg, - init_cfg=init_cfg, - **kwargs) - self.regress_ranges = regress_ranges - self.reg_denoms = [ - regress_range[-1] for regress_range in regress_ranges - ] - self.reg_denoms[-1] = self.reg_denoms[-2] * 2 - self.center_sampling = center_sampling - self.center_sample_radius = center_sample_radius - self.sync_num_pos = sync_num_pos - self.bbox_norm_type = bbox_norm_type - self.gradient_mul = gradient_mul - self.use_vfl = use_vfl - if self.use_vfl: - self.loss_cls = MODELS.build(loss_cls) - else: - self.loss_cls = MODELS.build(loss_cls_fl) - self.loss_bbox = MODELS.build(loss_bbox) - self.loss_bbox_refine = MODELS.build(loss_bbox_refine) - - # for getting ATSS targets - self.use_atss = use_atss - self.reg_decoded_bbox = reg_decoded_bbox - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - - self.anchor_center_offset = anchor_generator['center_offset'] - - self.num_base_priors = self.prior_generator.num_base_priors[0] - - if self.train_cfg: - self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) - if self.train_cfg.get('sampler', None) is not None: - self.sampler = TASK_UTILS.build( - self.train_cfg['sampler'], default_args=dict(context=self)) - else: - self.sampler = PseudoSampler() - # only be used in `get_atss_targets` when `use_atss` is True - self.atss_prior_generator = TASK_UTILS.build(anchor_generator) - - self.fcos_prior_generator = MlvlPointGenerator( - anchor_generator['strides'], - 
self.anchor_center_offset if self.use_atss else 0.5) - - # In order to reuse the `get_bboxes` in `BaseDenseHead. - # Only be used in testing phase. - self.prior_generator = self.fcos_prior_generator - - def _init_layers(self) -> None: - """Initialize layers of the head.""" - super(FCOSHead, self)._init_cls_convs() - super(FCOSHead, self)._init_reg_convs() - self.relu = nn.ReLU() - self.vfnet_reg_conv = ConvModule( - self.feat_channels, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.conv_bias) - self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) - self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) - - self.vfnet_reg_refine_dconv = DeformConv2d( - self.feat_channels, - self.feat_channels, - self.dcn_kernel, - 1, - padding=self.dcn_pad) - self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1) - self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides]) - - self.vfnet_cls_dconv = DeformConv2d( - self.feat_channels, - self.feat_channels, - self.dcn_kernel, - 1, - padding=self.dcn_pad) - self.vfnet_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - - def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: - """Forward features from the upstream network. - - Args: - x (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: - - - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level, each is a 4D-tensor, the channel number is - num_points * num_classes. - - bbox_preds (list[Tensor]): Box offsets for each - scale level, each is a 4D-tensor, the channel number is - num_points * 4. - - bbox_preds_refine (list[Tensor]): Refined Box offsets for - each scale level, each is a 4D-tensor, the channel - number is num_points * 4. - """ - return multi_apply(self.forward_single, x, self.scales, - self.scales_refine, self.strides, self.reg_denoms) - - def forward_single(self, x: Tensor, scale: Scale, scale_refine: Scale, - stride: int, reg_denom: int) -> tuple: - """Forward features of a single scale level. - - Args: - x (Tensor): FPN feature maps of the specified stride. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to - resize the refined bbox prediction. - stride (int): The corresponding stride for feature maps, - used to normalize the bbox prediction when - bbox_norm_type = 'stride'. - reg_denom (int): The corresponding regression range for feature - maps, only used to normalize the bbox prediction when - bbox_norm_type = 'reg_denom'. - - Returns: - tuple: iou-aware cls scores for each box, bbox predictions and - refined bbox predictions of input feature maps. 
- """ - cls_feat = x - reg_feat = x - - for cls_layer in self.cls_convs: - cls_feat = cls_layer(cls_feat) - - for reg_layer in self.reg_convs: - reg_feat = reg_layer(reg_feat) - - # predict the bbox_pred of different level - reg_feat_init = self.vfnet_reg_conv(reg_feat) - if self.bbox_norm_type == 'reg_denom': - bbox_pred = scale( - self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom - elif self.bbox_norm_type == 'stride': - bbox_pred = scale( - self.vfnet_reg(reg_feat_init)).float().exp() * stride - else: - raise NotImplementedError - - # compute star deformable convolution offsets - # converting dcn_offset to reg_feat.dtype thus VFNet can be - # trained with FP16 - dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul, - stride).to(reg_feat.dtype) - - # refine the bbox_pred - reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset)) - bbox_pred_refine = scale_refine( - self.vfnet_reg_refine(reg_feat)).float().exp() - bbox_pred_refine = bbox_pred_refine * bbox_pred.detach() - - # predict the iou-aware cls score - cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset)) - cls_score = self.vfnet_cls(cls_feat) - - if self.training: - return cls_score, bbox_pred, bbox_pred_refine - else: - return cls_score, bbox_pred_refine - - def star_dcn_offset(self, bbox_pred: Tensor, gradient_mul: float, - stride: int) -> Tensor: - """Compute the star deformable conv offsets. - - Args: - bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b). - gradient_mul (float): Gradient multiplier. - stride (int): The corresponding stride for feature maps, - used to project the bbox onto the feature map. - - Returns: - Tensor: The offsets for deformable convolution. - """ - dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred) - bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \ - gradient_mul * bbox_pred - # map to the feature map scale - bbox_pred_grad_mul = bbox_pred_grad_mul / stride - N, C, H, W = bbox_pred.size() - - x1 = bbox_pred_grad_mul[:, 0, :, :] - y1 = bbox_pred_grad_mul[:, 1, :, :] - x2 = bbox_pred_grad_mul[:, 2, :, :] - y2 = bbox_pred_grad_mul[:, 3, :, :] - bbox_pred_grad_mul_offset = bbox_pred.new_zeros( - N, 2 * self.num_dconv_points, H, W) - bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1 - bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1 - bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1 - bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1 - bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2 - bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1 - bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2 - bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2 - bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1 - bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2 - bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2 - bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2 - dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset - - return dcn_offset - - def loss_by_feat( - self, - cls_scores: List[Tensor], - bbox_preds: List[Tensor], - bbox_preds_refine: List[Tensor], - batch_gt_instances: InstanceList, - batch_img_metas: List[dict], - batch_gt_instances_ignore: OptInstanceList = None) -> dict: - """Compute loss of the head. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level, each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box offsets for each - scale level, each is a 4D-tensor, the channel number is - num_points * 4. 
- bbox_preds_refine (list[Tensor]): Refined Box offsets for - each scale level, each is a 4D-tensor, the channel - number is num_points * 4. - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes`` and ``labels`` - attributes. - batch_img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): - Batch of gt_instances_ignore. It includes ``bboxes`` attribute - data that is ignored during training and testing. - Defaults to None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - all_level_points = self.fcos_prior_generator.grid_priors( - featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) - labels, label_weights, bbox_targets, bbox_weights = self.get_targets( - cls_scores, - all_level_points, - batch_gt_instances, - batch_img_metas, - batch_gt_instances_ignore=batch_gt_instances_ignore) - - num_imgs = cls_scores[0].size(0) - # flatten cls_scores, bbox_preds and bbox_preds_refine - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, - 1).reshape(-1, - self.cls_out_channels).contiguous() - for cls_score in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() - for bbox_pred in bbox_preds - ] - flatten_bbox_preds_refine = [ - bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() - for bbox_pred_refine in bbox_preds_refine - ] - flatten_cls_scores = torch.cat(flatten_cls_scores) - flatten_bbox_preds = torch.cat(flatten_bbox_preds) - flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine) - flatten_labels = torch.cat(labels) - flatten_bbox_targets = torch.cat(bbox_targets) - # repeat points to align with bbox_preds - flatten_points = torch.cat( - [points.repeat(num_imgs, 1) for points in all_level_points]) - - # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = torch.where( - ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0] - num_pos = len(pos_inds) - - pos_bbox_preds = flatten_bbox_preds[pos_inds] - pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds] - pos_labels = flatten_labels[pos_inds] - - # sync num_pos across all gpus - if self.sync_num_pos: - num_pos_avg_per_gpu = reduce_mean( - pos_inds.new_tensor(num_pos).float()).item() - num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0) - else: - num_pos_avg_per_gpu = num_pos - - pos_bbox_targets = flatten_bbox_targets[pos_inds] - pos_points = flatten_points[pos_inds] - - pos_decoded_bbox_preds = self.bbox_coder.decode( - pos_points, pos_bbox_preds) - pos_decoded_target_preds = self.bbox_coder.decode( - pos_points, pos_bbox_targets) - iou_targets_ini = bbox_overlaps( - pos_decoded_bbox_preds, - pos_decoded_target_preds.detach(), - is_aligned=True).clamp(min=1e-6) - bbox_weights_ini = iou_targets_ini.clone().detach() - bbox_avg_factor_ini = reduce_mean( - bbox_weights_ini.sum()).clamp_(min=1).item() - - pos_decoded_bbox_preds_refine = \ - self.bbox_coder.decode(pos_points, pos_bbox_preds_refine) - iou_targets_rf = bbox_overlaps( - pos_decoded_bbox_preds_refine, - pos_decoded_target_preds.detach(), - is_aligned=True).clamp(min=1e-6) - bbox_weights_rf = iou_targets_rf.clone().detach() - bbox_avg_factor_rf = reduce_mean( - bbox_weights_rf.sum()).clamp_(min=1).item() - - if num_pos > 0: - loss_bbox 
= self.loss_bbox( - pos_decoded_bbox_preds, - pos_decoded_target_preds.detach(), - weight=bbox_weights_ini, - avg_factor=bbox_avg_factor_ini) - - loss_bbox_refine = self.loss_bbox_refine( - pos_decoded_bbox_preds_refine, - pos_decoded_target_preds.detach(), - weight=bbox_weights_rf, - avg_factor=bbox_avg_factor_rf) - - # build IoU-aware cls_score targets - if self.use_vfl: - pos_ious = iou_targets_rf.clone().detach() - cls_iou_targets = torch.zeros_like(flatten_cls_scores) - cls_iou_targets[pos_inds, pos_labels] = pos_ious - else: - loss_bbox = pos_bbox_preds.sum() * 0 - loss_bbox_refine = pos_bbox_preds_refine.sum() * 0 - if self.use_vfl: - cls_iou_targets = torch.zeros_like(flatten_cls_scores) - - if self.use_vfl: - loss_cls = self.loss_cls( - flatten_cls_scores, - cls_iou_targets, - avg_factor=num_pos_avg_per_gpu) - else: - loss_cls = self.loss_cls( - flatten_cls_scores, - flatten_labels, - weight=label_weights, - avg_factor=num_pos_avg_per_gpu) - - return dict( - loss_cls=loss_cls, - loss_bbox=loss_bbox, - loss_bbox_rf=loss_bbox_refine) - - def get_targets( - self, - cls_scores: List[Tensor], - mlvl_points: List[Tensor], - batch_gt_instances: InstanceList, - batch_img_metas: List[dict], - batch_gt_instances_ignore: OptInstanceList = None) -> tuple: - """A wrapper for computing ATSS and FCOS targets for points in multiple - images. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level with shape (N, num_points * num_classes, H, W). - mlvl_points (list[Tensor]): Points of each fpn level, each has - shape (num_points, 2). - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes`` and ``labels`` - attributes. - batch_img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): - Batch of gt_instances_ignore. It includes ``bboxes`` attribute - data that is ignored during training and testing. - Defaults to None. - - Returns: - tuple: - - - labels_list (list[Tensor]): Labels of each level. - - label_weights (Tensor/None): Label weights of all levels. - - bbox_targets_list (list[Tensor]): Regression targets of each - level, (l, t, r, b). - - bbox_weights (Tensor/None): Bbox weights of all levels. - """ - if self.use_atss: - return self.get_atss_targets(cls_scores, mlvl_points, - batch_gt_instances, batch_img_metas, - batch_gt_instances_ignore) - else: - self.norm_on_bbox = False - return self.get_fcos_targets(mlvl_points, batch_gt_instances) - - def _get_targets_single(self, *args, **kwargs): - """Avoid ambiguity in multiple inheritance.""" - if self.use_atss: - return ATSSHead._get_targets_single(self, *args, **kwargs) - else: - return FCOSHead._get_targets_single(self, *args, **kwargs) - - def get_fcos_targets(self, points: List[Tensor], - batch_gt_instances: InstanceList) -> tuple: - """Compute FCOS regression and classification targets for points in - multiple images. - - Args: - points (list[Tensor]): Points of each fpn level, each has shape - (num_points, 2). - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes`` and ``labels`` - attributes. - - Returns: - tuple: - - - labels (list[Tensor]): Labels of each level. - - label_weights: None, to be compatible with ATSS targets. - - bbox_targets (list[Tensor]): BBox targets of each level. - - bbox_weights: None, to be compatible with ATSS targets. 
- """ - labels, bbox_targets = FCOSHead.get_targets(self, points, - batch_gt_instances) - label_weights = None - bbox_weights = None - return labels, label_weights, bbox_targets, bbox_weights - - def get_anchors(self, - featmap_sizes: List[Tuple], - batch_img_metas: List[dict], - device: str = 'cuda') -> tuple: - """Get anchors according to feature map sizes. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - batch_img_metas (list[dict]): Image meta info. - device (str): Device for returned tensors - - Returns: - tuple: - - - anchor_list (list[Tensor]): Anchors of each image. - - valid_flag_list (list[Tensor]): Valid flags of each image. - """ - num_imgs = len(batch_img_metas) - - # since feature map sizes of all images are the same, we only compute - # anchors for one time - multi_level_anchors = self.atss_prior_generator.grid_priors( - featmap_sizes, device=device) - anchor_list = [multi_level_anchors for _ in range(num_imgs)] - - # for each image, we compute valid flags of multi level anchors - valid_flag_list = [] - for img_id, img_meta in enumerate(batch_img_metas): - multi_level_flags = self.atss_prior_generator.valid_flags( - featmap_sizes, img_meta['pad_shape'], device=device) - valid_flag_list.append(multi_level_flags) - - return anchor_list, valid_flag_list - - def get_atss_targets( - self, - cls_scores: List[Tensor], - mlvl_points: List[Tensor], - batch_gt_instances: InstanceList, - batch_img_metas: List[dict], - batch_gt_instances_ignore: OptInstanceList = None) -> tuple: - """A wrapper for computing ATSS targets for points in multiple images. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level with shape (N, num_points * num_classes, H, W). - mlvl_points (list[Tensor]): Points of each fpn level, each has - shape (num_points, 2). - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes`` and ``labels`` - attributes. - batch_img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): - Batch of gt_instances_ignore. It includes ``bboxes`` attribute - data that is ignored during training and testing. - Defaults to None. - - Returns: - tuple: - - - labels_list (list[Tensor]): Labels of each level. - - label_weights (Tensor): Label weights of all levels. - - bbox_targets_list (list[Tensor]): Regression targets of each - level, (l, t, r, b). - - bbox_weights (Tensor): Bbox weights of all levels. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len( - featmap_sizes - ) == self.atss_prior_generator.num_levels == \ - self.fcos_prior_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, batch_img_metas, device=device) - - cls_reg_targets = ATSSHead.get_targets( - self, - anchor_list, - valid_flag_list, - batch_gt_instances, - batch_img_metas, - batch_gt_instances_ignore, - unmap_outputs=True) - - (anchor_list, labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, avg_factor) = cls_reg_targets - - bbox_targets_list = [ - bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list - ] - - num_imgs = len(batch_img_metas) - # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format - bbox_targets_list = self.transform_bbox_targets( - bbox_targets_list, mlvl_points, num_imgs) - - labels_list = [labels.reshape(-1) for labels in labels_list] - label_weights_list = [ - label_weights.reshape(-1) for label_weights in label_weights_list - ] - bbox_weights_list = [ - bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list - ] - label_weights = torch.cat(label_weights_list) - bbox_weights = torch.cat(bbox_weights_list) - return labels_list, label_weights, bbox_targets_list, bbox_weights - - def transform_bbox_targets(self, decoded_bboxes: List[Tensor], - mlvl_points: List[Tensor], - num_imgs: int) -> List[Tensor]: - """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format. - - Args: - decoded_bboxes (list[Tensor]): Regression targets of each level, - in the form of (x1, y1, x2, y2). - mlvl_points (list[Tensor]): Points of each fpn level, each has - shape (num_points, 2). - num_imgs (int): the number of images in a batch. - - Returns: - bbox_targets (list[Tensor]): Regression targets of each level in - the form of (l, t, r, b). - """ - # TODO: Re-implemented in Class PointCoder - assert len(decoded_bboxes) == len(mlvl_points) - num_levels = len(decoded_bboxes) - mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points] - bbox_targets = [] - for i in range(num_levels): - bbox_target = self.bbox_coder.encode(mlvl_points[i], - decoded_bboxes[i]) - bbox_targets.append(bbox_target) - - return bbox_targets - - def _load_from_state_dict(self, state_dict: dict, prefix: str, - local_metadata: dict, strict: bool, - missing_keys: Union[List[str], str], - unexpected_keys: Union[List[str], str], - error_msgs: Union[List[str], str]) -> None: - """Override the method in the parent class to avoid changing para's - name.""" - pass diff --git a/spaces/Larvuz/instruct-pix2pix/edit_app.py b/spaces/Larvuz/instruct-pix2pix/edit_app.py deleted file mode 100644 index 0359e815ad51b1a2291dd8943555568e452981ad..0000000000000000000000000000000000000000 --- a/spaces/Larvuz/instruct-pix2pix/edit_app.py +++ /dev/null @@ -1,192 +0,0 @@ -from __future__ import annotations - -import math -import random - -import gradio as gr -import torch -from PIL import Image, ImageOps -from diffusers import StableDiffusionInstructPix2PixPipeline - - -help_text = """ -If you're not getting what you want, there may be a few reasons: -1. Is the image not changing enough? Your Image CFG weight may be too high. This value dictates how similar the output should be to the input. It's possible your edit requires larger changes from the original image, and your Image CFG weight isn't allowing that. Alternatively, your Text CFG weight may be too low. 
This value dictates how much to listen to the text instruction. The default Image CFG of 1.5 and Text CFG of 7.5 are a good starting point, but aren't necessarily optimal for each edit. Try: - * Decreasing the Image CFG weight, or - * Increasing the Text CFG weight, or -2. Conversely, is the image changing too much, such that the details in the original image aren't preserved? Try: - * Increasing the Image CFG weight, or - * Decreasing the Text CFG weight -3. Try generating results with different random seeds by setting "Randomize Seed" and running generation multiple times. You can also try setting "Randomize CFG" to sample new Text CFG and Image CFG values each time. -4. Rephrasing the instruction sometimes improves results (e.g., "turn him into a dog" vs. "make him a dog" vs. "as a dog"). -5. Increasing the number of steps sometimes improves results. -6. Do faces look weird? The Stable Diffusion autoencoder has a hard time with faces that are small in the image. Try: - * Cropping the image so the face takes up a larger portion of the frame. -""" - - -example_instructions = [ - "Make it a picasso painting", - "as if it were by modigliani", - "convert to a bronze statue", - "Turn it into an anime.", - "have it look like a graphic novel", - "make him gain weight", - "what would he look like bald?", - "Have him smile", - "Put him in a cocktail party.", - "move him at the beach.", - "add dramatic lighting", - "Convert to black and white", - "What if it were snowing?", - "Give him a leather jacket", - "Turn him into a cyborg!", - "make him wear a beanie", -] - -model_id = "timbrooks/instruct-pix2pix" - -def main(): - pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None).to("cuda") - example_image = Image.open("imgs/example.jpg").convert("RGB") - - def load_example( - steps: int, - randomize_seed: bool, - seed: int, - randomize_cfg: bool, - text_cfg_scale: float, - image_cfg_scale: float, - ): - example_instruction = random.choice(example_instructions) - return [example_image, example_instruction] + generate( - example_image, - example_instruction, - steps, - randomize_seed, - seed, - randomize_cfg, - text_cfg_scale, - image_cfg_scale, - ) - - def generate( - input_image: Image.Image, - instruction: str, - steps: int, - randomize_seed: bool, - seed: int, - randomize_cfg: bool, - text_cfg_scale: float, - image_cfg_scale: float, - ): - seed = random.randint(0, 100000) if randomize_seed else seed - text_cfg_scale = round(random.uniform(6.0, 9.0), ndigits=2) if randomize_cfg else text_cfg_scale - image_cfg_scale = round(random.uniform(1.2, 1.8), ndigits=2) if randomize_cfg else image_cfg_scale - - width, height = input_image.size - factor = 512 / max(width, height) - factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height) - width = int((width * factor) // 64) * 64 - height = int((height * factor) // 64) * 64 - input_image = ImageOps.fit(input_image, (width, height), method=Image.Resampling.LANCZOS) - - if instruction == "": - return [input_image, seed] - - generator = torch.manual_seed(seed) - edited_image = pipe( - instruction, image=input_image, - guidance_scale=text_cfg_scale, image_guidance_scale=image_cfg_scale, - num_inference_steps=steps, generator=generator, - ).images[0] - return [seed, text_cfg_scale, image_cfg_scale, edited_image] - - def reset(): - return [0, "Randomize Seed", 1371, "Fix CFG", 7.5, 1.5, None] - - with gr.Blocks() as demo: - gr.HTML("""

-    InstructPix2Pix: Learning to Follow Image Editing Instructions
-    For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
-    Duplicate Space

    """) - with gr.Row(): - with gr.Column(scale=1, min_width=100): - generate_button = gr.Button("Generate") - with gr.Column(scale=1, min_width=100): - load_button = gr.Button("Load Example") - with gr.Column(scale=1, min_width=100): - reset_button = gr.Button("Reset") - with gr.Column(scale=3): - instruction = gr.Textbox(lines=1, label="Edit Instruction", interactive=True) - - with gr.Row(): - input_image = gr.Image(label="Input Image", type="pil", interactive=True) - edited_image = gr.Image(label=f"Edited Image", type="pil", interactive=False) - input_image.style(height=512, width=512) - edited_image.style(height=512, width=512) - - with gr.Row(): - steps = gr.Number(value=50, precision=0, label="Steps", interactive=True) - randomize_seed = gr.Radio( - ["Fix Seed", "Randomize Seed"], - value="Randomize Seed", - type="index", - show_label=False, - interactive=True, - ) - seed = gr.Number(value=1371, precision=0, label="Seed", interactive=True) - randomize_cfg = gr.Radio( - ["Fix CFG", "Randomize CFG"], - value="Fix CFG", - type="index", - show_label=False, - interactive=True, - ) - text_cfg_scale = gr.Number(value=7.5, label=f"Text CFG", interactive=True) - image_cfg_scale = gr.Number(value=1.5, label=f"Image CFG", interactive=True) - - gr.Markdown(help_text) - - load_button.click( - fn=load_example, - inputs=[ - steps, - randomize_seed, - seed, - randomize_cfg, - text_cfg_scale, - image_cfg_scale, - ], - outputs=[input_image, instruction, seed, text_cfg_scale, image_cfg_scale, edited_image], - ) - generate_button.click( - fn=generate, - inputs=[ - input_image, - instruction, - steps, - randomize_seed, - seed, - randomize_cfg, - text_cfg_scale, - image_cfg_scale, - ], - outputs=[seed, text_cfg_scale, image_cfg_scale, edited_image], - ) - reset_button.click( - fn=reset, - inputs=[], - outputs=[steps, randomize_seed, seed, randomize_cfg, text_cfg_scale, image_cfg_scale, edited_image], - ) - - demo.queue(concurrency_count=1) - demo.launch(share=False) - - -if __name__ == "__main__": - main() diff --git a/spaces/Loren/Streamlit_OCR_comparator/README.md b/spaces/Loren/Streamlit_OCR_comparator/README.md deleted file mode 100644 index 2894464cad0f3f4d163e1f8e72750b10259159da..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Streamlit OCR Comparator -emoji: 📰🔍🔤 -colorFrom: indigo -colorTo: gray -sdk: streamlit -sdk_version: 1.10.0 -app_file: Home.py -tags: [streamlit, ocr] -pinned: true ---- \ No newline at end of file diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/sync_batchnorm/__init__.py b/spaces/MCkernick/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/sync_batchnorm/__init__.py deleted file mode 100644 index 6d9b36c74b1808b56ded68cf080a689db7e0ee4e..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/sync_batchnorm/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -# File : __init__.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. 
- -from .batchnorm import set_sbn_eps_mode -from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d -from .batchnorm import patch_sync_batchnorm, convert_model -from .replicate import DataParallelWithCallback, patch_replication_callback diff --git a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/utils.py b/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/utils.py deleted file mode 100644 index 9794e0fc3463a5e8fad05c037cce64683059a6d3..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/utils.py +++ /dev/null @@ -1,226 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.ERROR) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def 
load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() \ No newline at end of file diff --git a/spaces/Marshalls/testmtd/feature_extraction/madmom/conftest.py b/spaces/Marshalls/testmtd/feature_extraction/madmom/conftest.py deleted file mode 100644 index 3a5999ddc80faa0ce6a54389562c9f092b40bb03..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/feature_extraction/madmom/conftest.py +++ /dev/null @@ -1,24 +0,0 @@ -# encoding: utf-8 -""" -This files contains pytest fixtures and other test related stuff. - -""" -from __future__ import absolute_import, division, print_function - -import numpy as np -import pytest - -# save numpy's current print options -_NP_PRINT_OPTIONS = np.get_printoptions() - - -@pytest.fixture(autouse=True) -def setup_doctest(request): - """Set up the environment for doctests (when run through pytest).""" - np.set_printoptions(precision=5, edgeitems=2, suppress=True) - - def fin(): - """Restore the environment after doctests (when run through pytest).""" - np.set_printoptions(**_NP_PRINT_OPTIONS) - - request.addfinalizer(fin) diff --git a/spaces/Marshalls/testmtd/feature_extraction/madmom/evaluation/__init__.py b/spaces/Marshalls/testmtd/feature_extraction/madmom/evaluation/__init__.py deleted file mode 100644 index 19e34a11def040b971b534b9ed3fda71d0a35c85..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/feature_extraction/madmom/evaluation/__init__.py +++ /dev/null @@ -1,882 +0,0 @@ -# encoding: utf-8 -# pylint: disable=no-member -# pylint: disable=invalid-name -# pylint: disable=too-many-arguments -# pylint: disable=wrong-import-position -""" -Evaluation package. - -""" - -from __future__ import absolute_import, division, print_function - -import numpy as np - - -# evaluation helper functions -def find_closest_matches(detections, annotations): - """ - Find the closest annotation for each detection. - - Parameters - ---------- - detections : list or numpy array - Detected events. - annotations : list or numpy array - Annotated events. - - Returns - ------- - indices : numpy array - Indices of the closest matches. - - Notes - ----- - The sequences must be ordered. 
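    A minimal usage sketch (illustrative values only; assumes a NumPy version
    that still provides the deprecated ``np.float`` alias used in this module):

        matches = find_closest_matches([0.1, 1.05, 2.0], [0.0, 1.0, 2.0])
        # -> array([0, 1, 2]): each detection is paired with its nearest annotation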
- - """ - # make sure the arrays have the correct types - detections = np.asarray(detections, dtype=np.float) - annotations = np.asarray(annotations, dtype=np.float) - # TODO: right now, it only works with 1D arrays - if detections.ndim > 1 or annotations.ndim > 1: - raise NotImplementedError('please implement multi-dim support') - # if no detections or annotations are given - if len(detections) == 0 or len(annotations) == 0: - # return a empty array - return np.zeros(0, dtype=np.int) - # if only a single annotation is given - if len(annotations) == 1: - # return an array as long as the detections with indices 0 - return np.zeros(len(detections), dtype=np.int) - # solution found at: http://stackoverflow.com/questions/8914491/ - indices = annotations.searchsorted(detections) - indices = np.clip(indices, 1, len(annotations) - 1) - left = annotations[indices - 1] - right = annotations[indices] - indices -= detections - left < right - detections - # return the indices of the closest matches - return indices - - -def calc_errors(detections, annotations, matches=None): - """ - Errors of the detections to the closest annotations. - - Parameters - ---------- - detections : list or numpy array - Detected events. - annotations : list or numpy array - Annotated events. - matches : list or numpy array - Indices of the closest events. - - Returns - ------- - errors : numpy array - Errors. - - Notes - ----- - The sequences must be ordered. To speed up the calculation, a list of - pre-computed indices of the closest matches can be used. - - """ - # make sure the arrays have the correct types - detections = np.asarray(detections, dtype=np.float) - annotations = np.asarray(annotations, dtype=np.float) - if matches is not None: - matches = np.asarray(matches, dtype=np.int) - # TODO: right now, it only works with 1D arrays - if detections.ndim > 1 or annotations.ndim > 1: - raise NotImplementedError('please implement multi-dim support') - # if no detections or annotations are given - if len(detections) == 0 or len(annotations) == 0: - # return a empty array - return np.zeros(0, dtype=np.float) - # determine the closest annotations - if matches is None: - matches = find_closest_matches(detections, annotations) - # calc error relative to those annotations - errors = detections - annotations[matches] - # return the errors - return errors - - -def calc_absolute_errors(detections, annotations, matches=None): - """ - Absolute errors of the detections to the closest annotations. - - Parameters - ---------- - detections : list or numpy array - Detected events. - annotations : list or numpy array - Annotated events. - matches : list or numpy array - Indices of the closest events. - - Returns - ------- - errors : numpy array - Absolute errors. - - Notes - ----- - The sequences must be ordered. To speed up the calculation, a list of - pre-computed indices of the closest matches can be used. - - """ - # make sure the arrays have the correct types - detections = np.asarray(detections, dtype=np.float) - annotations = np.asarray(annotations, dtype=np.float) - if matches is not None: - matches = np.asarray(matches, dtype=np.int) - # TODO: right now, it only works with 1D arrays - if detections.ndim > 1 or annotations.ndim > 1: - raise NotImplementedError('please implement multi-dim support') - # return the errors - return np.abs(calc_errors(detections, annotations, matches)) - - -def calc_relative_errors(detections, annotations, matches=None): - """ - Relative errors of the detections to the closest annotations. 
- - Parameters - ---------- - detections : list or numpy array - Detected events. - annotations : list or numpy array - Annotated events. - matches : list or numpy array - Indices of the closest events. - - Returns - ------- - errors : numpy array - Relative errors. - - Notes - ----- - The sequences must be ordered. To speed up the calculation, a list of - pre-computed indices of the closest matches can be used. - - """ - # make sure the arrays have the correct types - detections = np.asarray(detections, dtype=np.float) - annotations = np.asarray(annotations, dtype=np.float) - if matches is not None: - matches = np.asarray(matches, dtype=np.int) - # TODO: right now, it only works with 1D arrays - if detections.ndim > 1 or annotations.ndim > 1: - raise NotImplementedError('please implement multi-dim support') - # if no detections or annotations are given - if len(detections) == 0 or len(annotations) == 0: - # return a empty array - return np.zeros(0, dtype=np.float) - # determine the closest annotations - if matches is None: - matches = find_closest_matches(detections, annotations) - # calculate the absolute errors - errors = calc_errors(detections, annotations, matches) - # return the relative errors - return np.abs(1 - (errors / annotations[matches])) - - -# abstract evaluation base class -class EvaluationMixin(object): - """ - Evaluation mixin class. - - This class has a `name` attribute which is used for display purposes and - defaults to 'None'. - - `METRIC_NAMES` is a list of tuples, containing the attribute's name and the - corresponding label, e.g.: - - The attributes defined in `METRIC_NAMES` will be provided as an ordered - dictionary as the `metrics` property unless the subclass overwrites the - property. - - `FLOAT_FORMAT` is used to format floats. - - """ - - # Example: - # METRIC_NAMES = [ - # ('precision', 'Precision'), - # ('recall', 'Recall'), - # ('fmeasure', 'F-measure'), - # ] - name = None - METRIC_NAMES = [] - FLOAT_FORMAT = '{:.3f}' - - @property - def metrics(self): - """Metrics as a dictionary.""" - from collections import OrderedDict - metrics = OrderedDict() - # metrics = {} - for metric in [m[0] for m in self.METRIC_NAMES]: - metrics[metric] = getattr(self, metric) - return metrics - - def __len__(self): - """Length of the evaluation object.""" - raise NotImplementedError('must be implemented by subclass.') - - def tostring(self, **kwargs): - """ - Format the evaluation metrics as a human readable string. - - Returns - ------- - str - Evaluation metrics formatted as a human readable string. - - Notes - ----- - This is a fallback method formatting the `metrics` dictionary in a - human readable way. Classes inheriting from this mixin class should - provide a method better suitable. - - """ - # pylint: disable=unused-argument - import pprint - return pprint.pformat(dict(self.metrics), indent=4) - - -# evaluation classes -class SimpleEvaluation(EvaluationMixin): - """ - Simple Precision, Recall, F-measure and Accuracy evaluation based on the - numbers of true/false positive/negative detections. - - Parameters - ---------- - num_tp : int - Number of true positive detections. - num_fp : int - Number of false positive detections. - num_tn : int - Number of true negative detections. - num_fn : int - Number of false negative detections. - name : str - Name to be displayed. - - Notes - ----- - This class is only suitable for a 1-class evaluation problem. - - """ - - METRIC_NAMES = [ - ('num_tp', 'No. of true positives'), - ('num_fp', 'No. 
of false positives'), - ('num_tn', 'No. of true negatives'), - ('num_fn', 'No. of false negatives'), - ('num_annotations', 'No. Annotations'), - ('precision', 'Precision'), - ('recall', 'Recall'), - ('fmeasure', 'F-measure'), - ('accuracy', 'Accuracy'), - ] - - def __init__(self, num_tp=0, num_fp=0, num_tn=0, num_fn=0, name=None, - **kwargs): - # pylint: disable=unused-argument - # hidden variables, to be able to overwrite them in subclasses - self._num_tp = int(num_tp) - self._num_fp = int(num_fp) - self._num_tn = int(num_tn) - self._num_fn = int(num_fn) - # name of the evaluation - self.name = name - - @property - def num_tp(self): - """Number of true positive detections.""" - return self._num_tp - - @property - def num_fp(self): - """Number of false positive detections.""" - return self._num_fp - - @property - def num_tn(self): - """Number of true negative detections.""" - return self._num_tn - - @property - def num_fn(self): - """Number of false negative detections.""" - return self._num_fn - - @property - def num_annotations(self): - """Number of annotations.""" - return self.num_tp + self.num_fn - - def __len__(self): - # the length equals the number of annotations - return self.num_annotations - - @property - def precision(self): - """Precision.""" - # correct / retrieved - retrieved = float(self.num_tp + self.num_fp) - # if there are no positive predictions, none of them are wrong - if retrieved == 0: - return 1. - return self.num_tp / retrieved - - @property - def recall(self): - """Recall.""" - # correct / relevant - relevant = float(self.num_tp + self.num_fn) - # if there are no positive annotations, we recalled all of them - if relevant == 0: - return 1. - return self.num_tp / relevant - - @property - def fmeasure(self): - """F-measure.""" - # 2pr / (p+r) - numerator = 2. * self.precision * self.recall - if numerator == 0: - return 0. - return numerator / (self.precision + self.recall) - - @property - def accuracy(self): - """Accuracy.""" - # acc: (TP + TN) / (TP + FP + TN + FN) - denominator = self.num_fp + self.num_fn + self.num_tp + self.num_tn - if denominator == 0: - return 1. - numerator = float(self.num_tp + self.num_tn) - if numerator == 0: - return 0. - return numerator / denominator - - def tostring(self, **kwargs): - """ - Format the evaluation metrics as a human readable string. - - Returns - ------- - str - Evaluation metrics formatted as a human readable string. - - - """ - ret = '' - if self.name is not None: - ret += '%s\n ' % self.name - ret += 'Annotations: %5d TP: %5d FP: %5d FN: %5d ' \ - 'Precision: %.3f Recall: %.3f F-measure: %.3f Acc: %.3f' % \ - (self.num_annotations, self.num_tp, self.num_fp, self.num_fn, - self.precision, self.recall, self.fmeasure, self.accuracy) - return ret - - def __str__(self): - return self.tostring() - - -# evaluate Precision, Recall, F-measure and Accuracy with lists or numpy arrays -class Evaluation(SimpleEvaluation): - """ - Evaluation class for measuring Precision, Recall and F-measure based on - numpy arrays or lists with true/false positive/negative detections. - - Parameters - ---------- - tp : list or numpy array - True positive detections. - fp : list or numpy array - False positive detections. - tn : list or numpy array - True negative detections. - fn : list or numpy array - False negative detections. - name : str - Name to be displayed. 
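    A minimal usage sketch (illustrative values only):

        e = Evaluation(tp=[0.1, 0.5], fp=[0.9], fn=[1.2], name='example')
        # e.num_tp == 2, e.num_fp == 1, e.num_fn == 1
        # e.precision == e.recall == 2 / 3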
- - """ - - def __init__(self, tp=None, fp=None, tn=None, fn=None, **kwargs): - # set default values - if tp is None: - tp = [] - if fp is None: - fp = [] - if tn is None: - tn = [] - if fn is None: - fn = [] - # instantiate a SimpleEvaluation object - super(Evaluation, self).__init__(**kwargs) - # convert everything to numpy arrays and save them - self.tp = np.asarray(list(tp), dtype=np.float) - self.fp = np.asarray(list(fp), dtype=np.float) - self.tn = np.asarray(list(tn), dtype=np.float) - self.fn = np.asarray(list(fn), dtype=np.float) - - @property - def num_tp(self): - """Number of true positive detections.""" - return len(self.tp) - - @property - def num_fp(self): - """Number of false positive detections.""" - return len(self.fp) - - @property - def num_tn(self): - """Number of true negative detections.""" - return len(self.tn) - - @property - def num_fn(self): - """Number of false negative detections.""" - return len(self.fn) - - -# class for evaluation of Precision, Recall, F-measure with 2D arrays -class MultiClassEvaluation(Evaluation): - """ - Evaluation class for measuring Precision, Recall and F-measure based on - 2D numpy arrays with true/false positive/negative detections. - - Parameters - ---------- - tp : list of tuples or numpy array, shape (num_tp, 2) - True positive detections. - fp : list of tuples or numpy array, shape (num_fp, 2) - False positive detections. - tn : list of tuples or numpy array, shape (num_tn, 2) - True negative detections. - fn : list of tuples or numpy array, shape (num_fn, 2) - False negative detections. - name : str - Name to be displayed. - - Notes - ----- - The second item of the tuples or the second column of the arrays denote - the class the detection belongs to. - - """ - def __init__(self, tp=None, fp=None, tn=None, fn=None, **kwargs): - # set default values - if tp is None: - tp = np.zeros((0, 2)) - if fp is None: - fp = np.zeros((0, 2)) - if tn is None: - tn = np.zeros((0, 2)) - if fn is None: - fn = np.zeros((0, 2)) - super(MultiClassEvaluation, self).__init__(**kwargs) - self.tp = np.asarray(tp, dtype=np.float) - self.fp = np.asarray(fp, dtype=np.float) - self.tn = np.asarray(tn, dtype=np.float) - self.fn = np.asarray(fn, dtype=np.float) - - def tostring(self, verbose=False, **kwargs): - """ - Format the evaluation metrics as a human readable string. - - Parameters - ---------- - verbose : bool - Add evaluation for individual classes. - - Returns - ------- - str - Evaluation metrics formatted as a human readable string. 
- - """ - ret = '' - - if verbose: - # extract all classes - classes = [] - if self.tp.any(): - classes = np.append(classes, np.unique(self.tp[:, 1])) - if self.fp.any(): - classes = np.append(classes, np.unique(self.fp[:, 1])) - if self.tn.any(): - classes = np.append(classes, np.unique(self.tn[:, 1])) - if self.fn.any(): - classes = np.append(classes, np.unique(self.fn[:, 1])) - for cls in sorted(np.unique(classes)): - # extract the TP, FP, TN and FN of this class - tp = self.tp[self.tp[:, 1] == cls] - fp = self.fp[self.fp[:, 1] == cls] - tn = self.tn[self.tn[:, 1] == cls] - fn = self.fn[self.fn[:, 1] == cls] - # evaluate them - e = Evaluation(tp, fp, tn, fn, name='Class %s' % cls) - # append to the output string - ret += ' %s\n' % e.tostring(verbose=False) - # normal formatting - ret += 'Annotations: %5d TP: %5d FP: %4d FN: %4d ' \ - 'Precision: %.3f Recall: %.3f F-measure: %.3f Acc: %.3f' % \ - (self.num_annotations, self.num_tp, self.num_fp, self.num_fn, - self.precision, self.recall, self.fmeasure, self.accuracy) - # return - return ret - - -# class for summing Evaluations -class SumEvaluation(SimpleEvaluation): - """ - Simple class for summing evaluations. - - Parameters - ---------- - eval_objects : list - Evaluation objects. - name : str - Name to be displayed. - - """ - - def __init__(self, eval_objects, name=None): - # pylint: disable=super-init-not-called - # Note: we want to inherit the evaluation functions/properties, no need - # to call __super__, but we need to take care of 'name' - if not isinstance(eval_objects, list): - # wrap the given eval_object in a list - eval_objects = [eval_objects] - self.eval_objects = eval_objects - self.name = name or 'sum for %d files' % len(self) - - def __len__(self): - # just use the length of the evaluation objects - return len(self.eval_objects) - - # redefine the counters (number of TP, FP, TN, FN & number of annotations) - - @property - def num_tp(self): - """Number of true positive detections.""" - return sum(e.num_tp for e in self.eval_objects) - - @property - def num_fp(self): - """Number of false positive detections.""" - return sum(e.num_fp for e in self.eval_objects) - - @property - def num_tn(self): - """Number of true negative detections.""" - return sum(e.num_tn for e in self.eval_objects) - - @property - def num_fn(self): - """Number of false negative detections.""" - return sum(e.num_fn for e in self.eval_objects) - - @property - def num_annotations(self): - """Number of annotations.""" - return sum(e.num_annotations for e in self.eval_objects) - - -# class for averaging Evaluations -class MeanEvaluation(SumEvaluation): - """ - Simple class for averaging evaluation. - - Parameters - ---------- - eval_objects : list - Evaluation objects. - name : str - Name to be displayed. - - """ - - def __init__(self, eval_objects, name=None, **kwargs): - super(MeanEvaluation, self).__init__(eval_objects, **kwargs) - # handle the 'name' here to be able to set a different default value - self.name = name or 'mean for %d files' % len(self) - - # overwrite the properties to calculate the mean instead of the sum - - @property - def num_tp(self): - """Number of true positive detections.""" - if not self.eval_objects: - return 0. - return np.nanmean([e.num_tp for e in self.eval_objects]) - - @property - def num_fp(self): - """Number of false positive detections.""" - if not self.eval_objects: - return 0. 
- return np.nanmean([e.num_fp for e in self.eval_objects]) - - @property - def num_tn(self): - """Number of true negative detections.""" - if not self.eval_objects: - return 0. - return np.nanmean([e.num_tn for e in self.eval_objects]) - - @property - def num_fn(self): - """Number of false negative detections.""" - if not self.eval_objects: - return 0. - return np.nanmean([e.num_fn for e in self.eval_objects]) - - @property - def num_annotations(self): - """Number of annotations.""" - if not self.eval_objects: - return 0. - return np.nanmean([e.num_annotations for e in self.eval_objects]) - - @property - def precision(self): - """Precision.""" - return np.nanmean([e.precision for e in self.eval_objects]) - - @property - def recall(self): - """Recall.""" - return np.nanmean([e.recall for e in self.eval_objects]) - - @property - def fmeasure(self): - """F-measure.""" - return np.nanmean([e.fmeasure for e in self.eval_objects]) - - @property - def accuracy(self): - """Accuracy.""" - return np.nanmean([e.accuracy for e in self.eval_objects]) - - def tostring(self, **kwargs): - """ - Format the evaluation metrics as a human readable string. - - Returns - ------- - str - Evaluation metrics formatted as a human readable string. - - """ - ret = '' - if self.name is not None: - ret += '%s\n ' % self.name - # TODO: unify this with SimpleEvaluation but - # add option to provide field formatters (e.g. 3d or 5.2f) - # format with floats instead of integers - ret += 'Annotations: %5.2f TP: %5.2f FP: %5.2f FN: %5.2f ' \ - 'Precision: %.3f Recall: %.3f F-measure: %.3f Acc: %.3f' % \ - (self.num_annotations, self.num_tp, self.num_fp, self.num_fn, - self.precision, self.recall, self.fmeasure, self.accuracy) - return ret - - -def tostring(eval_objects, **kwargs): - """ - Format the given evaluation objects as human readable strings. - - Parameters - ---------- - eval_objects : list - Evaluation objects. - - Returns - ------- - str - Evaluation metrics formatted as a human readable string. - - """ - # pylint: disable=unused-argument - return '\n'.join([e.tostring() for e in eval_objects]) - - -def tocsv(eval_objects, metric_names=None, float_format='{:.3f}', **kwargs): - """ - Format the given evaluation objects as a CSV table. - - Parameters - ---------- - eval_objects : list - Evaluation objects. - metric_names : list of tuples, optional - List of tuples defining the name of the property corresponding to the - metric, and the metric label e.g. ('fp', 'False Positives'). - float_format : str, optional - How to format the metrics. - - Returns - ------- - str - CSV table representation of the evaluation objects. - - Notes - ----- - If no `metric_names` are given, they will be extracted from the first - evaluation object. - - """ - # pylint: disable=unused-argument - - if metric_names is None: - # get the evaluation metrics from the first evaluation object - metric_names = eval_objects[0].METRIC_NAMES - metric_names, metric_labels = list(zip(*metric_names)) - # add header - lines = ['Name,' + ','.join(metric_labels)] - # TODO: use e.metrics dict? - # add the evaluation objects - for e in eval_objects: - values = [float_format.format(getattr(e, mn)) for mn in metric_names] - lines.append(e.name + ',' + ','.join(values)) - # return everything - return '\n'.join(lines) - - -def totex(eval_objects, metric_names=None, float_format='{:.3f}', **kwargs): - """ - Format the given evaluation objects as a LaTeX table. - - Parameters - ---------- - eval_objects : list - Evaluation objects. 
- metric_names : list of tuples, optional - List of tuples defining the name of the property corresponding to the - metric, and the metric label e.g. ('fp', 'False Positives'). - float_format : str, optional - How to format the metrics. - - Returns - ------- - str - LaTeX table representation of the evaluation objects. - - Notes - ----- - If no `metric_names` are given, they will be extracted from the first - evaluation object. - - """ - # pylint: disable=unused-argument - - if metric_names is None: - # get the evaluation metrics from the first evaluation object - metric_names = eval_objects[0].METRIC_NAMES - metric_names, metric_labels = list(zip(*metric_names)) - # add header - lines = ['Name & ' + ' & '.join(metric_labels) + '\\\\'] - # TODO: use e.metrics dict - # TODO: add a generic totable() function which accepts columns separator, - # newline stuff (e.g. tex \\\\) and others - # add the evaluation objects - for e in eval_objects: - values = [float_format.format(getattr(e, mn)) for mn in metric_names] - lines.append(e.name + ' & ' + ' & '.join(values) + '\\\\') - # return everything - return '\n'.join(lines) - - -def evaluation_io(parser, ann_suffix, det_suffix, ann_dir=None, det_dir=None): - """ - Add evaluation input/output and formatting related arguments to an existing - parser object. - - Parameters - ---------- - parser : argparse parser instance - Existing argparse parser object. - ann_suffix : str - Suffix of the annotation files. - det_suffix : str - Suffix of the detection files. - ann_dir : str, optional - Use only annotations from this folder (and sub-folders). - det_dir : str, optional - Use only detections from this folder (and sub-folders). - - Returns - ------- - io_group : argparse argument group - Evaluation input / output argument group. - formatter_group : argparse argument group - Evaluation formatter argument group. 
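    A minimal usage sketch (illustrative suffixes only):

        import argparse
        parser = argparse.ArgumentParser()
        evaluation_io(parser, ann_suffix='.onsets', det_suffix='.onsets.txt')
        args = parser.parse_args(['-v'])
        # args.ann_suffix == '.onsets', args.verbose == 1, and
        # args.output_formatter defaults to the plain-text `tostring` formatter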
- - """ - import sys - import argparse - # general input output file handling - parser.add_argument('files', nargs='*', - help='files (or folders) to be evaluated') - parser.add_argument('-o', dest='outfile', type=argparse.FileType('w'), - default=sys.stdout, - help='output file [default: STDOUT]') - # file suffixes used for evaluation - g = parser.add_argument_group('file/folder/suffix arguments') - g.add_argument('-a', dest='ann_suffix', action='store', default=ann_suffix, - help='suffix of the annotation files ' - '[default: %(default)s]') - g.add_argument('--ann_dir', action='store', default=ann_dir, - help='search only this directory (recursively) for ' - 'annotation files [default: %(default)s]') - g.add_argument('-d', dest='det_suffix', action='store', default=det_suffix, - help='suffix of the detection files [default: %(default)s]') - g.add_argument('--det_dir', action='store', default=det_dir, - help='search only this directory (recursively) for ' - 'detection files [default: %(default)s]') - # option to ignore non-existing detections - g.add_argument('-i', '--ignore_non_existing', action='store_true', - help='ignore non-existing detections [default: raise a ' - 'warning and assume empty detections]') - # verbose - parser.add_argument('-v', '--verbose', action='count', default=0, - help='increase verbosity level') - # option to suppress warnings - parser.add_argument('-q', '--quiet', action='store_true', - help='suppress any warnings') - # output format options - parser.set_defaults(output_formatter=tostring) - f = parser.add_argument_group('formatting arguments') - formats = f.add_mutually_exclusive_group() - formats.add_argument('--tex', dest='output_formatter', - action='store_const', const=totex, - help='format output to be used in .tex files') - formats.add_argument('--csv', dest='output_formatter', - action='store_const', const=tocsv, - help='format output to be used in .csv files') - # return the output formatting group so the caller can add more options - return g, f - - -# finally import the submodules -from . import chords, beats, notes, onsets, tempo - -# import often used classes -from .beats import BeatEvaluation, BeatMeanEvaluation -from .chords import ChordEvaluation, ChordMeanEvaluation, ChordSumEvaluation -from .key import KeyEvaluation, KeyMeanEvaluation -from .notes import NoteEvaluation, NoteMeanEvaluation, NoteSumEvaluation -from .onsets import OnsetEvaluation, OnsetMeanEvaluation, OnsetSumEvaluation -from .tempo import TempoEvaluation, TempoMeanEvaluation diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/iou3d.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/iou3d.py deleted file mode 100644 index 6fc71979190323f44c09f8b7e1761cf49cd2d76b..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/iou3d.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', [ - 'iou3d_boxes_iou_bev_forward', 'iou3d_nms_forward', - 'iou3d_nms_normal_forward' -]) - - -def boxes_iou_bev(boxes_a, boxes_b): - """Calculate boxes IoU in the Bird's Eye View. - - Args: - boxes_a (torch.Tensor): Input boxes a with shape (M, 5). - boxes_b (torch.Tensor): Input boxes b with shape (N, 5). - - Returns: - ans_iou (torch.Tensor): IoU result with shape (M, N). 
- """ - ans_iou = boxes_a.new_zeros( - torch.Size((boxes_a.shape[0], boxes_b.shape[0]))) - - ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(), - boxes_b.contiguous(), ans_iou) - - return ans_iou - - -def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None): - """NMS function GPU implementation (for BEV boxes). The overlap of two - boxes for IoU calculation is defined as the exact overlapping area of the - two boxes. In this function, one can also set ``pre_max_size`` and - ``post_max_size``. - - Args: - boxes (torch.Tensor): Input boxes with the shape of [N, 5] - ([x1, y1, x2, y2, ry]). - scores (torch.Tensor): Scores of boxes with the shape of [N]. - thresh (float): Overlap threshold of NMS. - pre_max_size (int, optional): Max size of boxes before NMS. - Default: None. - post_max_size (int, optional): Max size of boxes after NMS. - Default: None. - - Returns: - torch.Tensor: Indexes after NMS. - """ - assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]' - order = scores.sort(0, descending=True)[1] - - if pre_max_size is not None: - order = order[:pre_max_size] - boxes = boxes[order].contiguous() - - keep = torch.zeros(boxes.size(0), dtype=torch.long) - num_out = ext_module.iou3d_nms_forward(boxes, keep, thresh) - keep = order[keep[:num_out].cuda(boxes.device)].contiguous() - if post_max_size is not None: - keep = keep[:post_max_size] - return keep - - -def nms_normal_bev(boxes, scores, thresh): - """Normal NMS function GPU implementation (for BEV boxes). The overlap of - two boxes for IoU calculation is defined as the exact overlapping area of - the two boxes WITH their yaw angle set to 0. - - Args: - boxes (torch.Tensor): Input boxes with shape (N, 5). - scores (torch.Tensor): Scores of predicted boxes with shape (N). - thresh (float): Overlap threshold of NMS. - - Returns: - torch.Tensor: Remaining indices with scores in descending order. - """ - assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]' - order = scores.sort(0, descending=True)[1] - - boxes = boxes[order].contiguous() - - keep = torch.zeros(boxes.size(0), dtype=torch.long) - num_out = ext_module.iou3d_nms_normal_forward(boxes, keep, thresh) - return order[keep[:num_out].cuda(boxes.device)].contiguous() diff --git a/spaces/MetaWabbit/Auto-GPT/run.sh b/spaces/MetaWabbit/Auto-GPT/run.sh deleted file mode 100644 index edcbc44155b9ca9df83e283fdf976472c13e6492..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/run.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -python scripts/check_requirements.py requirements.txt -if [ $? -eq 1 ] -then - echo Installing missing packages... - pip install -r requirements.txt -fi -python -m autogpt $@ -read -p "Press any key to continue..." diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/classifier_trainer_test.py b/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/classifier_trainer_test.py deleted file mode 100644 index 244425feef76bf89d4de939cb8a1914a6f0f47c6..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/classifier_trainer_test.py +++ /dev/null @@ -1,387 +0,0 @@ -# Lint as: python3 -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Unit tests for the classifier trainer models.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import copy -import functools -import json - -import os -import sys - -from typing import Any, Callable, Iterable, Mapping, MutableMapping, Optional, Tuple - -from absl import flags -from absl.testing import parameterized -import tensorflow as tf - -from tensorflow.python.distribute import combinations -from tensorflow.python.distribute import strategy_combinations -from official.utils.flags import core as flags_core -from official.vision.image_classification import classifier_trainer -from official.vision.image_classification import dataset_factory -from official.vision.image_classification import test_utils -from official.vision.image_classification.configs import base_configs - -classifier_trainer.define_classifier_flags() - - -def distribution_strategy_combinations() -> Iterable[Tuple[Any, ...]]: - """Returns the combinations of end-to-end tests to run.""" - return combinations.combine( - distribution=[ - strategy_combinations.default_strategy, - strategy_combinations.tpu_strategy, - strategy_combinations.one_device_strategy_gpu, - strategy_combinations.mirrored_strategy_with_two_gpus, - ], - model=[ - 'efficientnet', - 'resnet', - ], - mode='eager', - dataset=[ - 'imagenet', - ], - ) - - -def get_params_override(params_override: Mapping[str, Any]) -> str: - """Converts params_override dict to string command.""" - return '--params_override=' + json.dumps(params_override) - - -def basic_params_override(dtype: str = 'float32') -> MutableMapping[str, Any]: - """Returns a basic parameter configuration for testing.""" - return { - 'train_dataset': { - 'builder': 'synthetic', - 'use_per_replica_batch_size': True, - 'batch_size': 1, - 'image_size': 224, - 'dtype': dtype, - }, - 'validation_dataset': { - 'builder': 'synthetic', - 'batch_size': 1, - 'use_per_replica_batch_size': True, - 'image_size': 224, - 'dtype': dtype, - }, - 'train': { - 'steps': 1, - 'epochs': 1, - 'callbacks': { - 'enable_checkpoint_and_export': True, - 'enable_tensorboard': False, - }, - }, - 'evaluation': { - 'steps': 1, - }, - } - - -def get_trivial_model(num_classes: int) -> tf.keras.Model: - """Creates and compiles trivial model for ImageNet dataset.""" - model = test_utils.trivial_model(num_classes=num_classes) - lr = 0.01 - optimizer = tf.keras.optimizers.SGD(learning_rate=lr) - loss_obj = tf.keras.losses.SparseCategoricalCrossentropy() - model.compile(optimizer=optimizer, - loss=loss_obj, - run_eagerly=True) - return model - - -def get_trivial_data() -> tf.data.Dataset: - """Gets trivial data in the ImageNet size.""" - def generate_data(_) -> tf.data.Dataset: - image = tf.zeros(shape=(224, 224, 3), dtype=tf.float32) - label = tf.zeros([1], dtype=tf.int32) - return image, label - - dataset = tf.data.Dataset.range(1) - dataset = dataset.repeat() - dataset = dataset.map(generate_data, - num_parallel_calls=tf.data.experimental.AUTOTUNE) - dataset = 
dataset.prefetch(buffer_size=1).batch(1) - return dataset - - -def run_end_to_end(main: Callable[[Any], None], - extra_flags: Optional[Iterable[str]] = None, - model_dir: Optional[str] = None): - """Runs the classifier trainer end-to-end.""" - extra_flags = [] if extra_flags is None else extra_flags - args = [sys.argv[0], '--model_dir', model_dir] + extra_flags - flags_core.parse_flags(argv=args) - main(flags.FLAGS) - - -class ClassifierTest(tf.test.TestCase, parameterized.TestCase): - """Unit tests for Keras models.""" - _tempdir = None - - @classmethod - def setUpClass(cls): # pylint: disable=invalid-name - super(ClassifierTest, cls).setUpClass() - - def tearDown(self): - super(ClassifierTest, self).tearDown() - tf.io.gfile.rmtree(self.get_temp_dir()) - - @combinations.generate(distribution_strategy_combinations()) - def test_end_to_end_train_and_eval(self, distribution, model, dataset): - """Test train_and_eval and export for Keras classifier models.""" - # Some parameters are not defined as flags (e.g. cannot run - # classifier_train.py --batch_size=...) by design, so use - # "--params_override=..." instead - model_dir = self.get_temp_dir() - base_flags = [ - '--data_dir=not_used', - '--model_type=' + model, - '--dataset=' + dataset, - ] - train_and_eval_flags = base_flags + [ - get_params_override(basic_params_override()), - '--mode=train_and_eval', - ] - - run = functools.partial(classifier_trainer.run, - strategy_override=distribution) - run_end_to_end(main=run, - extra_flags=train_and_eval_flags, - model_dir=model_dir) - - @combinations.generate( - combinations.combine( - distribution=[ - strategy_combinations.one_device_strategy_gpu, - ], - model=[ - 'efficientnet', - 'resnet', - ], - mode='eager', - dataset='imagenet', - dtype='float16', - )) - def test_gpu_train(self, distribution, model, dataset, dtype): - """Test train_and_eval and export for Keras classifier models.""" - # Some parameters are not defined as flags (e.g. cannot run - # classifier_train.py --batch_size=...) by design, so use - # "--params_override=..." instead - model_dir = self.get_temp_dir() - base_flags = [ - '--data_dir=not_used', - '--model_type=' + model, - '--dataset=' + dataset, - ] - train_and_eval_flags = base_flags + [ - get_params_override(basic_params_override(dtype)), - '--mode=train_and_eval', - ] - - export_params = basic_params_override() - export_path = os.path.join(model_dir, 'export') - export_params['export'] = {} - export_params['export']['destination'] = export_path - export_flags = base_flags + [ - '--mode=export_only', - get_params_override(export_params) - ] - - run = functools.partial(classifier_trainer.run, - strategy_override=distribution) - run_end_to_end(main=run, - extra_flags=train_and_eval_flags, - model_dir=model_dir) - run_end_to_end(main=run, - extra_flags=export_flags, - model_dir=model_dir) - self.assertTrue(os.path.exists(export_path)) - - @combinations.generate( - combinations.combine( - distribution=[ - strategy_combinations.tpu_strategy, - ], - model=[ - 'efficientnet', - 'resnet', - ], - mode='eager', - dataset='imagenet', - dtype='bfloat16', - )) - def test_tpu_train(self, distribution, model, dataset, dtype): - """Test train_and_eval and export for Keras classifier models.""" - # Some parameters are not defined as flags (e.g. cannot run - # classifier_train.py --batch_size=...) by design, so use - # "--params_override=..." 
instead - model_dir = self.get_temp_dir() - base_flags = [ - '--data_dir=not_used', - '--model_type=' + model, - '--dataset=' + dataset, - ] - train_and_eval_flags = base_flags + [ - get_params_override(basic_params_override(dtype)), - '--mode=train_and_eval', - ] - - run = functools.partial(classifier_trainer.run, - strategy_override=distribution) - run_end_to_end(main=run, - extra_flags=train_and_eval_flags, - model_dir=model_dir) - - @combinations.generate(distribution_strategy_combinations()) - def test_end_to_end_invalid_mode(self, distribution, model, dataset): - """Test the Keras EfficientNet model with `strategy`.""" - model_dir = self.get_temp_dir() - extra_flags = [ - '--data_dir=not_used', - '--mode=invalid_mode', - '--model_type=' + model, - '--dataset=' + dataset, - get_params_override(basic_params_override()), - ] - - run = functools.partial(classifier_trainer.run, - strategy_override=distribution) - with self.assertRaises(ValueError): - run_end_to_end(main=run, extra_flags=extra_flags, model_dir=model_dir) - - -class UtilTests(parameterized.TestCase, tf.test.TestCase): - """Tests for individual utility functions within classifier_trainer.py.""" - - @parameterized.named_parameters( - ('efficientnet-b0', 'efficientnet', 'efficientnet-b0', 224), - ('efficientnet-b1', 'efficientnet', 'efficientnet-b1', 240), - ('efficientnet-b2', 'efficientnet', 'efficientnet-b2', 260), - ('efficientnet-b3', 'efficientnet', 'efficientnet-b3', 300), - ('efficientnet-b4', 'efficientnet', 'efficientnet-b4', 380), - ('efficientnet-b5', 'efficientnet', 'efficientnet-b5', 456), - ('efficientnet-b6', 'efficientnet', 'efficientnet-b6', 528), - ('efficientnet-b7', 'efficientnet', 'efficientnet-b7', 600), - ('resnet', 'resnet', '', None), - ) - def test_get_model_size(self, model, model_name, expected): - config = base_configs.ExperimentConfig( - model_name=model, - model=base_configs.ModelConfig( - model_params={ - 'model_name': model_name, - }, - ) - ) - size = classifier_trainer.get_image_size_from_model(config) - self.assertEqual(size, expected) - - @parameterized.named_parameters( - ('dynamic', 'dynamic', None, 'dynamic'), - ('scalar', 128., None, 128.), - ('float32', None, 'float32', 1), - ('float16', None, 'float16', 128), - ) - def test_get_loss_scale(self, loss_scale, dtype, expected): - config = base_configs.ExperimentConfig( - runtime=base_configs.RuntimeConfig( - loss_scale=loss_scale), - train_dataset=dataset_factory.DatasetConfig(dtype=dtype)) - ls = classifier_trainer.get_loss_scale(config, fp16_default=128) - self.assertEqual(ls, expected) - - @parameterized.named_parameters( - ('float16', 'float16'), - ('bfloat16', 'bfloat16') - ) - def test_initialize(self, dtype): - config = base_configs.ExperimentConfig( - runtime=base_configs.RuntimeConfig( - run_eagerly=False, - enable_xla=False, - per_gpu_thread_count=1, - gpu_thread_mode='gpu_private', - num_gpus=1, - dataset_num_private_threads=1, - ), - train_dataset=dataset_factory.DatasetConfig(dtype=dtype), - model=base_configs.ModelConfig(), - ) - - class EmptyClass: - pass - fake_ds_builder = EmptyClass() - fake_ds_builder.dtype = dtype - fake_ds_builder.config = EmptyClass() - classifier_trainer.initialize(config, fake_ds_builder) - - def test_resume_from_checkpoint(self): - """Tests functionality for resuming from checkpoint.""" - # Set the keras policy - policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') - tf.keras.mixed_precision.experimental.set_policy(policy) - - # Get the model, datasets, and compile it. 
- model = get_trivial_model(10) - - # Create the checkpoint - model_dir = self.get_temp_dir() - train_epochs = 1 - train_steps = 10 - ds = get_trivial_data() - callbacks = [ - tf.keras.callbacks.ModelCheckpoint( - os.path.join(model_dir, 'model.ckpt-{epoch:04d}'), - save_weights_only=True) - ] - model.fit( - ds, - callbacks=callbacks, - epochs=train_epochs, - steps_per_epoch=train_steps) - - # Test load from checkpoint - clean_model = get_trivial_model(10) - weights_before_load = copy.deepcopy(clean_model.get_weights()) - initial_epoch = classifier_trainer.resume_from_checkpoint( - model=clean_model, - model_dir=model_dir, - train_steps=train_steps) - self.assertEqual(initial_epoch, 1) - self.assertNotAllClose(weights_before_load, clean_model.get_weights()) - - tf.io.gfile.rmtree(model_dir) - - def test_serialize_config(self): - """Tests functionality for serializing data.""" - config = base_configs.ExperimentConfig() - model_dir = self.get_temp_dir() - classifier_trainer.serialize_config(params=config, model_dir=model_dir) - saved_params_path = os.path.join(model_dir, 'params.yaml') - self.assertTrue(os.path.exists(saved_params_path)) - tf.io.gfile.rmtree(model_dir) - -if __name__ == '__main__': - tf.test.main() diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/configs/base_configs.py b/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/configs/base_configs.py deleted file mode 100644 index 11fcb5305660ec71153ebfc12631f455a3464115..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/configs/base_configs.py +++ /dev/null @@ -1,231 +0,0 @@ -# Lint as: python3 -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Definitions for high level configuration groups..""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -from typing import Any, List, Mapping, Optional - -import dataclasses - -from official.modeling import hyperparams -from official.modeling.hyperparams import config_definitions - -CallbacksConfig = config_definitions.CallbacksConfig -TensorboardConfig = config_definitions.TensorboardConfig -RuntimeConfig = config_definitions.RuntimeConfig - - -@dataclasses.dataclass -class ExportConfig(hyperparams.Config): - """Configuration for exports. - - Attributes: - checkpoint: the path to the checkpoint to export. - destination: the path to where the checkpoint should be exported. - """ - checkpoint: str = None - destination: str = None - - -@dataclasses.dataclass -class MetricsConfig(hyperparams.Config): - """Configuration for Metrics. - - Attributes: - accuracy: Whether or not to track accuracy as a Callback. Defaults to None. - top_5: Whether or not to track top_5_accuracy as a Callback. Defaults to - None. 
- """ - accuracy: bool = None - top_5: bool = None - - -@dataclasses.dataclass -class TimeHistoryConfig(hyperparams.Config): - """Configuration for the TimeHistory callback. - - Attributes: - log_steps: Interval of steps between logging of batch level stats. - """ - log_steps: int = None - - -@dataclasses.dataclass -class TrainConfig(hyperparams.Config): - """Configuration for training. - - Attributes: - resume_checkpoint: Whether or not to enable load checkpoint loading. - Defaults to None. - epochs: The number of training epochs to run. Defaults to None. - steps: The number of steps to run per epoch. If None, then this will be - inferred based on the number of images and batch size. Defaults to None. - callbacks: An instance of CallbacksConfig. - metrics: An instance of MetricsConfig. - tensorboard: An instance of TensorboardConfig. - set_epoch_loop: Whether or not to set `experimental_steps_per_execution` to - equal the number of training steps in `model.compile`. This reduces the - number of callbacks run per epoch which significantly improves end-to-end - TPU training time. - """ - resume_checkpoint: bool = None - epochs: int = None - steps: int = None - callbacks: CallbacksConfig = CallbacksConfig() - metrics: MetricsConfig = None - tensorboard: TensorboardConfig = TensorboardConfig() - time_history: TimeHistoryConfig = TimeHistoryConfig() - set_epoch_loop: bool = False - - -@dataclasses.dataclass -class EvalConfig(hyperparams.Config): - """Configuration for evaluation. - - Attributes: - epochs_between_evals: The number of train epochs to run between evaluations. - Defaults to None. - steps: The number of eval steps to run during evaluation. If None, this will - be inferred based on the number of images and batch size. Defaults to - None. - skip_eval: Whether or not to skip evaluation. - """ - epochs_between_evals: int = None - steps: int = None - skip_eval: bool = False - - -@dataclasses.dataclass -class LossConfig(hyperparams.Config): - """Configuration for Loss. - - Attributes: - name: The name of the loss. Defaults to None. - label_smoothing: Whether or not to apply label smoothing to the loss. This - only applies to 'categorical_cross_entropy'. - """ - name: str = None - label_smoothing: float = None - - -@dataclasses.dataclass -class OptimizerConfig(hyperparams.Config): - """Configuration for Optimizers. - - Attributes: - name: The name of the optimizer. Defaults to None. - decay: Decay or rho, discounting factor for gradient. Defaults to None. - epsilon: Small value used to avoid 0 denominator. Defaults to None. - momentum: Plain momentum constant. Defaults to None. - nesterov: Whether or not to apply Nesterov momentum. Defaults to None. - moving_average_decay: The amount of decay to apply. If 0 or None, then - exponential moving average is not used. Defaults to None. - lookahead: Whether or not to apply the lookahead optimizer. Defaults to - None. - beta_1: The exponential decay rate for the 1st moment estimates. Used in the - Adam optimizers. Defaults to None. - beta_2: The exponential decay rate for the 2nd moment estimates. Used in the - Adam optimizers. Defaults to None. - epsilon: Small value used to avoid 0 denominator. Defaults to 1e-7. 
- """ - name: str = None - decay: float = None - epsilon: float = None - momentum: float = None - nesterov: bool = None - moving_average_decay: Optional[float] = None - lookahead: Optional[bool] = None - beta_1: float = None - beta_2: float = None - epsilon: float = None - - -@dataclasses.dataclass -class LearningRateConfig(hyperparams.Config): - """Configuration for learning rates. - - Attributes: - name: The name of the learning rate. Defaults to None. - initial_lr: The initial learning rate. Defaults to None. - decay_epochs: The number of decay epochs. Defaults to None. - decay_rate: The rate of decay. Defaults to None. - warmup_epochs: The number of warmup epochs. Defaults to None. - batch_lr_multiplier: The multiplier to apply to the base learning rate, if - necessary. Defaults to None. - examples_per_epoch: the number of examples in a single epoch. Defaults to - None. - boundaries: boundaries used in piecewise constant decay with warmup. - multipliers: multipliers used in piecewise constant decay with warmup. - scale_by_batch_size: Scale the learning rate by a fraction of the batch - size. Set to 0 for no scaling (default). - staircase: Apply exponential decay at discrete values instead of continuous. - """ - name: str = None - initial_lr: float = None - decay_epochs: float = None - decay_rate: float = None - warmup_epochs: int = None - examples_per_epoch: int = None - boundaries: List[int] = None - multipliers: List[float] = None - scale_by_batch_size: float = 0. - staircase: bool = None - - -@dataclasses.dataclass -class ModelConfig(hyperparams.Config): - """Configuration for Models. - - Attributes: - name: The name of the model. Defaults to None. - model_params: The parameters used to create the model. Defaults to None. - num_classes: The number of classes in the model. Defaults to None. - loss: A `LossConfig` instance. Defaults to None. - optimizer: An `OptimizerConfig` instance. Defaults to None. - """ - name: str = None - model_params: hyperparams.Config = None - num_classes: int = None - loss: LossConfig = None - optimizer: OptimizerConfig = None - - -@dataclasses.dataclass -class ExperimentConfig(hyperparams.Config): - """Base configuration for an image classification experiment. - - Attributes: - model_dir: The directory to use when running an experiment. - mode: e.g. 'train_and_eval', 'export' - runtime: A `RuntimeConfig` instance. - train: A `TrainConfig` instance. - evaluation: An `EvalConfig` instance. - model: A `ModelConfig` instance. - export: An `ExportConfig` instance. 
- """ - model_dir: str = None - model_name: str = None - mode: str = None - runtime: RuntimeConfig = None - train_dataset: Any = None - validation_dataset: Any = None - train: TrainConfig = None - evaluation: EvalConfig = None - model: ModelConfig = None - export: ExportConfig = None diff --git a/spaces/Navneet574/Heart_Disease_Prediciton/app.py b/spaces/Navneet574/Heart_Disease_Prediciton/app.py deleted file mode 100644 index 30f66c36eeab6633487a32671d615f07ab544f93..0000000000000000000000000000000000000000 --- a/spaces/Navneet574/Heart_Disease_Prediciton/app.py +++ /dev/null @@ -1,82 +0,0 @@ -import gradio as gr -from joblib import load -import numpy as np -import pandas as pd - -AgeCategory = ['18-24', '25-29', '35-39', '35-39', '40-44', '45-49', '50-54', '55-59', '60-64', '65-69', '70-74', '75-79', '80 or older'] -Binary_Input = ['Yes', 'No'] -Sex = ['Male', 'Female'] -Race = ['White', 'Black', 'Asian', 'American Indian/Alaskan Native', 'Hispanic', 'Other'] -Diabetic = ['Yes', 'No', 'No borderline diabetes', 'Yes (during pregnancy)'] -GeneralHealth = ['Excellent', 'Very good', 'Good', 'Fair', 'Poor'] - -dic_ageCat = {'18-24':0, '25-29':1, '35-39':2, '35-39':3, '40-44':4, '45-49':5, '50-54':6, '55-59':7, '60-64':8, '65-69':9, '70-74':10, '75-79':11, '80 or older':12} -dic_binary = {'Yes':1, 'No':0} -dic_sex = {'Male':0, 'Female':1} -dic_race = {'White':5, 'Black':2, 'Asian':0, 'American Indian/Alaskan Native':3, 'Hispanic':1, 'Other':4} -dic_diabetic = {'Yes':2, 'No':0, 'No borderline diabetes':1, 'Yes (during pregnancy)':3} -dic_generalHealth = {'Excellent':0, 'Very good':4, 'Good':2, 'Fair':1, 'Poor':3} - - -def predict_price(BMI, Smoking, AlcoholDrinking, Stroke, PhysicalHealth, MentalHealth, DiffWalking, Sex, AgeCategory, Race, Diabetic, PhysicalActivity, GenHealth, SleepTime, Asthma, KidneyDisease, SkinCancer): - model = load('Heart_Disease_Classification.jb') - - data = { - 'BMI': [BMI], - 'Smoking': dic_binary[Smoking], - 'AlcoholDrinking': dic_binary[AlcoholDrinking], - 'Stroke': dic_binary[Stroke], - 'PhysicalHealth': [PhysicalHealth], - 'MentalHealth': [MentalHealth], - 'DiffWalking': dic_binary[DiffWalking], - 'Sex': dic_sex[Sex], - 'AgeCategory': dic_ageCat[AgeCategory], - 'Race': dic_race[Race], - 'Diabetic': dic_diabetic[Diabetic], - 'PhysicalActivity': dic_binary[PhysicalActivity], - 'GenHealth': dic_generalHealth[GenHealth], - 'SleepTime': [SleepTime], - 'Asthma': dic_binary[Asthma], - 'KidneyDisease': dic_binary[KidneyDisease], - 'SkinCancer': dic_binary[SkinCancer] - } - - Xinp = pd.DataFrame(data) - print(Xinp) - - stone = model.predict(Xinp) - - if stone == 1: - return stone["YES"] - else: - return stone["NO"] - -ui = gr.Interface( - fn=predict_price,inputs=[ - gr.inputs.Textbox(placeholder='BMI', default=0, - numeric=True, label='Body Mass Index (BMI) (normal is between 18.5 and 24.9)'), - gr.inputs.Dropdown(choices=Binary_Input, label='Smoking', default="No"), - gr.inputs.Dropdown(choices=Binary_Input, label='Alcohol Drinking', default="No"), - gr.inputs.Dropdown(choices=Binary_Input, label='Stroke', default="No"), - gr.inputs.Textbox(placeholder='Physical Health', - default=0, numeric=True, label='Physical Health Score'), - gr.inputs.Textbox(placeholder='MentalHealth', default=0, - numeric=True, label='Mental Health Score'), - gr.inputs.Dropdown(choices=Binary_Input, label='Diff Walking', default="No"), - gr.inputs.Dropdown(choices=Sex, label='Sex', default=""), - gr.inputs.Dropdown(choices=AgeCategory, label='Age Category', default="18-24"), - 
gr.inputs.Dropdown(choices=Race, label='Race', default="Asian"), - gr.inputs.Dropdown(choices=Diabetic, label='Diabetic', default="No"), - gr.inputs.Dropdown(choices=Binary_Input, label='Physical Activity', default="Yes"), - gr.inputs.Dropdown(choices=GeneralHealth, label='General Health', default="Excellent"), - gr.inputs.Textbox(placeholder='Sleep Time', - default='7', numeric=True, label='Sleep Time (normal is between 6-8 hrs)'), - gr.inputs.Dropdown(choices=Binary_Input, label='Asthma', default="No"), - gr.inputs.Dropdown(choices=Binary_Input, label='Kidney Disease', default="No"), - gr.inputs.Dropdown(choices=Binary_Input, label='Skin Cancer', default="No") - ], outputs=[ - "text" - ] -) -if __name__ == "__main__": - ui.launch() \ No newline at end of file diff --git a/spaces/Nikhil0987/omm/convo.py b/spaces/Nikhil0987/omm/convo.py deleted file mode 100644 index f5c87b19d82d3f1e1869101faf1205475b5ed3b9..0000000000000000000000000000000000000000 --- a/spaces/Nikhil0987/omm/convo.py +++ /dev/null @@ -1,27 +0,0 @@ -from transformers import pipeline, Conversation -import streamlit as st -from streamlit_option_menu import option_menu - -chatbot = pipeline(task="conversational", model="microsoft/DialoGPT-medium") - -def Convo(): - if convo := st.chat_input("Enter your message"): - - conversation = Conversation(convo) - - - - # candidate_labels = ["HELP", "PROBLEM SOLVE", "GENERAL TALK"] - - - ans = chatbot(conversation) - # add_user_input = st.button("Add User Input") - - # conversation.add_user_input("{}".format(convo)) - # conversation = chatbot(conversation) - with st.chat_message("assistant"): - # ans - st.write(ans.generated_responses[-1]) - - -# convo() diff --git a/spaces/NimaBoscarino/climategan/figures/metrics_onefig.py b/spaces/NimaBoscarino/climategan/figures/metrics_onefig.py deleted file mode 100644 index d9d372dcbb1bed2fffbfd8e81d6da749ceab730b..0000000000000000000000000000000000000000 --- a/spaces/NimaBoscarino/climategan/figures/metrics_onefig.py +++ /dev/null @@ -1,772 +0,0 @@ -""" -This scripts plots examples of the images that get best and worse metrics -""" -print("Imports...", end="") -import os -import sys -from argparse import ArgumentParser -from pathlib import Path - -import matplotlib.patches as mpatches -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sns -import yaml -from imageio import imread -from matplotlib.gridspec import GridSpec -from skimage.color import rgba2rgb -from sklearn.metrics.pairwise import euclidean_distances - -sys.path.append("../") - -from climategan.data import encode_mask_label -from climategan.eval_metrics import edges_coherence_std_min -from eval_masker import crop_and_resize - -# ----------------------- -# ----- Constants ----- -# ----------------------- - -# Metrics -metrics = ["error", "f05", "edge_coherence"] - -dict_metrics = { - "names": { - "tpr": "TPR, Recall, Sensitivity", - "tnr": "TNR, Specificity, Selectivity", - "fpr": "FPR", - "fpt": "False positives relative to image size", - "fnr": "FNR, Miss rate", - "fnt": "False negatives relative to image size", - "mpr": "May positive rate (MPR)", - "mnr": "May negative rate (MNR)", - "accuracy": "Accuracy (ignoring may)", - "error": "Error", - "f05": "F05 score", - "precision": "Precision", - "edge_coherence": "Edge coherence", - "accuracy_must_may": "Accuracy (ignoring cannot)", - }, - "key_metrics": ["error", "f05", "edge_coherence"], -} - - -# Colors -colorblind_palette = sns.color_palette("colorblind") -color_cannot = 
colorblind_palette[1] -color_must = colorblind_palette[2] -color_may = colorblind_palette[7] -color_pred = colorblind_palette[4] - -icefire = sns.color_palette("icefire", as_cmap=False, n_colors=5) -color_tp = icefire[0] -color_tn = icefire[1] -color_fp = icefire[4] -color_fn = icefire[3] - - -def parsed_args(): - """ - Parse and returns command-line args - - Returns: - argparse.Namespace: the parsed arguments - """ - parser = ArgumentParser() - parser.add_argument( - "--input_csv", - default="ablations_metrics_20210311.csv", - type=str, - help="CSV containing the results of the ablation study", - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - help="Output directory", - ) - parser.add_argument( - "--models_log_path", - default=None, - type=str, - help="Path containing the log files of the models", - ) - parser.add_argument( - "--masker_test_set_dir", - default=None, - type=str, - help="Directory containing the test images", - ) - parser.add_argument( - "--best_model", - default="dada, msd_spade, pseudo", - type=str, - help="The string identifier of the best model", - ) - parser.add_argument( - "--dpi", - default=200, - type=int, - help="DPI for the output images", - ) - parser.add_argument( - "--alpha", - default=0.5, - type=float, - help="Transparency of labels shade", - ) - parser.add_argument( - "--percentile", - default=0.05, - type=float, - help="Transparency of labels shade", - ) - parser.add_argument( - "--seed", - default=None, - type=int, - help="Bootstrap random seed, for reproducibility", - ) - parser.add_argument( - "--no_images", - action="store_true", - default=False, - help="Do not generate images", - ) - - return parser.parse_args() - - -def map_color(arr, input_color, output_color, rtol=1e-09): - """ - Maps one color to another - """ - input_color_arr = np.tile(input_color, (arr.shape[:2] + (1,))) - output = arr.copy() - output[np.all(np.isclose(arr, input_color_arr, rtol=rtol), axis=2)] = output_color - return output - - -def plot_labels(ax, img, label, img_id, n_, add_title, do_legend): - label_colmap = label.astype(float) - label_colmap = map_color(label_colmap, (255, 0, 0), color_cannot) - label_colmap = map_color(label_colmap, (0, 0, 255), color_must) - label_colmap = map_color(label_colmap, (0, 0, 0), color_may) - - ax.imshow(img) - ax.imshow(label_colmap, alpha=0.5) - ax.axis("off") - - if n_ in [1, 3, 5]: - color_ = "green" - else: - color_ = "red" - - ax.text( - -0.15, - 0.5, - img_id, - color=color_, - fontweight="roman", - fontsize="x-large", - horizontalalignment="left", - verticalalignment="center", - transform=ax.transAxes, - ) - - if add_title: - ax.set_title("Labels", rotation=0, fontsize="x-large") - - -def plot_pred(ax, img, pred, img_id, add_title, do_legend): - pred = np.tile(np.expand_dims(pred, axis=2), reps=(1, 1, 3)) - - pred_colmap = pred.astype(float) - pred_colmap = map_color(pred_colmap, (1, 1, 1), color_pred) - pred_colmap_ma = np.ma.masked_not_equal(pred_colmap, color_pred) - pred_colmap_ma = pred_colmap_ma.mask * img + pred_colmap_ma - - ax.imshow(img) - ax.imshow(pred_colmap_ma, alpha=0.5) - ax.axis("off") - - if add_title: - ax.set_title("Prediction", rotation=0, fontsize="x-large") - - -def plot_correct_incorrect( - ax, img_filename, img, metric, label, img_id, n_, add_title, do_legend -): - # FP - fp_map = imread( - model_path / "eval-metrics/fp" / "{}_fp.png".format(Path(img_filename).stem) - ) - fp_map = np.tile(np.expand_dims(fp_map, axis=2), reps=(1, 1, 3)) - - fp_map_colmap = fp_map.astype(float) - 
fp_map_colmap = map_color(fp_map_colmap, (1, 1, 1), color_fp) - - # FN - fn_map = imread( - model_path / "eval-metrics/fn" / "{}_fn.png".format(Path(img_filename).stem) - ) - fn_map = np.tile(np.expand_dims(fn_map, axis=2), reps=(1, 1, 3)) - - fn_map_colmap = fn_map.astype(float) - fn_map_colmap = map_color(fn_map_colmap, (1, 1, 1), color_fn) - - # TP - tp_map = imread( - model_path / "eval-metrics/tp" / "{}_tp.png".format(Path(img_filename).stem) - ) - tp_map = np.tile(np.expand_dims(tp_map, axis=2), reps=(1, 1, 3)) - - tp_map_colmap = tp_map.astype(float) - tp_map_colmap = map_color(tp_map_colmap, (1, 1, 1), color_tp) - - # TN - tn_map = imread( - model_path / "eval-metrics/tn" / "{}_tn.png".format(Path(img_filename).stem) - ) - tn_map = np.tile(np.expand_dims(tn_map, axis=2), reps=(1, 1, 3)) - - tn_map_colmap = tn_map.astype(float) - tn_map_colmap = map_color(tn_map_colmap, (1, 1, 1), color_tn) - - label_colmap = label.astype(float) - label_colmap = map_color(label_colmap, (0, 0, 0), color_may) - label_colmap_ma = np.ma.masked_not_equal(label_colmap, color_may) - label_colmap_ma = label_colmap_ma.mask * img + label_colmap_ma - - # Combine masks - maps = fp_map_colmap + fn_map_colmap + tp_map_colmap + tn_map_colmap - maps_ma = np.ma.masked_equal(maps, (0, 0, 0)) - maps_ma = maps_ma.mask * img + maps_ma - - ax.imshow(img) - ax.imshow(label_colmap_ma, alpha=0.5) - ax.imshow(maps_ma, alpha=0.5) - ax.axis("off") - - if add_title: - ax.set_title("Metric", rotation=0, fontsize="x-large") - - -def plot_edge_coherence(ax, img, metric, label, pred, img_id, n_, add_title, do_legend): - pred = np.tile(np.expand_dims(pred, axis=2), reps=(1, 1, 3)) - - ec, pred_ec, label_ec = edges_coherence_std_min( - np.squeeze(pred[:, :, 0]), np.squeeze(encode_mask_label(label, "flood")) - ) - - ################## - # Edge distances # - ################## - - # Location of edges - pred_ec_coord = np.argwhere(pred_ec > 0) - label_ec_coord = np.argwhere(label_ec > 0) - - # Normalized pairwise distances between pred and label - dist_mat = np.divide( - euclidean_distances(pred_ec_coord, label_ec_coord), pred_ec.shape[0] - ) - - # Standard deviation of the minimum distance from pred to label - min_dist = np.min(dist_mat, axis=1) # noqa: F841 - - ############# - # Make plot # - ############# - - pred_ec = np.tile( - np.expand_dims(np.asarray(pred_ec > 0, dtype=float), axis=2), reps=(1, 1, 3) - ) - pred_ec_colmap = map_color(pred_ec, (1, 1, 1), color_pred) - pred_ec_colmap_ma = np.ma.masked_not_equal(pred_ec_colmap, color_pred) # noqa: F841 - - label_ec = np.tile( - np.expand_dims(np.asarray(label_ec > 0, dtype=float), axis=2), reps=(1, 1, 3) - ) - label_ec_colmap = map_color(label_ec, (1, 1, 1), color_must) - label_ec_colmap_ma = np.ma.masked_not_equal( # noqa: F841 - label_ec_colmap, color_must - ) - - # Combined pred and label edges - combined_ec = pred_ec_colmap + label_ec_colmap - combined_ec_ma = np.ma.masked_equal(combined_ec, (0, 0, 0)) - combined_ec_img = combined_ec_ma.mask * img + combined_ec - - # Pred - pred_colmap = pred.astype(float) - pred_colmap = map_color(pred_colmap, (1, 1, 1), color_pred) - pred_colmap_ma = np.ma.masked_not_equal(pred_colmap, color_pred) - - # Must - label_colmap = label.astype(float) - label_colmap = map_color(label_colmap, (0, 0, 255), color_must) - label_colmap_ma = np.ma.masked_not_equal(label_colmap, color_must) - - # TP - tp_map = imread( - model_path / "eval-metrics/tp" / "{}_tp.png".format(Path(srs_sel.filename).stem) - ) - tp_map = np.tile(np.expand_dims(tp_map, axis=2), 
reps=(1, 1, 3)) - tp_map_colmap = tp_map.astype(float) - tp_map_colmap = map_color(tp_map_colmap, (1, 1, 1), color_tp) - tp_map_colmap_ma = np.ma.masked_not_equal(tp_map_colmap, color_tp) - - # Combination - comb_pred = ( - (pred_colmap_ma.mask ^ tp_map_colmap_ma.mask) - & tp_map_colmap_ma.mask - & combined_ec_ma.mask - ) * pred_colmap - comb_label = ( - (label_colmap_ma.mask ^ pred_colmap_ma.mask) - & pred_colmap_ma.mask - & combined_ec_ma.mask - ) * label_colmap - comb_tp = combined_ec_ma.mask * tp_map_colmap.copy() - combined = comb_tp + comb_label + comb_pred - combined_ma = np.ma.masked_equal(combined, (0, 0, 0)) - combined_ma = combined_ma.mask * combined_ec_img + combined_ma - - ax.imshow(combined_ec_img, alpha=1) - ax.imshow(combined_ma, alpha=0.5) - ax.axis("off") - - # Plot lines - idx_sort_x = np.argsort(pred_ec_coord[:, 1]) - offset = 100 - for idx in range(offset, pred_ec_coord.shape[0], offset): - y0, x0 = pred_ec_coord[idx_sort_x[idx], :] - argmin = np.argmin(dist_mat[idx_sort_x[idx]]) - y1, x1 = label_ec_coord[argmin, :] - ax.plot([x0, x1], [y0, y1], color="white", linewidth=0.5) - - if add_title: - ax.set_title("Metric", rotation=0, fontsize="x-large") - - -def plot_images_metric( - axes, metric, img_filename, img_id, n_, srs_sel, add_title, do_legend -): - - # Read images - img_path = imgs_orig_path / img_filename - label_path = labels_path / "{}_labeled.png".format(Path(img_filename).stem) - img, label = crop_and_resize(img_path, label_path) - img = rgba2rgb(img) if img.shape[-1] == 4 else img / 255.0 - - pred = imread( - model_path / "eval-metrics/pred" / "{}_pred.png".format(Path(img_filename).stem) - ) - - # Label - plot_labels(axes[0], img, label, img_id, n_, add_title, do_legend) - - # Prediction - plot_pred(axes[1], img, pred, img_id, add_title, do_legend) - - # Correct / incorrect - if metric in ["error", "f05"]: - plot_correct_incorrect( - axes[2], - img_filename, - img, - metric, - label, - img_id, - n_, - add_title, - do_legend=False, - ) - handles = [] - lw = 1.0 - handles.append( - mpatches.Patch(facecolor=color_tp, label="TP", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_tn, label="TN", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_fp, label="FP", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_fn, label="FN", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch( - facecolor=color_may, - label="May-be-flooded", - linewidth=lw, - alpha=0.66, - ) - ) - labels = ["TP", "TN", "FP", "FN", "May-be-flooded"] - if metric == "error": - if n_ in [1, 3, 5]: - title = "Low error rate" - else: - title = "High error rate" - else: - if n_ in [1, 3, 5]: - title = "High F05 score" - else: - title = "Low F05 score" - # Edge coherence - elif metric == "edge_coherence": - plot_edge_coherence( - axes[2], img, metric, label, pred, img_id, n_, add_title, do_legend=False - ) - handles = [] - lw = 1.0 - handles.append( - mpatches.Patch(facecolor=color_tp, label="TP", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_pred, label="pred", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch( - facecolor=color_must, - label="Must-be-flooded", - linewidth=lw, - alpha=0.66, - ) - ) - labels = ["TP", "Prediction", "Must-be-flooded"] - if n_ in [1, 3, 5]: - title = "High edge coherence" - else: - title = "Low edge coherence" - - else: - raise ValueError - - labels_values_title = "Error: {:.4f} \nFO5: {:.4f} \nEdge coherence: 
{:.4f}".format( - srs_sel.error, srs_sel.f05, srs_sel.edge_coherence - ) - - plot_legend(axes[3], img, handles, labels, labels_values_title, title) - - -def plot_legend(ax, img, handles, labels, labels_values_title, title): - img_ = np.zeros_like(img, dtype=np.uint8) - img_.fill(255) - ax.imshow(img_) - ax.axis("off") - - leg1 = ax.legend( - handles=handles, - labels=labels, - title=title, - title_fontsize="medium", - labelspacing=0.6, - loc="upper left", - fontsize="x-small", - frameon=False, - ) - leg1._legend_box.align = "left" - - leg2 = ax.legend( - title=labels_values_title, - title_fontsize="small", - loc="lower left", - frameon=False, - ) - leg2._legend_box.align = "left" - - ax.add_artist(leg1) - - -def scatterplot_metrics_pair(ax, df, x_metric, y_metric, dict_images): - - sns.scatterplot(data=df, x=x_metric, y=y_metric, ax=ax) - - # Set X-label - ax.set_xlabel(dict_metrics["names"][x_metric], rotation=0, fontsize="medium") - - # Set Y-label - ax.set_ylabel(dict_metrics["names"][y_metric], rotation=90, fontsize="medium") - - # Change spines - sns.despine(ax=ax, left=True, bottom=True) - - annotate_scatterplot(ax, dict_images, x_metric, y_metric) - - -def scatterplot_metrics(ax, df, df_all, dict_images, plot_all=False): - - # Other - if plot_all: - sns.scatterplot( - data=df_all.loc[df_all.ground == True], - x="error", y="f05", hue="edge_coherence", ax=ax, - marker='+', alpha=0.25) - sns.scatterplot( - data=df_all.loc[df_all.instagan == True], - x="error", y="f05", hue="edge_coherence", ax=ax, - marker='x', alpha=0.25) - sns.scatterplot( - data=df_all.loc[(df_all.instagan == False) & (df_all.instagan == False) & - (df_all.model_feats != args.best_model)], - x="error", y="f05", hue="edge_coherence", ax=ax, - marker='s', alpha=0.25) - - # Best model - cmap_ = sns.cubehelix_palette(as_cmap=True) - sns.scatterplot( - data=df, x="error", y="f05", hue="edge_coherence", ax=ax, palette=cmap_ - ) - - norm = plt.Normalize(df["edge_coherence"].min(), df["edge_coherence"].max()) - sm = plt.cm.ScalarMappable(cmap=cmap_, norm=norm) - sm.set_array([]) - - # Remove the legend and add a colorbar - ax.get_legend().remove() - ax_cbar = ax.figure.colorbar(sm) - ax_cbar.set_label("Edge coherence", labelpad=8) - - # Set X-label - ax.set_xlabel(dict_metrics["names"]["error"], rotation=0, fontsize="medium") - - # Set Y-label - ax.set_ylabel(dict_metrics["names"]["f05"], rotation=90, fontsize="medium") - - annotate_scatterplot(ax, dict_images, "error", "f05") - - # Change spines - sns.despine(ax=ax, left=True, bottom=True) - - # Set XY limits - xlim = ax.get_xlim() - ylim = ax.get_ylim() - ax.set_xlim([0.0, xlim[1]]) - ax.set_ylim([ylim[0], 1.0]) - - -def annotate_scatterplot(ax, dict_images, x_metric, y_metric, offset=0.1): - xlim = ax.get_xlim() - ylim = ax.get_ylim() - x_len = xlim[1] - xlim[0] - y_len = ylim[1] - ylim[0] - x_th = xlim[1] - x_len / 2.0 - y_th = ylim[1] - y_len / 2.0 - for text, d in dict_images.items(): - if text in ["B", "D", "F"]: - x = d[x_metric] - y = d[y_metric] - - x_text = x + x_len * offset if x < x_th else x - x_len * offset - y_text = y + y_len * offset if y < y_th else y - y_len * offset - - ax.annotate( - xy=(x, y), - xycoords="data", - xytext=(x_text, y_text), - textcoords="data", - text=text, - arrowprops=dict(facecolor="black", shrink=0.05), - fontsize="medium", - color="black", - ) - elif text == "A": - x = ( - dict_images["A"][x_metric] - + dict_images["C"][x_metric] - + dict_images["E"][x_metric] - ) / 3 - y = ( - dict_images["A"][y_metric] - + 
dict_images["C"][y_metric] - + dict_images["E"][y_metric] - ) / 3 - - x_text = x + x_len * 2 * offset if x < x_th else x - x_len * 2 * offset - y_text = ( - y + y_len * 0.45 * offset if y < y_th else y - y_len * 0.45 * offset - ) - - ax.annotate( - xy=(x, y), - xycoords="data", - xytext=(x_text, y_text), - textcoords="data", - text="A, C, E", - arrowprops=dict(facecolor="black", shrink=0.05), - fontsize="medium", - color="black", - ) - - -if __name__ == "__main__": - # ----------------------------- - # ----- Parse arguments ----- - # ----------------------------- - args = parsed_args() - print("Args:\n" + "\n".join([f" {k:20}: {v}" for k, v in vars(args).items()])) - - # Determine output dir - if args.output_dir is None: - output_dir = Path(os.environ["SLURM_TMPDIR"]) - else: - output_dir = Path(args.output_dir) - if not output_dir.exists(): - output_dir.mkdir(parents=True, exist_ok=False) - - # Store args - output_yml = output_dir / "labels.yml" - with open(output_yml, "w") as f: - yaml.dump(vars(args), f) - - # Data dirs - imgs_orig_path = Path(args.masker_test_set_dir) / "imgs" - labels_path = Path(args.masker_test_set_dir) / "labels" - - # Read CSV - df_all = pd.read_csv(args.input_csv, index_col="model_img_idx") - - # Select best model - df = df_all.loc[df_all.model_feats == args.best_model] - v_key, model_dir = df.model.unique()[0].split("/") - model_path = Path(args.models_log_path) / "ablation-{}".format(v_key) / model_dir - - # Set up plot - sns.reset_orig() - sns.set(style="whitegrid") - plt.rcParams.update({"font.family": "serif"}) - plt.rcParams.update( - { - "font.serif": [ - "Computer Modern Roman", - "Times New Roman", - "Utopia", - "New Century Schoolbook", - "Century Schoolbook L", - "ITC Bookman", - "Bookman", - "Times", - "Palatino", - "Charter", - "serif" "Bitstream Vera Serif", - "DejaVu Serif", - ] - } - ) - - if args.seed: - np.random.seed(args.seed) - img_ids = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - dict_images = {} - idx = 0 - - # Define grid of subplots - grid_vmargin = 0.03 # Extent of the vertical margin between metric grids - ax_hspace = 0.04 # Extent of the vertical space between axes of same grid - ax_wspace = 0.05 # Extent of the horizontal space between axes of same grid - n_grids = len(metrics) - n_cols = 4 - n_rows = 2 - h_grid = (1.0 / n_grids) - ((n_grids - 1) * grid_vmargin) / n_grids - - fig1 = plt.figure(dpi=200, figsize=(11, 13)) - - n_ = 0 - add_title = False - for metric_id, metric in enumerate(metrics): - - # Create grid - top_grid = 1.0 - metric_id * h_grid - metric_id * grid_vmargin - bottom_grid = top_grid - h_grid - gridspec = GridSpec( - n_rows, - n_cols, - wspace=ax_wspace, - hspace=ax_hspace, - bottom=bottom_grid, - top=top_grid, - ) - - # Select best - if metric == "error": - ascending = True - else: - ascending = False - idx_rand = np.random.permutation(int(args.percentile * len(df)))[0] - srs_sel = df.sort_values(by=metric, ascending=ascending).iloc[idx_rand] - img_id = img_ids[idx] - dict_images.update({img_id: srs_sel}) - # Read images - img_filename = srs_sel.filename - - axes_row = [fig1.add_subplot(gridspec[0, c]) for c in range(n_cols)] - if not args.no_images: - n_ += 1 - if metric_id == 0: - add_title = True - plot_images_metric( - axes_row, - metric, - img_filename, - img_id, - n_, - srs_sel, - add_title=add_title, - do_legend=False, - ) - add_title = False - - idx += 1 - print("1 more row done.") - # Select worst - if metric == "error": - ascending = False - else: - ascending = True - idx_rand = 
np.random.permutation(int(args.percentile * len(df)))[0] - srs_sel = df.sort_values(by=metric, ascending=ascending).iloc[idx_rand] - img_id = img_ids[idx] - dict_images.update({img_id: srs_sel}) - # Read images - img_filename = srs_sel.filename - - axes_row = [fig1.add_subplot(gridspec[1, c]) for c in range(n_cols)] - if not args.no_images: - n_ += 1 - plot_images_metric( - axes_row, - metric, - img_filename, - img_id, - n_, - srs_sel, - add_title=add_title, - do_legend=False, - ) - - idx += 1 - print("1 more row done.") - - output_fig = output_dir / "all_metrics.png" - - fig1.tight_layout() # (pad=1.5) # - fig1.savefig(output_fig, dpi=fig1.dpi, bbox_inches="tight") - - # Scatter plot - fig2 = plt.figure(dpi=200) - - scatterplot_metrics(fig2.gca(), df, df_all, dict_images) - - # fig2, axes = plt.subplots(nrows=1, ncols=3, dpi=200, figsize=(18, 5)) - # - # scatterplot_metrics_pair(axes[0], df, "error", "f05", dict_images) - # scatterplot_metrics_pair(axes[1], df, "error", "edge_coherence", dict_images) - # scatterplot_metrics_pair(axes[2], df, "f05", "edge_coherence", dict_images) - - output_fig = output_dir / "scatterplots.png" - fig2.savefig(output_fig, dpi=fig2.dpi, bbox_inches="tight") diff --git a/spaces/NimaBoscarino/climategan/utils_scripts/make-labelbox.sh b/spaces/NimaBoscarino/climategan/utils_scripts/make-labelbox.sh deleted file mode 100644 index d649238546b996ae1ad00f7753c04aeebc7aaa97..0000000000000000000000000000000000000000 --- a/spaces/NimaBoscarino/climategan/utils_scripts/make-labelbox.sh +++ /dev/null @@ -1,9 +0,0 @@ -echo "Dowloading Script" && python download_labelbox.py - -echo "Merging Script" && python merge_labelbox_masks.py - -echo "Cleaning labeled" -rm /Users/victor/Downloads/metrics-v2/labels/* -cp /Users/victor/Downloads/labelbox_test_flood-v2/__labeled/* /Users/victor/Downloads/metrics-v2/labels - -echo "Create labeled images Script" && python create_labeled.py \ No newline at end of file diff --git a/spaces/Nunchakuka/FrenchAnonymizer/models.py b/spaces/Nunchakuka/FrenchAnonymizer/models.py deleted file mode 100644 index 46b8aacb1bef18f6fad4c20c968b19125626799c..0000000000000000000000000000000000000000 --- a/spaces/Nunchakuka/FrenchAnonymizer/models.py +++ /dev/null @@ -1,351 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - 
in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 
1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SpeakerEncoder(torch.nn.Module): - def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256): - super(SpeakerEncoder, self).__init__() - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - def forward(self, mels): - self.lstm.flatten_parameters() - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - def compute_partial_slices(self, total_frames, partial_frames, partial_hop): - mel_slices = [] - for i in range(0, total_frames-partial_frames, partial_hop): - mel_range = torch.arange(i, i+partial_frames) - mel_slices.append(mel_range) - - return mel_slices - - def embed_utterance(self, mel, partial_frames=128, partial_hop=64): - mel_len = mel.size(1) - last_mel = mel[:,-partial_frames:] - - if mel_len > partial_frames: - mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop) - mels = list(mel[:,s] for s in mel_slices) - mels.append(last_mel) - mels = torch.stack(tuple(mels), 0).squeeze(1) - - with torch.no_grad(): - partial_embeds = self(mels) - embed = torch.mean(partial_embeds, axis=0).unsqueeze(0) - #embed = embed / torch.linalg.norm(embed, 2) - else: - with torch.no_grad(): 
- embed = self(last_mel) - - return embed - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - use_spk, - **kwargs): - - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.use_spk = use_spk - - self.enc_p = Encoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if not self.use_spk: - self.enc_spk = SpeakerEncoder(model_hidden_size=gin_channels, model_embedding_size=gin_channels) - - def forward(self, c, spec, g=None, mel=None, c_lengths=None, spec_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - if spec_lengths == None: - spec_lengths = (torch.ones(spec.size(0)) * spec.size(-1)).to(spec.device) - - if not self.use_spk: - g = self.enc_spk(mel.transpose(1,2)) - g = g.unsqueeze(-1) - - _, m_p, logs_p, _ = self.enc_p(c, c_lengths) - z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g) - z_p = self.flow(z, spec_mask, g=g) - - z_slice, ids_slice = commons.rand_slice_segments(z, spec_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - - return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, c, g=None, mel=None, c_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - if not self.use_spk: - g = self.enc_spk.embed_utterance(mel.transpose(1,2)) - g = g.unsqueeze(-1) - - z_p, m_p, logs_p, c_mask = self.enc_p(c, c_lengths) - z = self.flow(z_p, c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g) - - return o diff --git a/spaces/Nunchakuka/FrenchAnonymizer/utils.py b/spaces/Nunchakuka/FrenchAnonymizer/utils.py deleted file mode 100644 index 1bd5b6185af6c9f1c270b8ba345bfc36d059e081..0000000000000000000000000000000000000000 --- a/spaces/Nunchakuka/FrenchAnonymizer/utils.py +++ /dev/null @@ -1,305 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch -from torch.nn import functional as F -from commons import sequence_mask - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def get_cmodel(rank): - checkpoint = 
torch.load('wavlm/WavLM-Large.pt') - cfg = WavLMConfig(checkpoint['cfg']) - cmodel = WavLM(cfg).cuda(rank) - cmodel.load_state_dict(checkpoint['model']) - cmodel.eval() - return cmodel - - -def get_content(cmodel, y): - with torch.no_grad(): - c = cmodel.extract_features(y.squeeze(1))[0] - c = c.transpose(1, 2) - return c - - -def get_vocoder(rank): - with open("hifigan/config.json", "r") as f: - config = json.load(f) - config = hifigan.AttrDict(config) - vocoder = hifigan.Generator(config) - ckpt = torch.load("hifigan/generator_v1") - vocoder.load_state_dict(ckpt["generator"]) - vocoder.eval() - vocoder.remove_weight_norm() - vocoder.cuda(rank) - return vocoder - - -def transform(mel, height): # 68-92 - #r = np.random.random() - #rate = r * 0.3 + 0.85 # 0.85-1.15 - #height = int(mel.size(-2) * rate) - tgt = torchvision.transforms.functional.resize(mel, (height, mel.size(-1))) - if height >= mel.size(-2): - return tgt[:, :mel.size(-2), :] - else: - silence = tgt[:,-1:,:].repeat(1,mel.size(-2)-height,1) - silence += torch.randn_like(silence) / 10 - return torch.cat((tgt, silence), 1) - - -def stretch(mel, width): # 0.5-2 - return torchvision.transforms.functional.resize(mel, (mel.size(-2), width)) - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - 
mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/noisychannel/rerank.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/noisychannel/rerank.py deleted file mode 100644 index bb80d11a67cd75764a89f6f41915b0348ae96e92..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/noisychannel/rerank.py +++ /dev/null @@ -1,428 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from multiprocessing import Pool - -import numpy as np -from fairseq import options -from fairseq.data import dictionary -from fairseq.scoring import bleu - -from examples.noisychannel import ( - rerank_generate, - rerank_options, - rerank_score_bw, - rerank_score_lm, - rerank_utils, -) - - -def score_target_hypo( - args, a, b, c, lenpen, target_outfile, hypo_outfile, write_hypos, normalize -): - - print("lenpen", lenpen, "weight1", a, "weight2", b, "weight3", c) - gen_output_lst, bitext1_lst, bitext2_lst, lm_res_lst = load_score_files(args) - dict = dictionary.Dictionary() - scorer = scorer = bleu.Scorer( - bleu.BleuConfig( - pad=dict.pad(), - eos=dict.eos(), - unk=dict.unk(), - ) - ) - - ordered_hypos = {} - ordered_targets = {} - - for shard_id in range(len(bitext1_lst)): - bitext1 = bitext1_lst[shard_id] - bitext2 = bitext2_lst[shard_id] - gen_output = gen_output_lst[shard_id] - lm_res = lm_res_lst[shard_id] - - total = len(bitext1.rescore_source.keys()) - source_lst = [] - hypo_lst = [] - score_lst = [] - reference_lst = [] - j = 1 - best_score = -math.inf - - for i in range(total): - # length is measured in terms of words, not bpe tokens, since models may not share the same bpe - target_len = len(bitext1.rescore_hypo[i].split()) - - if lm_res is not None: - lm_score = lm_res.score[i] - else: - lm_score = 0 - - if bitext2 is not None: - bitext2_score = bitext2.rescore_score[i] - bitext2_backwards = bitext2.backwards - else: - bitext2_score = None - bitext2_backwards = None - - score = rerank_utils.get_score( - a, - b, - c, - target_len, - bitext1.rescore_score[i], - bitext2_score, - lm_score=lm_score, - lenpen=lenpen, - src_len=bitext1.source_lengths[i], - tgt_len=bitext1.target_lengths[i], - bitext1_backwards=bitext1.backwards, - 
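                # (Editorial comment, added.) get_score combines the three scores in
                # noisy-channel fashion: roughly a weighted sum of the direct-model score,
                # the channel (second-model) score and the LM score, with a length penalty
                # controlled by lenpen. The exact formula lives in rerank_utils.get_score;
                # the weights a, b, c and lenpen are the values swept in match_target_hypo below.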
bitext2_backwards=bitext2_backwards, - normalize=normalize, - ) - - if score > best_score: - best_score = score - best_hypo = bitext1.rescore_hypo[i] - - if j == gen_output.num_hypos[i] or j == args.num_rescore: - j = 1 - hypo_lst.append(best_hypo) - score_lst.append(best_score) - source_lst.append(bitext1.rescore_source[i]) - reference_lst.append(bitext1.rescore_target[i]) - - best_score = -math.inf - best_hypo = "" - else: - j += 1 - - gen_keys = list(sorted(gen_output.no_bpe_target.keys())) - - for key in range(len(gen_keys)): - if args.prefix_len is None: - assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], ( - "pred and rescore hypo mismatch: i: " - + str(key) - + ", " - + str(hypo_lst[key]) - + str(gen_keys[key]) - + str(gen_output.no_bpe_hypo[key]) - ) - sys_tok = dict.encode_line(hypo_lst[key]) - ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]]) - scorer.add(ref_tok, sys_tok) - - else: - full_hypo = rerank_utils.get_full_from_prefix( - hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]] - ) - sys_tok = dict.encode_line(full_hypo) - ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]]) - scorer.add(ref_tok, sys_tok) - - # if only one set of hyper parameters is provided, write the predictions to a file - if write_hypos: - # recover the orinal ids from n best list generation - for key in range(len(gen_output.no_bpe_target)): - if args.prefix_len is None: - assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], ( - "pred and rescore hypo mismatch:" - + "i:" - + str(key) - + str(hypo_lst[key]) - + str(gen_output.no_bpe_hypo[key]) - ) - ordered_hypos[gen_keys[key]] = hypo_lst[key] - ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[ - gen_keys[key] - ] - - else: - full_hypo = rerank_utils.get_full_from_prefix( - hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]] - ) - ordered_hypos[gen_keys[key]] = full_hypo - ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[ - gen_keys[key] - ] - - # write the hypos in the original order from nbest list generation - if args.num_shards == (len(bitext1_lst)): - with open(target_outfile, "w") as t: - with open(hypo_outfile, "w") as h: - for key in range(len(ordered_hypos)): - t.write(ordered_targets[key]) - h.write(ordered_hypos[key]) - - res = scorer.result_string(4) - if write_hypos: - print(res) - score = rerank_utils.parse_bleu_scoring(res) - return score - - -def match_target_hypo(args, target_outfile, hypo_outfile): - """combine scores from the LM and bitext models, and write the top scoring hypothesis to a file""" - if len(args.weight1) == 1: - res = score_target_hypo( - args, - args.weight1[0], - args.weight2[0], - args.weight3[0], - args.lenpen[0], - target_outfile, - hypo_outfile, - True, - args.normalize, - ) - rerank_scores = [res] - else: - print("launching pool") - with Pool(32) as p: - rerank_scores = p.starmap( - score_target_hypo, - [ - ( - args, - args.weight1[i], - args.weight2[i], - args.weight3[i], - args.lenpen[i], - target_outfile, - hypo_outfile, - False, - args.normalize, - ) - for i in range(len(args.weight1)) - ], - ) - - if len(rerank_scores) > 1: - best_index = np.argmax(rerank_scores) - best_score = rerank_scores[best_index] - print("best score", best_score) - print("best lenpen", args.lenpen[best_index]) - print("best weight1", args.weight1[best_index]) - print("best weight2", args.weight2[best_index]) - print("best weight3", args.weight3[best_index]) - return ( - args.lenpen[best_index], - args.weight1[best_index], - args.weight2[best_index], - 
args.weight3[best_index], - best_score, - ) - - else: - return ( - args.lenpen[0], - args.weight1[0], - args.weight2[0], - args.weight3[0], - rerank_scores[0], - ) - - -def load_score_files(args): - if args.all_shards: - shard_ids = list(range(args.num_shards)) - else: - shard_ids = [args.shard_id] - - gen_output_lst = [] - bitext1_lst = [] - bitext2_lst = [] - lm_res1_lst = [] - - for shard_id in shard_ids: - using_nbest = args.nbest_list is not None - ( - pre_gen, - left_to_right_preprocessed_dir, - right_to_left_preprocessed_dir, - backwards_preprocessed_dir, - lm_preprocessed_dir, - ) = rerank_utils.get_directories( - args.data_dir_name, - args.num_rescore, - args.gen_subset, - args.gen_model_name, - shard_id, - args.num_shards, - args.sampling, - args.prefix_len, - args.target_prefix_frac, - args.source_prefix_frac, - ) - - rerank1_is_gen = ( - args.gen_model == args.score_model1 and args.source_prefix_frac is None - ) - rerank2_is_gen = ( - args.gen_model == args.score_model2 and args.source_prefix_frac is None - ) - - score1_file = rerank_utils.rescore_file_name( - pre_gen, - args.prefix_len, - args.model1_name, - target_prefix_frac=args.target_prefix_frac, - source_prefix_frac=args.source_prefix_frac, - backwards=args.backwards1, - ) - if args.score_model2 is not None: - score2_file = rerank_utils.rescore_file_name( - pre_gen, - args.prefix_len, - args.model2_name, - target_prefix_frac=args.target_prefix_frac, - source_prefix_frac=args.source_prefix_frac, - backwards=args.backwards2, - ) - if args.language_model is not None: - lm_score_file = rerank_utils.rescore_file_name( - pre_gen, args.prefix_len, args.lm_name, lm_file=True - ) - - # get gen output - predictions_bpe_file = pre_gen + "/generate_output_bpe.txt" - if using_nbest: - print("Using predefined n-best list from interactive.py") - predictions_bpe_file = args.nbest_list - gen_output = rerank_utils.BitextOutputFromGen( - predictions_bpe_file, - bpe_symbol=args.post_process, - nbest=using_nbest, - prefix_len=args.prefix_len, - target_prefix_frac=args.target_prefix_frac, - ) - - if rerank1_is_gen: - bitext1 = gen_output - else: - bitext1 = rerank_utils.BitextOutput( - score1_file, - args.backwards1, - args.right_to_left1, - args.post_process, - args.prefix_len, - args.target_prefix_frac, - args.source_prefix_frac, - ) - - if args.score_model2 is not None or args.nbest_list is not None: - if rerank2_is_gen: - bitext2 = gen_output - else: - bitext2 = rerank_utils.BitextOutput( - score2_file, - args.backwards2, - args.right_to_left2, - args.post_process, - args.prefix_len, - args.target_prefix_frac, - args.source_prefix_frac, - ) - - assert ( - bitext2.source_lengths == bitext1.source_lengths - ), "source lengths for rescoring models do not match" - assert ( - bitext2.target_lengths == bitext1.target_lengths - ), "target lengths for rescoring models do not match" - else: - if args.diff_bpe: - assert args.score_model2 is None - bitext2 = gen_output - else: - bitext2 = None - - if args.language_model is not None: - lm_res1 = rerank_utils.LMOutput( - lm_score_file, - args.lm_dict, - args.prefix_len, - args.post_process, - args.target_prefix_frac, - ) - else: - lm_res1 = None - - gen_output_lst.append(gen_output) - bitext1_lst.append(bitext1) - bitext2_lst.append(bitext2) - lm_res1_lst.append(lm_res1) - return gen_output_lst, bitext1_lst, bitext2_lst, lm_res1_lst - - -def rerank(args): - if type(args.lenpen) is not list: - args.lenpen = [args.lenpen] - if type(args.weight1) is not list: - args.weight1 = [args.weight1] - if 
type(args.weight2) is not list: - args.weight2 = [args.weight2] - if type(args.weight3) is not list: - args.weight3 = [args.weight3] - if args.all_shards: - shard_ids = list(range(args.num_shards)) - else: - shard_ids = [args.shard_id] - - for shard_id in shard_ids: - ( - pre_gen, - left_to_right_preprocessed_dir, - right_to_left_preprocessed_dir, - backwards_preprocessed_dir, - lm_preprocessed_dir, - ) = rerank_utils.get_directories( - args.data_dir_name, - args.num_rescore, - args.gen_subset, - args.gen_model_name, - shard_id, - args.num_shards, - args.sampling, - args.prefix_len, - args.target_prefix_frac, - args.source_prefix_frac, - ) - rerank_generate.gen_and_reprocess_nbest(args) - rerank_score_bw.score_bw(args) - rerank_score_lm.score_lm(args) - - if args.write_hypos is None: - write_targets = pre_gen + "/matched_targets" - write_hypos = pre_gen + "/matched_hypos" - else: - write_targets = args.write_hypos + "_targets" + args.gen_subset - write_hypos = args.write_hypos + "_hypos" + args.gen_subset - - if args.all_shards: - write_targets += "_all_shards" - write_hypos += "_all_shards" - - ( - best_lenpen, - best_weight1, - best_weight2, - best_weight3, - best_score, - ) = match_target_hypo(args, write_targets, write_hypos) - - return best_lenpen, best_weight1, best_weight2, best_weight3, best_score - - -def cli_main(): - parser = rerank_options.get_reranking_parser() - args = options.parse_args_and_arch(parser) - rerank(args) - - -if __name__ == "__main__": - cli_main() diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/nonautoregressive_translation/scripts.md b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/nonautoregressive_translation/scripts.md deleted file mode 100644 index 9d3d7b67dc08440b5f4d1c5a7ffcd4bd6e76c14f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/nonautoregressive_translation/scripts.md +++ /dev/null @@ -1,179 +0,0 @@ -# Examples of Training scripts for Non-autoregressive Machine Translation models - -### Non-autoregressive Transformer (NAT, Gu et al., 2017) -Note that we need to have an additional module to perform "length prediction" (`--length-loss-factor`) before generating the whole sequence. -```bash -fairseq-train \ - data-bin/wmt14_en_de_distill \ - --save-dir checkpoints \ - --ddp-backend=legacy_ddp \ - --task translation_lev \ - --criterion nat_loss \ - --arch nonautoregressive_transformer \ - --noise full_mask \ - --share-all-embeddings \ - --optimizer adam --adam-betas '(0.9,0.98)' \ - --lr 0.0005 --lr-scheduler inverse_sqrt \ - --stop-min-lr '1e-09' --warmup-updates 10000 \ - --warmup-init-lr '1e-07' --label-smoothing 0.1 \ - --dropout 0.3 --weight-decay 0.01 \ - --decoder-learned-pos \ - --encoder-learned-pos \ - --pred-length-offset \ - --length-loss-factor 0.1 \ - --apply-bert-init \ - --log-format 'simple' --log-interval 100 \ - --fixed-validation-seed 7 \ - --max-tokens 8000 \ - --save-interval-updates 10000 \ - --max-update 300000 -``` - -### Fast Structured Decoding for Sequence Models (NAT-CRF, Sun et al., 2019) -Note that we implemented a low-rank appromixated CRF model by setting `--crf-lowrank-approx=32` and `--crf-beam-approx=64` as discribed in the original paper. All other settings are the same as the vanilla NAT model. 
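-
-The low-rank approximation is what keeps the CRF transition model tractable: instead of materialising a full vocab x vocab transition matrix, the pairwise potential is factorised into two thin rank-`--crf-lowrank-approx` embedding tables, and `--crf-beam-approx` restricts the transitions to the top-scoring candidate tokens at each position. The snippet below is a minimal PyTorch sketch of that idea (hypothetical tensor names, not the fairseq implementation); the full training command follows.
-
-```python
-import torch
-
-vocab, rank, beam = 32000, 32, 64  # rank/beam mirror --crf-lowrank-approx / --crf-beam-approx
-
-# Two thin factors replace the full vocab x vocab transition matrix.
-E_from = torch.randn(vocab, rank)
-E_to = torch.randn(vocab, rank)
-
-def transition_block(prev_topk, next_topk):
-    """Transition scores restricted to the beam-approximated candidate sets."""
-    # (beam, rank) @ (rank, beam) -> (beam, beam); the dense matrix is never built.
-    return E_from[prev_topk] @ E_to[next_topk].t()
-
-scores = transition_block(torch.randint(vocab, (beam,)), torch.randint(vocab, (beam,)))
-print(scores.shape)  # torch.Size([64, 64])
-```
-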
-```bash -fairseq-train \ - data-bin/wmt14_en_de_distill \ - --save-dir checkpoints \ - --ddp-backend=legacy_ddp \ - --task translation_lev \ - --criterion nat_loss \ - --arch nacrf_transformer \ - --noise full_mask \ - --share-all-embeddings \ - --optimizer adam --adam-betas '(0.9,0.98)' \ - --lr 0.0005 --lr-scheduler inverse_sqrt \ - --stop-min-lr '1e-09' --warmup-updates 10000 \ - --warmup-init-lr '1e-07' --label-smoothing 0.1 \ - --dropout 0.3 --weight-decay 0.01 \ - --decoder-learned-pos \ - --encoder-learned-pos \ - --pred-length-offset \ - --length-loss-factor 0.1 \ - --word-ins-loss-factor 0.5 \ - --crf-lowrank-approx 32 \ - --crf-beam-approx 64 \ - --apply-bert-init \ - --log-format 'simple' --log-interval 100 \ - --fixed-validation-seed 7 \ - --max-tokens 8000 \ - --save-interval-updates 10000 \ - --max-update 300000 -``` - - -### Non-autoregressive Transformer with Iterative Refinement (iNAT, Lee et al., 2018) -Note that `--train-step` means how many iterations of refinement we used during training, and `--dae-ratio` controls the ratio of denoising auto-encoder training described in the original paper. -```bash -fairseq-train \ - data-bin/wmt14_en_de_distill \ - --save-dir checkpoints \ - --ddp-backend=legacy_ddp \ - --task translation_lev \ - --criterion nat_loss \ - --arch iterative_nonautoregressive_transformer \ - --noise full_mask \ - --share-all-embeddings \ - --optimizer adam --adam-betas '(0.9,0.98)' \ - --lr 0.0005 --lr-scheduler inverse_sqrt \ - --stop-min-lr '1e-09' --warmup-updates 10000 \ - --warmup-init-lr '1e-07' --label-smoothing 0.1 \ - --dropout 0.3 --weight-decay 0.01 \ - --decoder-learned-pos \ - --encoder-learned-pos \ - --pred-length-offset \ - --length-loss-factor 0.1 \ - --train-step 4 \ - --dae-ratio 0.5 \ - --stochastic-approx \ - --apply-bert-init \ - --log-format 'simple' --log-interval 100 \ - --fixed-validation-seed 7 \ - --max-tokens 8000 \ - --save-interval-updates 10000 \ - --max-update 300000 -``` - -### Insertion Transformer (InsT, Stern et al., 2019) -Note that we need to specify the "slot-loss" (uniform or balanced tree) described in the original paper. Here we use `--label-tau` to control the temperature. 
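-
-Concretely, the "balanced tree" slot loss spreads the target probability over the tokens missing from a slot according to their distance to the slot centre, and `--label-tau` is the temperature applied to that distance (a small tau concentrates the loss on the centre token, while a large tau approaches the uniform slot loss). The snippet below is a rough PyTorch sketch of that weighting (an illustration of the idea, not the fairseq code); the full training command follows.
-
-```python
-import torch
-
-def balanced_tree_weights(span_len: int, tau: float) -> torch.Tensor:
-    """Weight for each missing token in a slot of length span_len."""
-    pos = torch.arange(span_len, dtype=torch.float)
-    centre = (span_len - 1) / 2.0
-    # Tokens near the slot centre get larger weight; tau softens the preference.
-    return torch.softmax(-(pos - centre).abs() / tau, dim=0)
-
-print(balanced_tree_weights(5, tau=1.0))    # peaked at the centre token
-print(balanced_tree_weights(5, tau=100.0))  # nearly uniform
-```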
- -```bash -fairseq-train \ - data-bin/wmt14_en_de_distill \ - --save-dir checkpoints \ - --ddp-backend=legacy_ddp \ - --task translation_lev \ - --criterion nat_loss \ - --arch insertion_transformer \ - --noise random_delete \ - --share-all-embeddings \ - --optimizer adam --adam-betas '(0.9,0.98)' \ - --lr 0.0005 --lr-scheduler inverse_sqrt \ - --stop-min-lr '1e-09' --warmup-updates 10000 \ - --warmup-init-lr '1e-07' --label-smoothing 0.1 \ - --dropout 0.3 --weight-decay 0.01 \ - --decoder-learned-pos \ - --encoder-learned-pos \ - --apply-bert-init \ - --log-format 'simple' --log-interval 100 \ - --fixed-validation-seed 7 \ - --max-tokens 8000 \ - --save-interval-updates 10000 \ - --max-update 300000 -``` - - -### Mask Predict (CMLM, Ghazvininejad et al., 2019) -```bash -fairseq-train \ - data-bin/wmt14_en_de_distill \ - --save-dir checkpoints \ - --ddp-backend=legacy_ddp \ - --task translation_lev \ - --criterion nat_loss \ - --arch cmlm_transformer \ - --noise random_mask \ - --share-all-embeddings \ - --optimizer adam --adam-betas '(0.9,0.98)' \ - --lr 0.0005 --lr-scheduler inverse_sqrt \ - --stop-min-lr '1e-09' --warmup-updates 10000 \ - --warmup-init-lr '1e-07' --label-smoothing 0.1 \ - --dropout 0.3 --weight-decay 0.01 \ - --decoder-learned-pos \ - --encoder-learned-pos \ - --apply-bert-init \ - --log-format 'simple' --log-interval 100 \ - --fixed-validation-seed 7 \ - --max-tokens 8000 \ - --save-interval-updates 10000 \ - --max-update 300000 -``` - - - - -### Levenshtein Transformer (LevT, Gu et al., 2019) -```bash -fairseq-train \ - data-bin/wmt14_en_de_distill \ - --save-dir checkpoints \ - --ddp-backend=legacy_ddp \ - --task translation_lev \ - --criterion nat_loss \ - --arch levenshtein_transformer \ - --noise random_delete \ - --share-all-embeddings \ - --optimizer adam --adam-betas '(0.9,0.98)' \ - --lr 0.0005 --lr-scheduler inverse_sqrt \ - --stop-min-lr '1e-09' --warmup-updates 10000 \ - --warmup-init-lr '1e-07' --label-smoothing 0.1 \ - --dropout 0.3 --weight-decay 0.01 \ - --decoder-learned-pos \ - --encoder-learned-pos \ - --apply-bert-init \ - --log-format 'simple' --log-interval 100 \ - --fixed-validation-seed 7 \ - --max-tokens 8000 \ - --save-interval-updates 10000 \ - --max-update 300000 -``` diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/__init__.py deleted file mode 100644 index 8b7eb2ec4fc5190c4dcdfe34b0259e6f448e18a9..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/__init__.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-"""isort:skip_file""" - -from .dictionary import Dictionary, TruncatedDictionary - -from .fairseq_dataset import FairseqDataset, FairseqIterableDataset - -from .base_wrapper_dataset import BaseWrapperDataset - -from .add_target_dataset import AddTargetDataset -from .append_token_dataset import AppendTokenDataset -from .audio.raw_audio_dataset import BinarizedAudioDataset, FileAudioDataset -from .audio.hubert_dataset import HubertDataset -from .backtranslation_dataset import BacktranslationDataset -from .bucket_pad_length_dataset import BucketPadLengthDataset -from .colorize_dataset import ColorizeDataset -from .concat_dataset import ConcatDataset -from .concat_sentences_dataset import ConcatSentencesDataset -from .denoising_dataset import DenoisingDataset -from .id_dataset import IdDataset -from .indexed_dataset import ( - IndexedCachedDataset, - IndexedDataset, - IndexedRawTextDataset, - MMapIndexedDataset, -) -from .language_pair_dataset import LanguagePairDataset -from .list_dataset import ListDataset -from .lm_context_window_dataset import LMContextWindowDataset -from .lru_cache_dataset import LRUCacheDataset -from .mask_tokens_dataset import MaskTokensDataset -from .monolingual_dataset import MonolingualDataset -from .multi_corpus_sampled_dataset import MultiCorpusSampledDataset -from .nested_dictionary_dataset import NestedDictionaryDataset -from .noising import NoisingDataset -from .numel_dataset import NumelDataset -from .num_samples_dataset import NumSamplesDataset -from .offset_tokens_dataset import OffsetTokensDataset -from .pad_dataset import LeftPadDataset, PadDataset, RightPadDataset -from .prepend_dataset import PrependDataset -from .prepend_token_dataset import PrependTokenDataset -from .raw_label_dataset import RawLabelDataset -from .replace_dataset import ReplaceDataset -from .resampling_dataset import ResamplingDataset -from .roll_dataset import RollDataset -from .round_robin_zip_datasets import RoundRobinZipDatasets -from .sort_dataset import SortDataset -from .strip_token_dataset import StripTokenDataset -from .subsample_dataset import SubsampleDataset -from .token_block_dataset import TokenBlockDataset -from .transform_eos_dataset import TransformEosDataset -from .transform_eos_lang_pair_dataset import TransformEosLangPairDataset -from .shorten_dataset import TruncateDataset, RandomCropDataset -from .multilingual.sampled_multi_dataset import SampledMultiDataset -from .multilingual.sampled_multi_epoch_dataset import SampledMultiEpochDataset -from .fasta_dataset import FastaDataset, EncodedFastaDataset - -from .iterators import ( - CountingIterator, - EpochBatchIterator, - GroupedIterator, - ShardedIterator, -) - -__all__ = [ - "AddTargetDataset", - "AppendTokenDataset", - "BacktranslationDataset", - "BaseWrapperDataset", - "BinarizedAudioDataset", - "BucketPadLengthDataset", - "ColorizeDataset", - "ConcatDataset", - "ConcatSentencesDataset", - "CountingIterator", - "DenoisingDataset", - "Dictionary", - "EncodedFastaDataset", - "EpochBatchIterator", - "FairseqDataset", - "FairseqIterableDataset", - "FastaDataset", - "FileAudioDataset", - "GroupedIterator", - "HubertDataset", - "IdDataset", - "IndexedCachedDataset", - "IndexedDataset", - "IndexedRawTextDataset", - "LanguagePairDataset", - "LeftPadDataset", - "ListDataset", - "LMContextWindowDataset", - "LRUCacheDataset", - "MaskTokensDataset", - "MMapIndexedDataset", - "MonolingualDataset", - "MultiCorpusSampledDataset", - "NestedDictionaryDataset", - "NoisingDataset", - "NumelDataset", - "NumSamplesDataset", - 
"OffsetTokensDataset", - "PadDataset", - "PrependDataset", - "PrependTokenDataset", - "RandomCropDataset", - "RawLabelDataset", - "ResamplingDataset", - "ReplaceDataset", - "RightPadDataset", - "RollDataset", - "RoundRobinZipDatasets", - "SampledMultiDataset", - "SampledMultiEpochDataset", - "ShardedIterator", - "SortDataset", - "StripTokenDataset", - "SubsampleDataset", - "TokenBlockDataset", - "TransformEosDataset", - "TransformEosLangPairDataset", - "TruncateDataset", - "TruncatedDictionary", -] diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/tasks/simultaneous_translation.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/tasks/simultaneous_translation.py deleted file mode 100644 index 11c7dc1ea966a54f8915ef164377e40f90e851a1..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/tasks/simultaneous_translation.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from fairseq.tasks import register_task -from fairseq.tasks.speech_to_text import SpeechToTextTask -from fairseq.tasks.translation import ( - TranslationTask, TranslationConfig -) - -try: - import examples.simultaneous_translation # noqa - import_successful = True -except BaseException: - import_successful = False - - -logger = logging.getLogger(__name__) - - -def check_import(flag): - if not flag: - raise ImportError( - "'examples.simultaneous_translation' is not correctly imported. " - "Please considering `pip install -e $FAIRSEQ_DIR`." - ) - - -@register_task("simul_speech_to_text") -class SimulSpeechToTextTask(SpeechToTextTask): - def __init__(self, args, tgt_dict): - check_import(import_successful) - super().__init__(args, tgt_dict) - - -@register_task("simul_text_to_text", dataclass=TranslationConfig) -class SimulTextToTextTask(TranslationTask): - def __init__(self, cfg, src_dict, tgt_dict): - check_import(import_successful) - super().__init__(cfg, src_dict, tgt_dict) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/README.md deleted file mode 100644 index 57104230655c7c517d25904e634c53b6159ee60f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Unit to Speech Model (unit2speech) - -Unit to speech model is modified Tacotron2 model that learns to synthesize speech from discrete speech units. All models are trained on quantized [LJSpeech](https://keithito.com/LJ-Speech-Dataset/). 
- -Upstream Units | Download Link -|-|- -Log Mel Filterbank + KM50 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/logmel/tts_km50/tts_checkpoint_best.pt) -Log Mel Filterbank + KM100 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/logmel/tts_km100/tts_checkpoint_best.pt) -Log Mel Filterbank + KM200 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/logmel/tts_km200/tts_checkpoint_best.pt) -Log Mel Filterbank + KM500 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/logmel/tts_km500/tts_checkpoint_best.pt) -Modified CPC + KM50 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/cpc/tts_km50/tts_checkpoint_best.pt) -Modified CPC + KM100 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/cpc/tts_km100/tts_checkpoint_best.pt) -Modified CPC + KM200 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/cpc/tts_km200/tts_checkpoint_best.pt) -Modified CPC + KM500 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/cpc/tts_km500/tts_checkpoint_best.pt) -HuBERT Base + KM50 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/hubert/tts_km50/tts_checkpoint_best.pt) -HuBERT Base + KM100 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/hubert/tts_km100/tts_checkpoint_best.pt) -HuBERT Base + KM200 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/hubert/tts_km200/tts_checkpoint_best.pt) -HuBERT Base + KM500 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/hubert/tts_km500/tts_checkpoint_best.pt) -wav2vec 2.0 Large + KM50 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/w2v2/tts_km50/tts_checkpoint_best.pt) -wav2vec 2.0 Large + KM100 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/w2v2/tts_km100/tts_checkpoint_best.pt) -wav2vec 2.0 Large + KM200 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/w2v2/tts_km200/tts_checkpoint_best.pt) -wav2vec 2.0 Large + KM500 | [download](https://dl.fbaipublicfiles.com/textless_nlp/gslm/w2v2/tts_km500/tts_checkpoint_best.pt) - -## Run inference using a unit2speech model -* Install librosa, unidecode and inflect using `pip install librosa, unidecode, inflect` -* Download [Waveglow checkpoint](https://dl.fbaipublicfiles.com/textless_nlp/gslm/waveglow_256channels_new.pt). This is the vocoder. - -Sample commnd to run inference using trained unit2speech models. Please note that the quantized audio to synthesized should be using the same units as the unit2speech model was trained with. 
-``` -FAIRSEQ_ROOT= -TTS_MODEL_PATH= -QUANTIZED_UNIT_PATH= -OUT_DIR= -WAVEGLOW_PATH= - -PYTHONPATH=${FAIRSEQ_ROOT}:${FAIRSEQ_ROOT}/examples/textless_nlp/gslm/unit2speech python ${FAIRSEQ_ROOT}/examples/textless_nlp/gslm/unit2speech/synthesize_audio_from_units.py \ - --tts_model_path $TTS_MODEL_PATH \ - --quantized_unit_path $QUANTIZED_UNIT_PATH \ - --out_audio_dir $OUT_DIR \ - --waveglow_path $WAVEGLOW_PATH \ - --max_decoder_steps 2000 -``` \ No newline at end of file diff --git a/spaces/ORI-Muchim/BlueArchiveTTS/text/japanese.py b/spaces/ORI-Muchim/BlueArchiveTTS/text/japanese.py deleted file mode 100644 index 375e4d50872d5c68ee57ca17470a2ca425425eba..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/BlueArchiveTTS/text/japanese.py +++ /dev/null @@ -1,153 +0,0 @@ -import re -from unidecode import unidecode -import pyopenjtalk - - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - -# List of (romaji, ipa) pairs for marks: -_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ts', 'ʦ'), - ('u', 'ɯ'), - ('j', 'ʥ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (romaji, ipa2) pairs for marks: -_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('u', 'ɯ'), - ('ʧ', 'tʃ'), - ('j', 'dʑ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text != '': - text += ' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil', 'pau']: - text += phoneme.replace('ch', 'ʧ').replace('sh', - 'ʃ').replace('cl', 'Q') - else: - continue - # n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: - 
a2_next = -1 - else: - a2_next = int( - re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i < len(marks): - text += unidecode(marks[i]).replace(' ', '') - return text - - -def get_real_sokuon(text): - for regex, replacement in _real_sokuon: - text = re.sub(regex, replacement, text) - return text - - -def get_real_hatsuon(text): - for regex, replacement in _real_hatsuon: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = re.sub( - r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa2(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa3(text): - text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( - 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') - text = re.sub( - r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) - return text diff --git a/spaces/Omnibus/MusicGen/tests/quantization/test_vq.py b/spaces/Omnibus/MusicGen/tests/quantization/test_vq.py deleted file mode 100644 index c215099fedacae35c6798fdd9b8420a447aa16bb..0000000000000000000000000000000000000000 --- a/spaces/Omnibus/MusicGen/tests/quantization/test_vq.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from audiocraft.quantization.vq import ResidualVectorQuantizer - - -class TestResidualVectorQuantizer: - - def test_rvq(self): - x = torch.randn(1, 16, 2048) - vq = ResidualVectorQuantizer(n_q=8, dimension=16, bins=8) - res = vq(x, 1.) 
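-        # res.x is the quantized (straight-through) signal from the residual VQ;
-        # it keeps the input shape (batch, dimension, frames), checked below.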
- assert res.x.shape == torch.Size([1, 16, 2048]) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/centernet.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/centernet.py deleted file mode 100644 index feb7a8222487756d38482da95183bbbcbbe96ed9..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/centernet.py +++ /dev/null @@ -1,864 +0,0 @@ - -import math -import json -import copy -from typing import List, Dict -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY -from detectron2.layers import ShapeSpec, cat -from detectron2.structures import Instances, Boxes -from detectron2.modeling import detector_postprocess -from detectron2.utils.comm import get_world_size -from detectron2.config import configurable - -from ..layers.heatmap_focal_loss import heatmap_focal_loss_jit -from ..layers.heatmap_focal_loss import binary_heatmap_focal_loss -from ..layers.iou_loss import IOULoss -from ..layers.ml_nms import ml_nms -from ..debug import debug_train, debug_test -from .utils import reduce_sum, _transpose -from .centernet_head import CenterNetHead - -__all__ = ["CenterNet"] - -INF = 100000000 - -@PROPOSAL_GENERATOR_REGISTRY.register() -class CenterNet(nn.Module): - @configurable - def __init__(self, - # input_shape: Dict[str, ShapeSpec], - in_channels=256, - *, - num_classes=80, - in_features=("p3", "p4", "p5", "p6", "p7"), - strides=(8, 16, 32, 64, 128), - score_thresh=0.05, - hm_min_overlap=0.8, - loc_loss_type='giou', - min_radius=4, - hm_focal_alpha=0.25, - hm_focal_beta=4, - loss_gamma=2.0, - reg_weight=2.0, - not_norm_reg=True, - with_agn_hm=False, - only_proposal=False, - as_proposal=False, - not_nms=False, - pos_weight=1., - neg_weight=1., - sigmoid_clamp=1e-4, - ignore_high_fp=-1., - center_nms=False, - sizes_of_interest=[[0,80],[64,160],[128,320],[256,640],[512,10000000]], - more_pos=False, - more_pos_thresh=0.2, - more_pos_topk=9, - pre_nms_topk_train=1000, - pre_nms_topk_test=1000, - post_nms_topk_train=100, - post_nms_topk_test=100, - nms_thresh_train=0.6, - nms_thresh_test=0.6, - no_reduce=False, - debug=False, - vis_thresh=0.5, - pixel_mean=[103.530,116.280,123.675], - pixel_std=[1.0,1.0,1.0], - device='cuda', - centernet_head=None, - ): - super().__init__() - self.num_classes = num_classes - self.in_features = in_features - self.strides = strides - self.score_thresh = score_thresh - self.min_radius = min_radius - self.hm_focal_alpha = hm_focal_alpha - self.hm_focal_beta = hm_focal_beta - self.loss_gamma = loss_gamma - self.reg_weight = reg_weight - self.not_norm_reg = not_norm_reg - self.with_agn_hm = with_agn_hm - self.only_proposal = only_proposal - self.as_proposal = as_proposal - self.not_nms = not_nms - self.pos_weight = pos_weight - self.neg_weight = neg_weight - self.sigmoid_clamp = sigmoid_clamp - self.ignore_high_fp = ignore_high_fp - self.center_nms = center_nms - self.sizes_of_interest = sizes_of_interest - self.more_pos = more_pos - self.more_pos_thresh = more_pos_thresh - self.more_pos_topk = more_pos_topk - self.pre_nms_topk_train = pre_nms_topk_train - self.pre_nms_topk_test = pre_nms_topk_test - self.post_nms_topk_train = post_nms_topk_train - self.post_nms_topk_test = 
post_nms_topk_test - self.nms_thresh_train = nms_thresh_train - self.nms_thresh_test = nms_thresh_test - self.no_reduce = no_reduce - self.debug = debug - self.vis_thresh = vis_thresh - if self.center_nms: - self.not_nms = True - self.iou_loss = IOULoss(loc_loss_type) - assert (not self.only_proposal) or self.with_agn_hm - # delta for rendering heatmap - self.delta = (1 - hm_min_overlap) / (1 + hm_min_overlap) - if centernet_head is None: - self.centernet_head = CenterNetHead( - in_channels=in_channels, - num_levels=len(in_features), - with_agn_hm=with_agn_hm, - only_proposal=only_proposal) - else: - self.centernet_head = centernet_head - if self.debug: - pixel_mean = torch.Tensor(pixel_mean).to( - torch.device(device)).view(3, 1, 1) - pixel_std = torch.Tensor(pixel_std).to( - torch.device(device)).view(3, 1, 1) - self.denormalizer = lambda x: x * pixel_std + pixel_mean - - @classmethod - def from_config(cls, cfg, input_shape): - ret = { - # 'input_shape': input_shape, - 'in_channels': input_shape[ - cfg.MODEL.CENTERNET.IN_FEATURES[0]].channels, - 'num_classes': cfg.MODEL.CENTERNET.NUM_CLASSES, - 'in_features': cfg.MODEL.CENTERNET.IN_FEATURES, - 'strides': cfg.MODEL.CENTERNET.FPN_STRIDES, - 'score_thresh': cfg.MODEL.CENTERNET.INFERENCE_TH, - 'loc_loss_type': cfg.MODEL.CENTERNET.LOC_LOSS_TYPE, - 'hm_min_overlap': cfg.MODEL.CENTERNET.HM_MIN_OVERLAP, - 'min_radius': cfg.MODEL.CENTERNET.MIN_RADIUS, - 'hm_focal_alpha': cfg.MODEL.CENTERNET.HM_FOCAL_ALPHA, - 'hm_focal_beta': cfg.MODEL.CENTERNET.HM_FOCAL_BETA, - 'loss_gamma': cfg.MODEL.CENTERNET.LOSS_GAMMA, - 'reg_weight': cfg.MODEL.CENTERNET.REG_WEIGHT, - 'not_norm_reg': cfg.MODEL.CENTERNET.NOT_NORM_REG, - 'with_agn_hm': cfg.MODEL.CENTERNET.WITH_AGN_HM, - 'only_proposal': cfg.MODEL.CENTERNET.ONLY_PROPOSAL, - 'as_proposal': cfg.MODEL.CENTERNET.AS_PROPOSAL, - 'not_nms': cfg.MODEL.CENTERNET.NOT_NMS, - 'pos_weight': cfg.MODEL.CENTERNET.POS_WEIGHT, - 'neg_weight': cfg.MODEL.CENTERNET.NEG_WEIGHT, - 'sigmoid_clamp': cfg.MODEL.CENTERNET.SIGMOID_CLAMP, - 'ignore_high_fp': cfg.MODEL.CENTERNET.IGNORE_HIGH_FP, - 'center_nms': cfg.MODEL.CENTERNET.CENTER_NMS, - 'sizes_of_interest': cfg.MODEL.CENTERNET.SOI, - 'more_pos': cfg.MODEL.CENTERNET.MORE_POS, - 'more_pos_thresh': cfg.MODEL.CENTERNET.MORE_POS_THRESH, - 'more_pos_topk': cfg.MODEL.CENTERNET.MORE_POS_TOPK, - 'pre_nms_topk_train': cfg.MODEL.CENTERNET.PRE_NMS_TOPK_TRAIN, - 'pre_nms_topk_test': cfg.MODEL.CENTERNET.PRE_NMS_TOPK_TEST, - 'post_nms_topk_train': cfg.MODEL.CENTERNET.POST_NMS_TOPK_TRAIN, - 'post_nms_topk_test': cfg.MODEL.CENTERNET.POST_NMS_TOPK_TEST, - 'nms_thresh_train': cfg.MODEL.CENTERNET.NMS_TH_TRAIN, - 'nms_thresh_test': cfg.MODEL.CENTERNET.NMS_TH_TEST, - 'no_reduce': cfg.MODEL.CENTERNET.NO_REDUCE, - 'debug': cfg.DEBUG, - 'vis_thresh': cfg.VIS_THRESH, - 'pixel_mean': cfg.MODEL.PIXEL_MEAN, - 'pixel_std': cfg.MODEL.PIXEL_STD, - 'device': cfg.MODEL.DEVICE, - 'centernet_head': CenterNetHead( - cfg, [input_shape[f] for f in cfg.MODEL.CENTERNET.IN_FEATURES]), - } - return ret - - - def forward(self, images, features_dict, gt_instances): - features = [features_dict[f] for f in self.in_features] - clss_per_level, reg_pred_per_level, agn_hm_pred_per_level = \ - self.centernet_head(features) - grids = self.compute_grids(features) - shapes_per_level = grids[0].new_tensor( - [(x.shape[2], x.shape[3]) for x in reg_pred_per_level]) - - if not self.training: - return self.inference( - images, clss_per_level, reg_pred_per_level, - agn_hm_pred_per_level, grids) - else: - pos_inds, labels, reg_targets, 
flattened_hms = \ - self._get_ground_truth( - grids, shapes_per_level, gt_instances) - # logits_pred: M x F, reg_pred: M x 4, agn_hm_pred: M - logits_pred, reg_pred, agn_hm_pred = self._flatten_outputs( - clss_per_level, reg_pred_per_level, agn_hm_pred_per_level) - - if self.more_pos: - # add more pixels as positive if \ - # 1. they are within the center3x3 region of an object - # 2. their regression losses are small (= 0).squeeze(1) - reg_pred = reg_pred[reg_inds] - reg_targets_pos = reg_targets[reg_inds] - reg_weight_map = flattened_hms.max(dim=1)[0] - reg_weight_map = reg_weight_map[reg_inds] - reg_weight_map = reg_weight_map * 0 + 1 \ - if self.not_norm_reg else reg_weight_map - if self.no_reduce: - reg_norm = max(reg_weight_map.sum(), 1) - else: - reg_norm = max(reduce_sum(reg_weight_map.sum()).item() / num_gpus, 1) - - reg_loss = self.reg_weight * self.iou_loss( - reg_pred, reg_targets_pos, reg_weight_map, - reduction='sum') / reg_norm - losses['loss_centernet_loc'] = reg_loss - - if self.with_agn_hm: - cat_agn_heatmap = flattened_hms.max(dim=1)[0] # M - agn_pos_loss, agn_neg_loss = binary_heatmap_focal_loss( - agn_hm_pred, cat_agn_heatmap, pos_inds, - alpha=self.hm_focal_alpha, - beta=self.hm_focal_beta, - gamma=self.loss_gamma, - sigmoid_clamp=self.sigmoid_clamp, - ignore_high_fp=self.ignore_high_fp, - ) - agn_pos_loss = self.pos_weight * agn_pos_loss / num_pos_avg - agn_neg_loss = self.neg_weight * agn_neg_loss / num_pos_avg - losses['loss_centernet_agn_pos'] = agn_pos_loss - losses['loss_centernet_agn_neg'] = agn_neg_loss - - if self.debug: - print('losses', losses) - print('total_num_pos', total_num_pos) - return losses - - - def compute_grids(self, features): - grids = [] - for level, feature in enumerate(features): - h, w = feature.size()[-2:] - shifts_x = torch.arange( - 0, w * self.strides[level], - step=self.strides[level], - dtype=torch.float32, device=feature.device) - shifts_y = torch.arange( - 0, h * self.strides[level], - step=self.strides[level], - dtype=torch.float32, device=feature.device) - shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) - shift_x = shift_x.reshape(-1) - shift_y = shift_y.reshape(-1) - grids_per_level = torch.stack((shift_x, shift_y), dim=1) + \ - self.strides[level] // 2 - grids.append(grids_per_level) - return grids - - - def _get_ground_truth(self, grids, shapes_per_level, gt_instances): - ''' - Input: - grids: list of tensors [(hl x wl, 2)]_l - shapes_per_level: list of tuples L x 2: - gt_instances: gt instances - Retuen: - pos_inds: N - labels: N - reg_targets: M x 4 - flattened_hms: M x C or M x 1 - N: number of objects in all images - M: number of pixels from all FPN levels - ''' - - # get positive pixel index - if not self.more_pos: - pos_inds, labels = self._get_label_inds( - gt_instances, shapes_per_level) - else: - pos_inds, labels = None, None - heatmap_channels = self.num_classes - L = len(grids) - num_loc_list = [len(loc) for loc in grids] - strides = torch.cat([ - shapes_per_level.new_ones(num_loc_list[l]) * self.strides[l] \ - for l in range(L)]).float() # M - reg_size_ranges = torch.cat([ - shapes_per_level.new_tensor(self.sizes_of_interest[l]).float().view( - 1, 2).expand(num_loc_list[l], 2) for l in range(L)]) # M x 2 - grids = torch.cat(grids, dim=0) # M x 2 - M = grids.shape[0] - - reg_targets = [] - flattened_hms = [] - for i in range(len(gt_instances)): # images - boxes = gt_instances[i].gt_boxes.tensor # N x 4 - area = gt_instances[i].gt_boxes.area() # N - gt_classes = gt_instances[i].gt_classes # N in [0, 
self.num_classes] - - N = boxes.shape[0] - if N == 0: - reg_targets.append(grids.new_zeros((M, 4)) - INF) - flattened_hms.append( - grids.new_zeros(( - M, 1 if self.only_proposal else heatmap_channels))) - continue - - l = grids[:, 0].view(M, 1) - boxes[:, 0].view(1, N) # M x N - t = grids[:, 1].view(M, 1) - boxes[:, 1].view(1, N) # M x N - r = boxes[:, 2].view(1, N) - grids[:, 0].view(M, 1) # M x N - b = boxes[:, 3].view(1, N) - grids[:, 1].view(M, 1) # M x N - reg_target = torch.stack([l, t, r, b], dim=2) # M x N x 4 - - centers = ((boxes[:, [0, 1]] + boxes[:, [2, 3]]) / 2) # N x 2 - centers_expanded = centers.view(1, N, 2).expand(M, N, 2) # M x N x 2 - strides_expanded = strides.view(M, 1, 1).expand(M, N, 2) - centers_discret = ((centers_expanded / strides_expanded).int() * \ - strides_expanded).float() + strides_expanded / 2 # M x N x 2 - - is_peak = (((grids.view(M, 1, 2).expand(M, N, 2) - \ - centers_discret) ** 2).sum(dim=2) == 0) # M x N - is_in_boxes = reg_target.min(dim=2)[0] > 0 # M x N - is_center3x3 = self.get_center3x3( - grids, centers, strides) & is_in_boxes # M x N - is_cared_in_the_level = self.assign_reg_fpn( - reg_target, reg_size_ranges) # M x N - reg_mask = is_center3x3 & is_cared_in_the_level # M x N - - dist2 = ((grids.view(M, 1, 2).expand(M, N, 2) - \ - centers_expanded) ** 2).sum(dim=2) # M x N - dist2[is_peak] = 0 - radius2 = self.delta ** 2 * 2 * area # N - radius2 = torch.clamp( - radius2, min=self.min_radius ** 2) - weighted_dist2 = dist2 / radius2.view(1, N).expand(M, N) # M x N - reg_target = self._get_reg_targets( - reg_target, weighted_dist2.clone(), reg_mask, area) # M x 4 - - if self.only_proposal: - flattened_hm = self._create_agn_heatmaps_from_dist( - weighted_dist2.clone()) # M x 1 - else: - flattened_hm = self._create_heatmaps_from_dist( - weighted_dist2.clone(), gt_classes, - channels=heatmap_channels) # M x C - - reg_targets.append(reg_target) - flattened_hms.append(flattened_hm) - - # transpose im first training_targets to level first ones - reg_targets = _transpose(reg_targets, num_loc_list) - flattened_hms = _transpose(flattened_hms, num_loc_list) - for l in range(len(reg_targets)): - reg_targets[l] = reg_targets[l] / float(self.strides[l]) - reg_targets = cat([x for x in reg_targets], dim=0) # MB x 4 - flattened_hms = cat([x for x in flattened_hms], dim=0) # MB x C - - return pos_inds, labels, reg_targets, flattened_hms - - - def _get_label_inds(self, gt_instances, shapes_per_level): - ''' - Inputs: - gt_instances: [n_i], sum n_i = N - shapes_per_level: L x 2 [(h_l, w_l)]_L - Returns: - pos_inds: N' - labels: N' - ''' - pos_inds = [] - labels = [] - L = len(self.strides) - B = len(gt_instances) - shapes_per_level = shapes_per_level.long() - loc_per_level = (shapes_per_level[:, 0] * shapes_per_level[:, 1]).long() # L - level_bases = [] - s = 0 - for l in range(L): - level_bases.append(s) - s = s + B * loc_per_level[l] - level_bases = shapes_per_level.new_tensor(level_bases).long() # L - strides_default = shapes_per_level.new_tensor(self.strides).float() # L - for im_i in range(B): - targets_per_im = gt_instances[im_i] - bboxes = targets_per_im.gt_boxes.tensor # n x 4 - n = bboxes.shape[0] - centers = ((bboxes[:, [0, 1]] + bboxes[:, [2, 3]]) / 2) # n x 2 - centers = centers.view(n, 1, 2).expand(n, L, 2) - strides = strides_default.view(1, L, 1).expand(n, L, 2) - centers_inds = (centers / strides).long() # n x L x 2 - Ws = shapes_per_level[:, 1].view(1, L).expand(n, L) - pos_ind = level_bases.view(1, L).expand(n, L) + \ - im_i * 
loc_per_level.view(1, L).expand(n, L) + \ - centers_inds[:, :, 1] * Ws + \ - centers_inds[:, :, 0] # n x L - is_cared_in_the_level = self.assign_fpn_level(bboxes) - pos_ind = pos_ind[is_cared_in_the_level].view(-1) - label = targets_per_im.gt_classes.view( - n, 1).expand(n, L)[is_cared_in_the_level].view(-1) - - pos_inds.append(pos_ind) # n' - labels.append(label) # n' - pos_inds = torch.cat(pos_inds, dim=0).long() - labels = torch.cat(labels, dim=0) - return pos_inds, labels # N, N - - - def assign_fpn_level(self, boxes): - ''' - Inputs: - boxes: n x 4 - size_ranges: L x 2 - Return: - is_cared_in_the_level: n x L - ''' - size_ranges = boxes.new_tensor( - self.sizes_of_interest).view(len(self.sizes_of_interest), 2) # L x 2 - crit = ((boxes[:, 2:] - boxes[:, :2]) **2).sum(dim=1) ** 0.5 / 2 # n - n, L = crit.shape[0], size_ranges.shape[0] - crit = crit.view(n, 1).expand(n, L) - size_ranges_expand = size_ranges.view(1, L, 2).expand(n, L, 2) - is_cared_in_the_level = (crit >= size_ranges_expand[:, :, 0]) & \ - (crit <= size_ranges_expand[:, :, 1]) - return is_cared_in_the_level - - - def assign_reg_fpn(self, reg_targets_per_im, size_ranges): - ''' - TODO (Xingyi): merge it with assign_fpn_level - Inputs: - reg_targets_per_im: M x N x 4 - size_ranges: M x 2 - ''' - crit = ((reg_targets_per_im[:, :, :2] + \ - reg_targets_per_im[:, :, 2:])**2).sum(dim=2) ** 0.5 / 2 # M x N - is_cared_in_the_level = (crit >= size_ranges[:, [0]]) & \ - (crit <= size_ranges[:, [1]]) - return is_cared_in_the_level - - - def _get_reg_targets(self, reg_targets, dist, mask, area): - ''' - reg_targets (M x N x 4): long tensor - dist (M x N) - is_*: M x N - ''' - dist[mask == 0] = INF * 1.0 - min_dist, min_inds = dist.min(dim=1) # M - reg_targets_per_im = reg_targets[ - range(len(reg_targets)), min_inds] # M x N x 4 --> M x 4 - reg_targets_per_im[min_dist == INF] = - INF - return reg_targets_per_im - - - def _create_heatmaps_from_dist(self, dist, labels, channels): - ''' - dist: M x N - labels: N - return: - heatmaps: M x C - ''' - heatmaps = dist.new_zeros((dist.shape[0], channels)) - for c in range(channels): - inds = (labels == c) # N - if inds.int().sum() == 0: - continue - heatmaps[:, c] = torch.exp(-dist[:, inds].min(dim=1)[0]) - zeros = heatmaps[:, c] < 1e-4 - heatmaps[zeros, c] = 0 - return heatmaps - - - def _create_agn_heatmaps_from_dist(self, dist): - ''' - TODO (Xingyi): merge it with _create_heatmaps_from_dist - dist: M x N - return: - heatmaps: M x 1 - ''' - heatmaps = dist.new_zeros((dist.shape[0], 1)) - heatmaps[:, 0] = torch.exp(-dist.min(dim=1)[0]) - zeros = heatmaps < 1e-4 - heatmaps[zeros] = 0 - return heatmaps - - - def _flatten_outputs(self, clss, reg_pred, agn_hm_pred): - # Reshape: (N, F, Hl, Wl) -> (N, Hl, Wl, F) -> (sum_l N*Hl*Wl, F) - clss = cat([x.permute(0, 2, 3, 1).reshape(-1, x.shape[1]) \ - for x in clss], dim=0) if clss[0] is not None else None - reg_pred = cat( - [x.permute(0, 2, 3, 1).reshape(-1, 4) for x in reg_pred], dim=0) - agn_hm_pred = cat([x.permute(0, 2, 3, 1).reshape(-1) \ - for x in agn_hm_pred], dim=0) if self.with_agn_hm else None - return clss, reg_pred, agn_hm_pred - - - def get_center3x3(self, locations, centers, strides): - ''' - Inputs: - locations: M x 2 - centers: N x 2 - strides: M - ''' - M, N = locations.shape[0], centers.shape[0] - locations_expanded = locations.view(M, 1, 2).expand(M, N, 2) # M x N x 2 - centers_expanded = centers.view(1, N, 2).expand(M, N, 2) # M x N x 2 - strides_expanded = strides.view(M, 1, 1).expand(M, N, 2) # M x N - centers_discret = 
((centers_expanded / strides_expanded).int() * \ - strides_expanded).float() + strides_expanded / 2 # M x N x 2 - dist_x = (locations_expanded[:, :, 0] - centers_discret[:, :, 0]).abs() - dist_y = (locations_expanded[:, :, 1] - centers_discret[:, :, 1]).abs() - return (dist_x <= strides_expanded[:, :, 0]) & \ - (dist_y <= strides_expanded[:, :, 0]) - - - def inference(self, images, clss_per_level, reg_pred_per_level, - agn_hm_pred_per_level, grids): - logits_pred = [x.sigmoid() if x is not None else None \ - for x in clss_per_level] - agn_hm_pred_per_level = [x.sigmoid() if x is not None else None \ - for x in agn_hm_pred_per_level] - - if self.only_proposal: - proposals = self.predict_instances( - grids, agn_hm_pred_per_level, reg_pred_per_level, - images.image_sizes, [None for _ in agn_hm_pred_per_level]) - else: - proposals = self.predict_instances( - grids, logits_pred, reg_pred_per_level, - images.image_sizes, agn_hm_pred_per_level) - if self.as_proposal or self.only_proposal: - for p in range(len(proposals)): - proposals[p].proposal_boxes = proposals[p].get('pred_boxes') - proposals[p].objectness_logits = proposals[p].get('scores') - proposals[p].remove('pred_boxes') - - if self.debug: - debug_test( - [self.denormalizer(x) for x in images], - logits_pred, reg_pred_per_level, - agn_hm_pred_per_level, preds=proposals, - vis_thresh=self.vis_thresh, - debug_show_name=False) - return proposals, {} - - - def predict_instances( - self, grids, logits_pred, reg_pred, image_sizes, agn_hm_pred, - is_proposal=False): - sampled_boxes = [] - for l in range(len(grids)): - sampled_boxes.append(self.predict_single_level( - grids[l], logits_pred[l], reg_pred[l] * self.strides[l], - image_sizes, agn_hm_pred[l], l, is_proposal=is_proposal)) - boxlists = list(zip(*sampled_boxes)) - boxlists = [Instances.cat(boxlist) for boxlist in boxlists] - boxlists = self.nms_and_topK( - boxlists, nms=not self.not_nms) - return boxlists - - - def predict_single_level( - self, grids, heatmap, reg_pred, image_sizes, agn_hm, level, - is_proposal=False): - N, C, H, W = heatmap.shape - # put in the same format as grids - if self.center_nms: - heatmap_nms = nn.functional.max_pool2d( - heatmap, (3, 3), stride=1, padding=1) - heatmap = heatmap * (heatmap_nms == heatmap).float() - heatmap = heatmap.permute(0, 2, 3, 1) # N x H x W x C - heatmap = heatmap.reshape(N, -1, C) # N x HW x C - box_regression = reg_pred.view(N, 4, H, W).permute(0, 2, 3, 1) # N x H x W x 4 - box_regression = box_regression.reshape(N, -1, 4) - - candidate_inds = heatmap > self.score_thresh # 0.05 - pre_nms_top_n = candidate_inds.view(N, -1).sum(1) # N - pre_nms_topk = self.pre_nms_topk_train if self.training else self.pre_nms_topk_test - pre_nms_top_n = pre_nms_top_n.clamp(max=pre_nms_topk) # N - - if agn_hm is not None: - agn_hm = agn_hm.view(N, 1, H, W).permute(0, 2, 3, 1) - agn_hm = agn_hm.reshape(N, -1) - heatmap = heatmap * agn_hm[:, :, None] - - results = [] - for i in range(N): - per_box_cls = heatmap[i] # HW x C - per_candidate_inds = candidate_inds[i] # n - per_box_cls = per_box_cls[per_candidate_inds] # n - - per_candidate_nonzeros = per_candidate_inds.nonzero() # n - per_box_loc = per_candidate_nonzeros[:, 0] # n - per_class = per_candidate_nonzeros[:, 1] # n - - per_box_regression = box_regression[i] # HW x 4 - per_box_regression = per_box_regression[per_box_loc] # n x 4 - per_grids = grids[per_box_loc] # n x 2 - - per_pre_nms_top_n = pre_nms_top_n[i] # 1 - - if per_candidate_inds.sum().item() > per_pre_nms_top_n.item(): - per_box_cls, 
top_k_indices = \ - per_box_cls.topk(per_pre_nms_top_n, sorted=False) - per_class = per_class[top_k_indices] - per_box_regression = per_box_regression[top_k_indices] - per_grids = per_grids[top_k_indices] - - detections = torch.stack([ - per_grids[:, 0] - per_box_regression[:, 0], - per_grids[:, 1] - per_box_regression[:, 1], - per_grids[:, 0] + per_box_regression[:, 2], - per_grids[:, 1] + per_box_regression[:, 3], - ], dim=1) # n x 4 - - # avoid invalid boxes in RoI heads - detections[:, 2] = torch.max(detections[:, 2], detections[:, 0] + 0.01) - detections[:, 3] = torch.max(detections[:, 3], detections[:, 1] + 0.01) - boxlist = Instances(image_sizes[i]) - boxlist.scores = torch.sqrt(per_box_cls) \ - if self.with_agn_hm else per_box_cls # n - # import pdb; pdb.set_trace() - boxlist.pred_boxes = Boxes(detections) - boxlist.pred_classes = per_class - results.append(boxlist) - return results - - - def nms_and_topK(self, boxlists, nms=True): - num_images = len(boxlists) - results = [] - for i in range(num_images): - nms_thresh = self.nms_thresh_train if self.training else \ - self.nms_thresh_test - result = ml_nms(boxlists[i], nms_thresh) if nms else boxlists[i] - if self.debug: - print('#proposals before nms', len(boxlists[i])) - print('#proposals after nms', len(result)) - num_dets = len(result) - post_nms_topk = self.post_nms_topk_train if self.training else \ - self.post_nms_topk_test - if num_dets > post_nms_topk: - cls_scores = result.scores - image_thresh, _ = torch.kthvalue( - cls_scores.float().cpu(), - num_dets - post_nms_topk + 1 - ) - keep = cls_scores >= image_thresh.item() - keep = torch.nonzero(keep).squeeze(1) - result = result[keep] - if self.debug: - print('#proposals after filter', len(result)) - results.append(result) - return results - - - def _add_more_pos(self, reg_pred, gt_instances, shapes_per_level): - labels, level_masks, c33_inds, c33_masks, c33_regs = \ - self._get_c33_inds(gt_instances, shapes_per_level) - N, L, K = labels.shape[0], len(self.strides), 9 - c33_inds[c33_masks == 0] = 0 - reg_pred_c33 = reg_pred[c33_inds].detach() # N x L x K - invalid_reg = c33_masks == 0 - c33_regs_expand = c33_regs.view(N * L * K, 4).clamp(min=0) - if N > 0: - with torch.no_grad(): - c33_reg_loss = self.iou_loss( - reg_pred_c33.view(N * L * K, 4), - c33_regs_expand, None, - reduction='none').view(N, L, K).detach() # N x L x K - else: - c33_reg_loss = reg_pred_c33.new_zeros((N, L, K)).detach() - c33_reg_loss[invalid_reg] = INF # N x L x K - c33_reg_loss.view(N * L, K)[level_masks.view(N * L), 4] = 0 # real center - c33_reg_loss = c33_reg_loss.view(N, L * K) - if N == 0: - loss_thresh = c33_reg_loss.new_ones((N)).float() - else: - loss_thresh = torch.kthvalue( - c33_reg_loss, self.more_pos_topk, dim=1)[0] # N - loss_thresh[loss_thresh > self.more_pos_thresh] = self.more_pos_thresh # N - new_pos = c33_reg_loss.view(N, L, K) < \ - loss_thresh.view(N, 1, 1).expand(N, L, K) - pos_inds = c33_inds[new_pos].view(-1) # P - labels = labels.view(N, 1, 1).expand(N, L, K)[new_pos].view(-1) - return pos_inds, labels - - - def _get_c33_inds(self, gt_instances, shapes_per_level): - ''' - TODO (Xingyi): The current implementation is ugly. Refactor. 
- Get the center (and the 3x3 region near center) locations of each objects - Inputs: - gt_instances: [n_i], sum n_i = N - shapes_per_level: L x 2 [(h_l, w_l)]_L - ''' - labels = [] - level_masks = [] - c33_inds = [] - c33_masks = [] - c33_regs = [] - L = len(self.strides) - B = len(gt_instances) - shapes_per_level = shapes_per_level.long() - loc_per_level = (shapes_per_level[:, 0] * shapes_per_level[:, 1]).long() # L - level_bases = [] - s = 0 - for l in range(L): - level_bases.append(s) - s = s + B * loc_per_level[l] - level_bases = shapes_per_level.new_tensor(level_bases).long() # L - strides_default = shapes_per_level.new_tensor(self.strides).float() # L - K = 9 - dx = shapes_per_level.new_tensor([-1, 0, 1, -1, 0, 1, -1, 0, 1]).long() - dy = shapes_per_level.new_tensor([-1, -1, -1, 0, 0, 0, 1, 1, 1]).long() - for im_i in range(B): - targets_per_im = gt_instances[im_i] - bboxes = targets_per_im.gt_boxes.tensor # n x 4 - n = bboxes.shape[0] - if n == 0: - continue - centers = ((bboxes[:, [0, 1]] + bboxes[:, [2, 3]]) / 2) # n x 2 - centers = centers.view(n, 1, 2).expand(n, L, 2) - - strides = strides_default.view(1, L, 1).expand(n, L, 2) # - centers_inds = (centers / strides).long() # n x L x 2 - center_grids = centers_inds * strides + strides // 2# n x L x 2 - l = center_grids[:, :, 0] - bboxes[:, 0].view(n, 1).expand(n, L) - t = center_grids[:, :, 1] - bboxes[:, 1].view(n, 1).expand(n, L) - r = bboxes[:, 2].view(n, 1).expand(n, L) - center_grids[:, :, 0] - b = bboxes[:, 3].view(n, 1).expand(n, L) - center_grids[:, :, 1] # n x L - reg = torch.stack([l, t, r, b], dim=2) # n x L x 4 - reg = reg / strides_default.view(1, L, 1).expand(n, L, 4).float() - - Ws = shapes_per_level[:, 1].view(1, L).expand(n, L) - Hs = shapes_per_level[:, 0].view(1, L).expand(n, L) - expand_Ws = Ws.view(n, L, 1).expand(n, L, K) - expand_Hs = Hs.view(n, L, 1).expand(n, L, K) - label = targets_per_im.gt_classes.view(n).clone() - mask = reg.min(dim=2)[0] >= 0 # n x L - mask = mask & self.assign_fpn_level(bboxes) - labels.append(label) # n - level_masks.append(mask) # n x L - - Dy = dy.view(1, 1, K).expand(n, L, K) - Dx = dx.view(1, 1, K).expand(n, L, K) - c33_ind = level_bases.view(1, L, 1).expand(n, L, K) + \ - im_i * loc_per_level.view(1, L, 1).expand(n, L, K) + \ - (centers_inds[:, :, 1:2].expand(n, L, K) + Dy) * expand_Ws + \ - (centers_inds[:, :, 0:1].expand(n, L, K) + Dx) # n x L x K - - c33_mask = \ - ((centers_inds[:, :, 1:2].expand(n, L, K) + dy) < expand_Hs) & \ - ((centers_inds[:, :, 1:2].expand(n, L, K) + dy) >= 0) & \ - ((centers_inds[:, :, 0:1].expand(n, L, K) + dx) < expand_Ws) & \ - ((centers_inds[:, :, 0:1].expand(n, L, K) + dx) >= 0) - # TODO (Xingyi): think about better way to implement this - # Currently it hard codes the 3x3 region - c33_reg = reg.view(n, L, 1, 4).expand(n, L, K, 4).clone() - c33_reg[:, :, [0, 3, 6], 0] -= 1 - c33_reg[:, :, [0, 3, 6], 2] += 1 - c33_reg[:, :, [2, 5, 8], 0] += 1 - c33_reg[:, :, [2, 5, 8], 2] -= 1 - c33_reg[:, :, [0, 1, 2], 1] -= 1 - c33_reg[:, :, [0, 1, 2], 3] += 1 - c33_reg[:, :, [6, 7, 8], 1] += 1 - c33_reg[:, :, [6, 7, 8], 3] -= 1 - c33_mask = c33_mask & (c33_reg.min(dim=3)[0] >= 0) # n x L x K - c33_inds.append(c33_ind) - c33_masks.append(c33_mask) - c33_regs.append(c33_reg) - - if len(level_masks) > 0: - labels = torch.cat(labels, dim=0) - level_masks = torch.cat(level_masks, dim=0) - c33_inds = torch.cat(c33_inds, dim=0).long() - c33_regs = torch.cat(c33_regs, dim=0) - c33_masks = torch.cat(c33_masks, dim=0) - else: - labels = 
shapes_per_level.new_zeros((0)).long() - level_masks = shapes_per_level.new_zeros((0, L)).bool() - c33_inds = shapes_per_level.new_zeros((0, L, K)).long() - c33_regs = shapes_per_level.new_zeros((0, L, K, 4)).float() - c33_masks = shapes_per_level.new_zeros((0, L, K)).bool() - return labels, level_masks, c33_inds, c33_masks, c33_regs # N x L, N x L x K \ No newline at end of file diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/fetch_data/celebahq_dataset_prepare.sh b/spaces/OpenGVLab/InternGPT/third-party/lama/fetch_data/celebahq_dataset_prepare.sh deleted file mode 100644 index 6d2ba9a6265c0d5fa580035952a1f568dd8d9e44..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/fetch_data/celebahq_dataset_prepare.sh +++ /dev/null @@ -1,37 +0,0 @@ -mkdir celeba-hq-dataset - -unzip data256x256.zip -d celeba-hq-dataset/ - -# Reindex -for i in `echo {00001..30000}` -do - mv 'celeba-hq-dataset/data256x256/'$i'.jpg' 'celeba-hq-dataset/data256x256/'$[10#$i - 1]'.jpg' -done - - -# Split: split train -> train & val -cat fetch_data/train_shuffled.flist | shuf > celeba-hq-dataset/temp_train_shuffled.flist -cat celeba-hq-dataset/temp_train_shuffled.flist | head -n 2000 > celeba-hq-dataset/val_shuffled.flist -cat celeba-hq-dataset/temp_train_shuffled.flist | tail -n +2001 > celeba-hq-dataset/train_shuffled.flist -cat fetch_data/val_shuffled.flist > celeba-hq-dataset/visual_test_shuffled.flist - -mkdir celeba-hq-dataset/train_256/ -mkdir celeba-hq-dataset/val_source_256/ -mkdir celeba-hq-dataset/visual_test_source_256/ - -cat celeba-hq-dataset/train_shuffled.flist | xargs -I {} mv celeba-hq-dataset/data256x256/{} celeba-hq-dataset/train_256/ -cat celeba-hq-dataset/val_shuffled.flist | xargs -I {} mv celeba-hq-dataset/data256x256/{} celeba-hq-dataset/val_source_256/ -cat celeba-hq-dataset/visual_test_shuffled.flist | xargs -I {} mv celeba-hq-dataset/data256x256/{} celeba-hq-dataset/visual_test_source_256/ - - -# create location config celeba.yaml -PWD=$(pwd) -DATASET=${PWD}/celeba-hq-dataset -CELEBA=${PWD}/configs/training/location/celeba.yaml - -touch $CELEBA -echo "# @package _group_" >> $CELEBA -echo "data_root_dir: ${DATASET}/" >> $CELEBA -echo "out_root_dir: ${PWD}/experiments/" >> $CELEBA -echo "tb_dir: ${PWD}/tb_logs/" >> $CELEBA -echo "pretrained_models: ${PWD}/" >> $CELEBA diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/tests/conftest.py b/spaces/OpenMotionLab/MotionGPT/pyrender/tests/conftest.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/evaluation/cityscapes_evaluation.py b/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/evaluation/cityscapes_evaluation.py deleted file mode 100644 index 4e06ab8cbe6b43a355c0a9cfb3f2d688438d2c64..0000000000000000000000000000000000000000 --- a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/evaluation/cityscapes_evaluation.py +++ /dev/null @@ -1,201 +0,0 @@ -# ------------------------------------------------------------------------------ -# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/evaluation/cityscapes_evaluation.py -# Modified by Jitesh Jain (https://github.com/praeclarumjj3) -# ------------------------------------------------------------------------------ - -import glob -import logging -import numpy as np -import os -import tempfile -from collections import OrderedDict -import torch -from PIL import Image - -from 
detectron2.data import MetadataCatalog -from detectron2.utils import comm -from detectron2.utils.file_io import PathManager - -from .evaluator import DatasetEvaluator - - -class CityscapesEvaluator(DatasetEvaluator): - """ - Base class for evaluation using cityscapes API. - """ - - def __init__(self, dataset_name): - """ - Args: - dataset_name (str): the name of the dataset. - It must have the following metadata associated with it: - "thing_classes", "gt_dir". - """ - self._metadata = MetadataCatalog.get(dataset_name) - self._cpu_device = torch.device("cpu") - self._logger = logging.getLogger(__name__) - - def reset(self): - self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_") - self._temp_dir = self._working_dir.name - # All workers will write to the same results directory - # TODO this does not work in distributed training - assert ( - comm.get_local_size() == comm.get_world_size() - ), "CityscapesEvaluator currently do not work with multiple machines." - self._temp_dir = comm.all_gather(self._temp_dir)[0] - if self._temp_dir != self._working_dir.name: - self._working_dir.cleanup() - self._logger.info( - "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir) - ) - - -class CityscapesInstanceEvaluator(CityscapesEvaluator): - """ - Evaluate instance segmentation results on cityscapes dataset using cityscapes API. - - Note: - * It does not work in multi-machine distributed training. - * It contains a synchronization, therefore has to be used on all ranks. - * Only the main process runs evaluation. - """ - - def process(self, inputs, outputs): - from cityscapesscripts.helpers.labels import name2label - - for input, output in zip(inputs, outputs): - file_name = input["file_name"] - basename = os.path.splitext(os.path.basename(file_name))[0] - pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt") - - if "instances" in output: - output = output["instances"].to(self._cpu_device) - num_instances = len(output) - with open(pred_txt, "w") as fout: - for i in range(num_instances): - pred_class = output.pred_classes[i] - classes = self._metadata.stuff_classes[pred_class] - class_id = name2label[classes].id - score = output.scores[i] - mask = output.pred_masks[i].numpy().astype("uint8") - png_filename = os.path.join( - self._temp_dir, basename + "_{}_{}.png".format(i, classes) - ) - - Image.fromarray(mask * 255).save(png_filename) - fout.write( - "{} {} {}\n".format(os.path.basename(png_filename), class_id, score) - ) - else: - # Cityscapes requires a prediction file for every ground truth image. - with open(pred_txt, "w") as fout: - pass - - def evaluate(self): - """ - Returns: - dict: has a key "segm", whose value is a dict of "AP" and "AP50". 
- """ - comm.synchronize() - if comm.get_rank() > 0: - return - import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval - - self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) - - # set some global states in cityscapes evaluation API, before evaluating - cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) - cityscapes_eval.args.predictionWalk = None - cityscapes_eval.args.JSONOutput = False - cityscapes_eval.args.colorized = False - cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json") - - # These lines are adopted from - # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa - gt_dir = PathManager.get_local_path(self._metadata.gt_dir) - groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png")) - assert len( - groundTruthImgList - ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format( - cityscapes_eval.args.groundTruthSearch - ) - predictionImgList = [] - for gt in groundTruthImgList: - predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args)) - results = cityscapes_eval.evaluateImgLists( - predictionImgList, groundTruthImgList, cityscapes_eval.args - )["averages"] - - ret = OrderedDict() - ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100} - self._working_dir.cleanup() - return ret - - -class CityscapesSemSegEvaluator(CityscapesEvaluator): - """ - Evaluate semantic segmentation results on cityscapes dataset using cityscapes API. - - Note: - * It does not work in multi-machine distributed training. - * It contains a synchronization, therefore has to be used on all ranks. - * Only the main process runs evaluation. - """ - - def process(self, inputs, outputs): - from cityscapesscripts.helpers.labels import trainId2label - - for input, output in zip(inputs, outputs): - file_name = input["file_name"] - basename = os.path.splitext(os.path.basename(file_name))[0] - pred_filename = os.path.join(self._temp_dir, basename + "_pred.png") - - output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy() - pred = 255 * np.ones(output.shape, dtype=np.uint8) - for train_id, label in trainId2label.items(): - if label.ignoreInEval: - continue - pred[output == train_id] = label.id - Image.fromarray(pred).save(pred_filename) - - def evaluate(self): - comm.synchronize() - if comm.get_rank() > 0: - return - # Load the Cityscapes eval script *after* setting the required env var, - # since the script reads CITYSCAPES_DATASET into global variables at load time. - import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval - - self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) - - # set some global states in cityscapes evaluation API, before evaluating - cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) - cityscapes_eval.args.predictionWalk = None - cityscapes_eval.args.JSONOutput = False - cityscapes_eval.args.colorized = False - - # These lines are adopted from - # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa - gt_dir = PathManager.get_local_path(self._metadata.gt_dir) - groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png")) - assert len( - groundTruthImgList - ), "Cannot find any ground truth images to use for evaluation. 
Searched for: {}".format( - cityscapes_eval.args.groundTruthSearch - ) - predictionImgList = [] - for gt in groundTruthImgList: - predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt)) - results = cityscapes_eval.evaluateImgLists( - predictionImgList, groundTruthImgList, cityscapes_eval.args - ) - ret = OrderedDict() - ret["sem_seg"] = { - "IoU": 100.0 * results["averageScoreClasses"], - "iIoU": 100.0 * results["averageScoreInstClasses"], - "IoU_sup": 100.0 * results["averageScoreCategories"], - "iIoU_sup": 100.0 * results["averageScoreInstCategories"], - } - self._working_dir.cleanup() - return ret diff --git a/spaces/PeepDaSlan9/AutoGPT/benchmark/__init__.py b/spaces/PeepDaSlan9/AutoGPT/benchmark/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/PeepDaSlan9/De-limiter/dataloader/delimit_dataset.py b/spaces/PeepDaSlan9/De-limiter/dataloader/delimit_dataset.py deleted file mode 100644 index ba0443e0c22481af2b451d8ccc5ab6e6f79cffb8..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/De-limiter/dataloader/delimit_dataset.py +++ /dev/null @@ -1,573 +0,0 @@ -import os -import random -from typing import Optional, Callable -import json -import glob -import csv - -import numpy as np -import torch -import librosa -import pyloudnorm as pyln -from pedalboard import Pedalboard, Limiter, Gain, Compressor, Clipping - -from .dataset import ( - MusdbTrainDataset, - MusdbValidDataset, - apply_limitaug, -) -from utils import ( - load_wav_arbitrary_position_stereo, - load_wav_specific_position_stereo, - db2linear, -) - - -class DelimitTrainDataset(MusdbTrainDataset): - def __init__( - self, - target: str = "all", - root: str = None, - seq_duration: Optional[float] = 6.0, - samples_per_track: int = 64, - source_augmentations: Optional[Callable] = lambda audio: audio, - sample_rate: int = 44100, - seed: int = 42, - limitaug_method: str = "limitaug", - limitaug_mode: str = "normal_L", - limitaug_custom_target_lufs: float = None, - limitaug_custom_target_lufs_std: float = None, - target_loudnorm_lufs: float = -14.0, - target_limitaug_mode: str = None, - target_limitaug_custom_target_lufs: float = None, - target_limitaug_custom_target_lufs_std: float = None, - custom_limiter_attack_range: list = [2.0, 2.0], - custom_limiter_release_range: list = [200.0, 200.0], - *args, - **kwargs, - ) -> None: - super().__init__( - target=target, - root=root, - seq_duration=seq_duration, - samples_per_track=samples_per_track, - source_augmentations=source_augmentations, - sample_rate=sample_rate, - seed=seed, - limitaug_method=limitaug_method, - limitaug_mode=limitaug_mode, - limitaug_custom_target_lufs=limitaug_custom_target_lufs, - limitaug_custom_target_lufs_std=limitaug_custom_target_lufs_std, - target_loudnorm_lufs=target_loudnorm_lufs, - custom_limiter_attack_range=custom_limiter_attack_range, - custom_limiter_release_range=custom_limiter_release_range, - *args, - **kwargs, - ) - - self.target_limitaug_mode = target_limitaug_mode - - self.target_limitaug_custom_target_lufs = (target_limitaug_custom_target_lufs,) - self.target_limitaug_custom_target_lufs_std = ( - target_limitaug_custom_target_lufs_std, - ) - self.limitaug_mode_statistics["target_custom"] = [ - target_limitaug_custom_target_lufs, - target_limitaug_custom_target_lufs_std, - ] - - """ - Parameters - ---------- - limitaug_method : str - choose from ["linear_gain_increase", "limitaug", "limitaug_then_loudnorm", 
"only_loudnorm"] - limitaug_mode : str - choose from ["uniform", "normal", "normal_L", "normal_XL", "normal_short_term", "normal_L_short_term", "normal_XL_short_term", "custom"] - limitaug_custom_target_lufs : float - valid only when - limitaug_mode == "custom" - target_loudnorm_lufs : float - valid only when - limitaug_method == 'limitaug_then_loudnorm' or 'only_loudnorm' - default is -14. - To the best of my knowledge, Spotify and Youtube music is using -14 as a reference loudness normalization level. - No special reason for the choice of -14 as target_loudnorm_lufs. - target : str - target name of the source to be separated, defaults to ``vocals``. - root : str - root path of MUSDB - seq_duration : float - training is performed in chunks of ``seq_duration`` (in seconds, - defaults to ``None`` which loads the full audio track - samples_per_track : int - sets the number of samples, yielded from each track per epoch. - Defaults to 64 - source_augmentations : list[callables] - provide list of augmentation function that take a multi-channel - audio file of shape (src, samples) as input and output. Defaults to - no-augmentations (input = output) - seed : int - control randomness of dataset iterations - args, kwargs : additional keyword arguments - used to add further control for the musdb dataset - initialization function. - """ - - # Get a limitaug result without target (individual stem source) - def get_limitaug_mixture(self, mixture): - if self.limitaug_method == "limitaug": - self.board[1].release_ms = random.uniform(30.0, 200.0) - target_lufs = self.sample_target_lufs() - mixture_limited, mixture_lufs = apply_limitaug( - mixture, - self.board, - self.meter, - self.sample_rate, - target_lufs=target_lufs, - ) - - elif self.limitaug_method == "limitaug_then_loudnorm": - self.board[1].release_ms = random.uniform(30.0, 200.0) - target_lufs = self.sample_target_lufs() - mixture_limited, mixture_lufs = ( - apply_limitaug( - mixture, - self.board, - self.meter, - self.sample_rate, - target_lufs=target_lufs, - target_loudnorm_lufs=self.target_loudnorm_lufs, - ), - ) - - # Apply LimitAug using Custom Limiter - elif self.limitaug_method == "custom_limiter_limitaug": - # Change attack time of First compressor of the Limiter - self.board[1].attack_ms = random.uniform( - self.custom_limiter_attack_range[0], self.custom_limiter_attack_range[1] - ) - # Change release time of First compressor of the Limiter - self.board[1].release_ms = random.uniform( - self.custom_limiter_release_range[0], - self.custom_limiter_release_range[1], - ) - # Change release time of Second compressor of the Limiter - self.board[2].release_ms = random.uniform(30.0, 200.0) - target_lufs = self.sample_target_lufs() - mixture_limited, mixture_lufs = apply_limitaug( - mixture, - self.board, - self.meter, - self.sample_rate, - target_lufs=target_lufs, - target_loudnorm_lufs=self.target_loudnorm_lufs, - ) - - # When we want to force NN to output an appropriately compressed target output - if self.target_limitaug_mode: - mixture_target_lufs = random.gauss( - self.limitaug_mode_statistics[self.target_limitaug_mode][0], - self.limitaug_mode_statistics[self.target_limitaug_mode][1], - ) - mixture, target_lufs = apply_limitaug( - mixture, - self.board, - self.meter, - self.sample_rate, - target_lufs=mixture_target_lufs, - loudness=mixture_lufs, - ) - - if np.isinf(mixture_lufs): - mixture_loudnorm = mixture - else: - augmented_gain = self.target_loudnorm_lufs - mixture_lufs - mixture_loudnorm = mixture * db2linear(augmented_gain, eps=0.0) 
- - return mixture_limited, mixture_loudnorm - - def __getitem__(self, index): - audio_sources = [] - - for k, source in enumerate(self.sources): - # memorize index of target source - if source == self.target: # if source is 'vocals' - track_path = self.train_list[ - index // self.samples_per_track - ] # we want to use # training samples per each track. - audio_path = f"{track_path}/{source}.wav" - audio = load_wav_arbitrary_position_stereo( - audio_path, self.sample_rate, self.seq_duration - ) - else: - track_path = random.choice(self.train_list) - audio_path = f"{track_path}/{source}.wav" - audio = load_wav_arbitrary_position_stereo( - audio_path, self.sample_rate, self.seq_duration - ) - audio = self.source_augmentations(audio) - audio_sources.append(audio) - - stems = np.stack(audio_sources, axis=0) - - # apply linear mix over source index=0 - # and here, linear mixture is a target unlike in MusdbTrainDataset - mixture = stems.sum(0) - mixture_limited, mixture_loudnorm = self.get_limitaug_mixture(mixture) - # We will give mixture_limited as an input and mixture_loudnorm as a target to the model. - - mixture_limited = np.clip(mixture_limited, -1.0, 1.0) - mixture_limited = torch.as_tensor(mixture_limited, dtype=torch.float32) - mixture_loudnorm = torch.as_tensor(mixture_loudnorm, dtype=torch.float32) - - return mixture_limited, mixture_loudnorm - - -class OzoneTrainDataset(DelimitTrainDataset): - def __init__( - self, - target: str = "all", - root: str = None, - ozone_root: str = None, - use_fixed: float = 0.1, # ratio of fixed samples - seq_duration: Optional[float] = 6.0, - samples_per_track: int = 64, - source_augmentations: Optional[Callable] = lambda audio: audio, - sample_rate: int = 44100, - seed: int = 42, - limitaug_method: str = "limitaug", - limitaug_mode: str = "normal_L", - limitaug_custom_target_lufs: float = None, - limitaug_custom_target_lufs_std: float = None, - target_loudnorm_lufs: float = -14.0, - target_limitaug_mode: str = None, - target_limitaug_custom_target_lufs: float = None, - target_limitaug_custom_target_lufs_std: float = None, - custom_limiter_attack_range: list = [2.0, 2.0], - custom_limiter_release_range: list = [200.0, 200.0], - *args, - **kwargs, - ) -> None: - super().__init__( - target, - root, - seq_duration, - samples_per_track, - source_augmentations, - sample_rate, - seed, - limitaug_method, - limitaug_mode, - limitaug_custom_target_lufs, - limitaug_custom_target_lufs_std, - target_loudnorm_lufs, - target_limitaug_mode, - target_limitaug_custom_target_lufs, - target_limitaug_custom_target_lufs_std, - custom_limiter_attack_range, - custom_limiter_release_range, - *args, - **kwargs, - ) - - self.ozone_root = ozone_root - self.use_fixed = use_fixed - self.list_train_fixed = glob.glob(f"{self.ozone_root}/ozone_train_fixed/*.wav") - self.list_train_random = glob.glob( - f"{self.ozone_root}/ozone_train_random/*.wav" - ) - self.dict_train_random = {} - - # Load information of pre-generated random training examples - list_csv_files = glob.glob(f"{self.ozone_root}/ozone_train_random_*.csv") - list_csv_files.sort() - for csv_file in list_csv_files: - with open(csv_file, "r") as f: - reader = csv.reader(f) - next(reader) - for row in reader: - self.dict_train_random[row[0]] = { - "max_threshold": float(row[1]), - "max_character": float(row[2]), - "vocals": { - "name": row[3], - "start_sec": float(row[4]), - "gain": float(row[5]), - "channelswap": bool(row[6]), - }, - "bass": { - "name": row[7], - "start_sec": float(row[8]), - "gain": float(row[9]), - 
"channelswap": bool(row[10]), - }, - "drums": { - "name": row[11], - "start_sec": float(row[12]), - "gain": float(row[13]), - "channelswap": bool(row[14]), - }, - "other": { - "name": row[15], - "start_sec": float(row[16]), - "gain": float(row[17]), - "channelswap": bool(row[18]), - }, - } - - def __getitem__(self, idx): - use_fixed_prob = random.random() - - if use_fixed_prob <= self.use_fixed: - # Fixed examples - audio_path = random.choice(self.list_train_fixed) - song_name = os.path.basename(audio_path).replace(".wav", "") - mixture_limited, start_pos_sec = load_wav_arbitrary_position_stereo( - audio_path, self.sample_rate, self.seq_duration, return_pos=True - ) - - audio_sources = [] - track_path = f"{self.root}/train/{song_name}" - for source in self.sources: - audio_path = f"{track_path}/{source}.wav" - audio = load_wav_specific_position_stereo( - audio_path, - self.sample_rate, - self.seq_duration, - start_position=start_pos_sec, - ) - audio_sources.append(audio) - - else: - # Random examples - # Load mixture_limited (pre-generated) - audio_path = random.choice(self.list_train_random) - seg_name = os.path.basename(audio_path).replace(".wav", "") - mixture_limited, sr = librosa.load( - audio_path, sr=self.sample_rate, mono=False - ) - - # Load mixture_unlimited (from the original musdb18, using metadata) - audio_sources = [] - for source in self.sources: - dict_seg_info = self.dict_train_random[seg_name] - dict_seg_source_info = dict_seg_info[source] - audio_path = ( - f"{self.root}/train/{dict_seg_source_info['name']}/{source}.wav" - ) - audio = load_wav_specific_position_stereo( - audio_path, - self.sample_rate, - self.seq_duration, - start_position=dict_seg_source_info["start_sec"], - ) - - # apply augmentations - audio = audio * dict_seg_source_info["gain"] - if dict_seg_source_info["channelswap"]: - audio = np.flip(audio, axis=0) - - audio_sources.append(audio) - - stems = np.stack(audio_sources, axis=0) - mixture = stems.sum(axis=0) - mixture_lufs = self.meter.integrated_loudness(mixture.T) - if np.isinf(mixture_lufs): - mixture_loudnorm = mixture - else: - augmented_gain = self.target_loudnorm_lufs - mixture_lufs - mixture_loudnorm = mixture * db2linear(augmented_gain, eps=0.0) - - return mixture_limited, mixture_loudnorm - - -class DelimitValidDataset(MusdbValidDataset): - def __init__( - self, - target: str = "vocals", - root: str = None, - delimit_valid_root: str = None, - valid_target_lufs: float = -8.05, # From the Table 1 of the "Towards robust music source separation on loud commercial music" paper, the average loudness of commerical music. - target_loudnorm_lufs: float = -14.0, - delimit_valid_L_root: str = None, # This will be used when using the target as compressed (normal_L) mixture. 
- use_custom_limiter: bool = False, - custom_limiter_attack_range: list = [0.1, 10.0], - custom_limiter_release_range: list = [30.0, 200.0], - *args, - **kwargs, - ) -> None: - super().__init__(target=target, root=root, *args, **kwargs) - self.delimit_valid_root = delimit_valid_root - if self.delimit_valid_root: - with open(f"{self.delimit_valid_root}/valid_loudness.json", "r") as f: - self.dict_valid_loudness = json.load(f) - self.delimit_valid_L_root = delimit_valid_L_root - if self.delimit_valid_L_root: - with open(f"{self.delimit_valid_L_root}/valid_loudness.json", "r") as f: - self.dict_valid_L_loudness = json.load(f) - - self.valid_target_lufs = valid_target_lufs - self.target_loudnorm_lufs = target_loudnorm_lufs - self.meter = pyln.Meter(self.sample_rate) - self.use_custom_limiter = use_custom_limiter - - if self.use_custom_limiter: - print("using Custom limiter limitaug for validation!!") - self.custom_limiter_attack_range = custom_limiter_attack_range - self.custom_limiter_release_range = custom_limiter_release_range - self.board = Pedalboard( - [ - Gain(gain_db=0.0), - Compressor( - threshold_db=-10.0, ratio=4.0, attack_ms=2.0, release_ms=200.0 - ), # attack_ms and release_ms will be changed later. - Compressor( - threshold_db=0.0, - ratio=1000.0, - attack_ms=0.001, - release_ms=100.0, - ), - Gain(gain_db=3.75), - Clipping(threshold_db=0.0), - ] - ) # This implementation is the same as JUCE Limiter. - # However, we want the first compressor to have a variable attack and release time. - # Therefore, we use the Custom Limiter instead of the JUCE Limiter. - else: - self.board = Pedalboard( - [Gain(gain_db=0.0), Limiter(threshold_db=0.0, release_ms=100.0)] - ) # Currently, we are using a limiter with a release time of 100ms. - - def __getitem__(self, index): - audio_sources = [] - target_ind = None - - for k, source in enumerate(self.sources): - # memorize index of target source - if source == self.target: # if source is 'vocals' - target_ind = k - track_path = self.valid_list[index] - song_name = os.path.basename(track_path) - audio_path = f"{track_path}/{source}.wav" - # audio = utils.load_wav_stereo(audio_path, self.sample_rate) - audio = librosa.load(audio_path, mono=False, sr=self.sample_rate)[0] - else: - track_path = self.valid_list[index] - song_name = os.path.basename(track_path) - audio_path = f"{track_path}/{source}.wav" - # audio = utils.load_wav_stereo(audio_path, self.sample_rate) - audio = librosa.load(audio_path, mono=False, sr=self.sample_rate)[0] - - audio = torch.as_tensor(audio, dtype=torch.float32) - audio_sources.append(audio) - - stems = np.stack(audio_sources, axis=0) - - # apply linear mix over source index=0 - # and here, linear mixture is a target unlike in MusdbTrainDataset - mixture = stems.sum(0) - if ( - self.delimit_valid_root - ): # If there exists a pre-processed delimit valid dataset - audio_path = f"{self.delimit_valid_root}/valid/{song_name}.wav" - mixture_limited = librosa.load(audio_path, mono=False, sr=self.sample_rate)[ - 0 - ] - mixture_lufs = self.dict_valid_loudness[song_name] - - else: - if self.use_custom_limiter: - custom_limiter_attack = random.uniform( - self.custom_limiter_attack_range[0], - self.custom_limiter_attack_range[1], - ) - self.board[1].attack_ms = custom_limiter_attack - - custom_limiter_release = random.uniform( - self.custom_limiter_release_range[0], - self.custom_limiter_release_range[1], - ) - self.board[1].release_ms = custom_limiter_release - - mixture_limited, mixture_lufs = apply_limitaug( - mixture, - self.board, 
- self.meter, - self.sample_rate, - target_lufs=self.valid_target_lufs, - ) - else: - mixture_limited, mixture_lufs = apply_limitaug( - mixture, - self.board, - self.meter, - self.sample_rate, - target_lufs=self.valid_target_lufs, - # target_loudnorm_lufs=self.target_loudnorm_lufs, - ) # mixture_limited is a limiter applied mixture - # We will give mixture_limited as an input and mixture_loudnorm as a target to the model. - - if self.delimit_valid_L_root: - audio_L_path = f"{self.delimit_valid_L_root}/valid/{song_name}.wav" - mixture_loudnorm = librosa.load( - audio_L_path, mono=False, sr=self.sample_rate - )[0] - mixture_lufs = self.dict_valid_L_loudness[song_name] - mixture = mixture_loudnorm - - augmented_gain = self.target_loudnorm_lufs - mixture_lufs - mixture_loudnorm = mixture * db2linear(augmented_gain) - - if self.use_custom_limiter: - return ( - mixture_limited, - mixture_loudnorm, - song_name, - mixture_lufs, - custom_limiter_attack, - custom_limiter_release, - ) - else: - return mixture_limited, mixture_loudnorm, song_name, mixture_lufs - - -class OzoneValidDataset(MusdbValidDataset): - def __init__( - self, - target: str = "all", - root: str = None, - ozone_root: str = None, - target_loudnorm_lufs: float = -14.0, - *args, - **kwargs, - ) -> None: - super().__init__(target=target, root=root, *args, **kwargs) - - self.ozone_root = ozone_root - self.target_loudnorm_lufs = target_loudnorm_lufs - - with open(f"{self.ozone_root}/valid_loudness.json", "r") as f: - self.dict_valid_loudness = json.load(f) - - def __getitem__(self, index): - audio_sources = [] - - track_path = self.valid_list[index] - song_name = os.path.basename(track_path) - for k, source in enumerate(self.sources): - audio_path = f"{track_path}/{source}.wav" - audio = librosa.load(audio_path, mono=False, sr=self.sample_rate)[0] - audio_sources.append(audio) - - stems = np.stack(audio_sources, axis=0) - - mixture = stems.sum(0) - - audio_path = f"{self.ozone_root}/ozone_train_fixed/{song_name}.wav" - mixture_limited = librosa.load(audio_path, mono=False, sr=self.sample_rate)[0] - - mixture_lufs = self.dict_valid_loudness[song_name] - augmented_gain = self.target_loudnorm_lufs - mixture_lufs - mixture_loudnorm = mixture * db2linear(augmented_gain) - - return mixture_limited, mixture_loudnorm, song_name, mixture_lufs diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/modulated_deform_conv.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/modulated_deform_conv.py deleted file mode 100644 index 75559579cf053abcc99538606cbb88c723faf783..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/modulated_deform_conv.py +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import math - -import torch -import torch.nn as nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair, _single - -from annotator.uniformer.mmcv.utils import deprecated_api_warning -from ..cnn import CONV_LAYERS -from ..utils import ext_loader, print_log - -ext_module = ext_loader.load_ext( - '_ext', - ['modulated_deform_conv_forward', 'modulated_deform_conv_backward']) - - -class ModulatedDeformConv2dFunction(Function): - - @staticmethod - def symbolic(g, input, offset, mask, weight, bias, stride, padding, - dilation, groups, deform_groups): - input_tensors = [input, offset, mask, weight] - if bias is not None: - input_tensors.append(bias) - return g.op( - 'mmcv::MMCVModulatedDeformConv2d', - *input_tensors, - stride_i=stride, - padding_i=padding, - dilation_i=dilation, - groups_i=groups, - deform_groups_i=deform_groups) - - @staticmethod - def forward(ctx, - input, - offset, - mask, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - deform_groups=1): - if input is not None and input.dim() != 4: - raise ValueError( - f'Expected 4D tensor as input, got {input.dim()}D tensor \ - instead.') - ctx.stride = _pair(stride) - ctx.padding = _pair(padding) - ctx.dilation = _pair(dilation) - ctx.groups = groups - ctx.deform_groups = deform_groups - ctx.with_bias = bias is not None - if not ctx.with_bias: - bias = input.new_empty(0) # fake tensor - # When pytorch version >= 1.6.0, amp is adopted for fp16 mode; - # amp won't cast the type of model (float32), but "offset" is cast - # to float16 by nn.Conv2d automatically, leading to the type - # mismatch with input (when it is float32) or weight. - # The flag for whether to use fp16 or amp is the type of "offset", - # we cast weight and input to temporarily support fp16 and amp - # whatever the pytorch version is. 
- input = input.type_as(offset) - weight = weight.type_as(input) - ctx.save_for_backward(input, offset, mask, weight, bias) - output = input.new_empty( - ModulatedDeformConv2dFunction._output_size(ctx, input, weight)) - ctx._bufs = [input.new_empty(0), input.new_empty(0)] - ext_module.modulated_deform_conv_forward( - input, - weight, - bias, - ctx._bufs[0], - offset, - mask, - output, - ctx._bufs[1], - kernel_h=weight.size(2), - kernel_w=weight.size(3), - stride_h=ctx.stride[0], - stride_w=ctx.stride[1], - pad_h=ctx.padding[0], - pad_w=ctx.padding[1], - dilation_h=ctx.dilation[0], - dilation_w=ctx.dilation[1], - group=ctx.groups, - deformable_group=ctx.deform_groups, - with_bias=ctx.with_bias) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, offset, mask, weight, bias = ctx.saved_tensors - grad_input = torch.zeros_like(input) - grad_offset = torch.zeros_like(offset) - grad_mask = torch.zeros_like(mask) - grad_weight = torch.zeros_like(weight) - grad_bias = torch.zeros_like(bias) - grad_output = grad_output.contiguous() - ext_module.modulated_deform_conv_backward( - input, - weight, - bias, - ctx._bufs[0], - offset, - mask, - ctx._bufs[1], - grad_input, - grad_weight, - grad_bias, - grad_offset, - grad_mask, - grad_output, - kernel_h=weight.size(2), - kernel_w=weight.size(3), - stride_h=ctx.stride[0], - stride_w=ctx.stride[1], - pad_h=ctx.padding[0], - pad_w=ctx.padding[1], - dilation_h=ctx.dilation[0], - dilation_w=ctx.dilation[1], - group=ctx.groups, - deformable_group=ctx.deform_groups, - with_bias=ctx.with_bias) - if not ctx.with_bias: - grad_bias = None - - return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, - None, None, None, None, None) - - @staticmethod - def _output_size(ctx, input, weight): - channels = weight.size(0) - output_size = (input.size(0), channels) - for d in range(input.dim() - 2): - in_size = input.size(d + 2) - pad = ctx.padding[d] - kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1 - stride_ = ctx.stride[d] - output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) - if not all(map(lambda s: s > 0, output_size)): - raise ValueError( - 'convolution input is too small (output would be ' + - 'x'.join(map(str, output_size)) + ')') - return output_size - - -modulated_deform_conv2d = ModulatedDeformConv2dFunction.apply - - -class ModulatedDeformConv2d(nn.Module): - - @deprecated_api_warning({'deformable_groups': 'deform_groups'}, - cls_name='ModulatedDeformConv2d') - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - deform_groups=1, - bias=True): - super(ModulatedDeformConv2d, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.stride = _pair(stride) - self.padding = _pair(padding) - self.dilation = _pair(dilation) - self.groups = groups - self.deform_groups = deform_groups - # enable compatibility with nn.Conv2d - self.transposed = False - self.output_padding = _single(0) - - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels // groups, - *self.kernel_size)) - if bias: - self.bias = nn.Parameter(torch.Tensor(out_channels)) - else: - self.register_parameter('bias', None) - self.init_weights() - - def init_weights(self): - n = self.in_channels - for k in self.kernel_size: - n *= k - stdv = 1. 
/ math.sqrt(n) - self.weight.data.uniform_(-stdv, stdv) - if self.bias is not None: - self.bias.data.zero_() - - def forward(self, x, offset, mask): - return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, - self.stride, self.padding, - self.dilation, self.groups, - self.deform_groups) - - -@CONV_LAYERS.register_module('DCNv2') -class ModulatedDeformConv2dPack(ModulatedDeformConv2d): - """A ModulatedDeformable Conv Encapsulation that acts as normal Conv - layers. - - Args: - in_channels (int): Same as nn.Conv2d. - out_channels (int): Same as nn.Conv2d. - kernel_size (int or tuple[int]): Same as nn.Conv2d. - stride (int): Same as nn.Conv2d, while tuple is not supported. - padding (int): Same as nn.Conv2d, while tuple is not supported. - dilation (int): Same as nn.Conv2d, while tuple is not supported. - groups (int): Same as nn.Conv2d. - bias (bool or str): If specified as `auto`, it will be decided by the - norm_cfg. Bias will be set as True if norm_cfg is None, otherwise - False. - """ - - _version = 2 - - def __init__(self, *args, **kwargs): - super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs) - self.conv_offset = nn.Conv2d( - self.in_channels, - self.deform_groups * 3 * self.kernel_size[0] * self.kernel_size[1], - kernel_size=self.kernel_size, - stride=self.stride, - padding=self.padding, - dilation=self.dilation, - bias=True) - self.init_weights() - - def init_weights(self): - super(ModulatedDeformConv2dPack, self).init_weights() - if hasattr(self, 'conv_offset'): - self.conv_offset.weight.data.zero_() - self.conv_offset.bias.data.zero_() - - def forward(self, x): - out = self.conv_offset(x) - o1, o2, mask = torch.chunk(out, 3, dim=1) - offset = torch.cat((o1, o2), dim=1) - mask = torch.sigmoid(mask) - return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, - self.stride, self.padding, - self.dilation, self.groups, - self.deform_groups) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - version = local_metadata.get('version', None) - - if version is None or version < 2: - # the key is different in early versions - # In version < 2, ModulatedDeformConvPack - # loads previous benchmark models. 
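            # Illustrative example of the renaming below (the module path is hypothetical):
            # with prefix 'backbone.layer2.0.conv2.', the legacy key
            # 'backbone.layer2.0.conv2_offset.weight' becomes
            # 'backbone.layer2.0.conv2.conv_offset.weight', and likewise for the bias.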
- if (prefix + 'conv_offset.weight' not in state_dict - and prefix[:-1] + '_offset.weight' in state_dict): - state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( - prefix[:-1] + '_offset.weight') - if (prefix + 'conv_offset.bias' not in state_dict - and prefix[:-1] + '_offset.bias' in state_dict): - state_dict[prefix + - 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + - '_offset.bias') - - if version is not None and version > 1: - print_log( - f'ModulatedDeformConvPack {prefix.rstrip(".")} is upgraded to ' - 'version 2.', - logger='root') - - super()._load_from_state_dict(state_dict, prefix, local_metadata, - strict, missing_keys, unexpected_keys, - error_msgs) diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/points_sampler.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/points_sampler.py deleted file mode 100644 index a802a74fd6c3610d9ae178e6201f47423eca7ad1..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/points_sampler.py +++ /dev/null @@ -1,177 +0,0 @@ -from typing import List - -import torch -from torch import nn as nn - -from annotator.uniformer.mmcv.runner import force_fp32 -from .furthest_point_sample import (furthest_point_sample, - furthest_point_sample_with_dist) - - -def calc_square_dist(point_feat_a, point_feat_b, norm=True): - """Calculating square distance between a and b. - - Args: - point_feat_a (Tensor): (B, N, C) Feature vector of each point. - point_feat_b (Tensor): (B, M, C) Feature vector of each point. - norm (Bool, optional): Whether to normalize the distance. - Default: True. - - Returns: - Tensor: (B, N, M) Distance between each pair points. - """ - num_channel = point_feat_a.shape[-1] - # [bs, n, 1] - a_square = torch.sum(point_feat_a.unsqueeze(dim=2).pow(2), dim=-1) - # [bs, 1, m] - b_square = torch.sum(point_feat_b.unsqueeze(dim=1).pow(2), dim=-1) - - corr_matrix = torch.matmul(point_feat_a, point_feat_b.transpose(1, 2)) - - dist = a_square + b_square - 2 * corr_matrix - if norm: - dist = torch.sqrt(dist) / num_channel - return dist - - -def get_sampler_cls(sampler_type): - """Get the type and mode of points sampler. - - Args: - sampler_type (str): The type of points sampler. - The valid value are "D-FPS", "F-FPS", or "FS". - - Returns: - class: Points sampler type. - """ - sampler_mappings = { - 'D-FPS': DFPSSampler, - 'F-FPS': FFPSSampler, - 'FS': FSSampler, - } - try: - return sampler_mappings[sampler_type] - except KeyError: - raise KeyError( - f'Supported `sampler_type` are {sampler_mappings.keys()}, but got \ - {sampler_type}') - - -class PointsSampler(nn.Module): - """Points sampling. - - Args: - num_point (list[int]): Number of sample points. - fps_mod_list (list[str], optional): Type of FPS method, valid mod - ['F-FPS', 'D-FPS', 'FS'], Default: ['D-FPS']. - F-FPS: using feature distances for FPS. - D-FPS: using Euclidean distances of points for FPS. - FS: using F-FPS and D-FPS simultaneously. - fps_sample_range_list (list[int], optional): - Range of points to apply FPS. Default: [-1]. - """ - - def __init__(self, - num_point: List[int], - fps_mod_list: List[str] = ['D-FPS'], - fps_sample_range_list: List[int] = [-1]): - super().__init__() - # FPS would be applied to different fps_mod in the list, - # so the length of the num_point should be equal to - # fps_mod_list and fps_sample_range_list. 
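        # Minimal usage sketch (values are illustrative, not from any particular config):
        #   sampler = PointsSampler(num_point=[1024],
        #                           fps_mod_list=['D-FPS'],
        #                           fps_sample_range_list=[-1])
        #   idx = sampler(points_xyz, features)  # (B, 1024) indices via plain distance-based FPS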
- assert len(num_point) == len(fps_mod_list) == len( - fps_sample_range_list) - self.num_point = num_point - self.fps_sample_range_list = fps_sample_range_list - self.samplers = nn.ModuleList() - for fps_mod in fps_mod_list: - self.samplers.append(get_sampler_cls(fps_mod)()) - self.fp16_enabled = False - - @force_fp32() - def forward(self, points_xyz, features): - """ - Args: - points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. - features (Tensor): (B, C, N) Descriptors of the features. - - Returns: - Tensor: (B, npoint, sample_num) Indices of sampled points. - """ - indices = [] - last_fps_end_index = 0 - - for fps_sample_range, sampler, npoint in zip( - self.fps_sample_range_list, self.samplers, self.num_point): - assert fps_sample_range < points_xyz.shape[1] - - if fps_sample_range == -1: - sample_points_xyz = points_xyz[:, last_fps_end_index:] - if features is not None: - sample_features = features[:, :, last_fps_end_index:] - else: - sample_features = None - else: - sample_points_xyz = \ - points_xyz[:, last_fps_end_index:fps_sample_range] - if features is not None: - sample_features = features[:, :, last_fps_end_index: - fps_sample_range] - else: - sample_features = None - - fps_idx = sampler(sample_points_xyz.contiguous(), sample_features, - npoint) - - indices.append(fps_idx + last_fps_end_index) - last_fps_end_index += fps_sample_range - indices = torch.cat(indices, dim=1) - - return indices - - -class DFPSSampler(nn.Module): - """Using Euclidean distances of points for FPS.""" - - def __init__(self): - super().__init__() - - def forward(self, points, features, npoint): - """Sampling points with D-FPS.""" - fps_idx = furthest_point_sample(points.contiguous(), npoint) - return fps_idx - - -class FFPSSampler(nn.Module): - """Using feature distances for FPS.""" - - def __init__(self): - super().__init__() - - def forward(self, points, features, npoint): - """Sampling points with F-FPS.""" - assert features is not None, \ - 'feature input to FFPS_Sampler should not be None' - features_for_fps = torch.cat([points, features.transpose(1, 2)], dim=2) - features_dist = calc_square_dist( - features_for_fps, features_for_fps, norm=False) - fps_idx = furthest_point_sample_with_dist(features_dist, npoint) - return fps_idx - - -class FSSampler(nn.Module): - """Using F-FPS and D-FPS simultaneously.""" - - def __init__(self): - super().__init__() - - def forward(self, points, features, npoint): - """Sampling points with FS_Sampling.""" - assert features is not None, \ - 'feature input to FS_Sampler should not be None' - ffps_sampler = FFPSSampler() - dfps_sampler = DFPSSampler() - fps_idx_ffps = ffps_sampler(points, features, npoint) - fps_idx_dfps = dfps_sampler(points, features, npoint) - fps_idx = torch.cat([fps_idx_ffps, fps_idx_dfps], dim=1) - return fps_idx diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/core/seg/sampler/__init__.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/core/seg/sampler/__init__.py deleted file mode 100644 index 332b242c03d1c5e80d4577df442a9a037b1816e1..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/core/seg/sampler/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .base_pixel_sampler import BasePixelSampler -from .ohem_pixel_sampler import OHEMPixelSampler - -__all__ = ['BasePixelSampler', 'OHEMPixelSampler'] diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/layers/roi_pool.py 
b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/layers/roi_pool.py deleted file mode 100644 index c0e42756ee6fcd779387255391a30079a28f5e60..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/layers/roi_pool.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import torch -from torch import nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from maskrcnn_benchmark import _C - - -class _ROIPool(Function): - @staticmethod - def forward(ctx, input, roi, output_size, spatial_scale): - ctx.output_size = _pair(output_size) - ctx.spatial_scale = spatial_scale - ctx.input_shape = input.size() - output, argmax = _C.roi_pool_forward( - input, roi, spatial_scale, output_size[0], output_size[1] - ) - ctx.save_for_backward(input, roi, argmax) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, rois, argmax = ctx.saved_tensors - output_size = ctx.output_size - spatial_scale = ctx.spatial_scale - bs, ch, h, w = ctx.input_shape - grad_input = _C.roi_pool_backward( - grad_output, - input, - rois, - argmax, - spatial_scale, - output_size[0], - output_size[1], - bs, - ch, - h, - w, - ) - return grad_input, None, None, None - - -roi_pool = _ROIPool.apply - - -class ROIPool(nn.Module): - def __init__(self, output_size, spatial_scale): - super(ROIPool, self).__init__() - self.output_size = output_size - self.spatial_scale = spatial_scale - - def forward(self, input, rois): - return roi_pool(input, rois, self.output_size, self.spatial_scale) - - def __repr__(self): - tmpstr = self.__class__.__name__ + "(" - tmpstr += "output_size=" + str(self.output_size) - tmpstr += ", spatial_scale=" + str(self.spatial_scale) - tmpstr += ")" - return tmpstr diff --git a/spaces/Pranjal12345/Text_to_Speech/tortoise/models/random_latent_generator.py b/spaces/Pranjal12345/Text_to_Speech/tortoise/models/random_latent_generator.py deleted file mode 100644 index e90ef2130a47ec52160709877972716352e04c9c..0000000000000000000000000000000000000000 --- a/spaces/Pranjal12345/Text_to_Speech/tortoise/models/random_latent_generator.py +++ /dev/null @@ -1,55 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5): - if bias is not None: - rest_dim = [1] * (input.ndim - bias.ndim - 1) - return ( - F.leaky_relu( - input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope - ) - * scale - ) - else: - return F.leaky_relu(input, negative_slope=0.2) * scale - - -class EqualLinear(nn.Module): - def __init__( - self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1 - ): - super().__init__() - self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) - if bias: - self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) - else: - self.bias = None - self.scale = (1 / math.sqrt(in_dim)) * lr_mul - self.lr_mul = lr_mul - - def forward(self, input): - out = F.linear(input, self.weight * self.scale) - out = fused_leaky_relu(out, self.bias * self.lr_mul) - return out - - -class RandomLatentConverter(nn.Module): - def __init__(self, channels): - super().__init__() - self.layers = nn.Sequential(*[EqualLinear(channels, channels, lr_mul=.1) for _ in range(5)], - nn.Linear(channels, channels)) - self.channels = channels - - def 
forward(self, ref): - r = torch.randn(ref.shape[0], self.channels, device=ref.device) - y = self.layers(r) - return y - - -if __name__ == '__main__': - model = RandomLatentConverter(512) - model(torch.randn(5,512)) \ No newline at end of file diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/app.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/app.py deleted file mode 100644 index 8d760f8a1a769bba657d4aea47857f48f7cd0d7a..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/app.py +++ /dev/null @@ -1,1832 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py -# also released under the MIT license. - -import argparse -from concurrent.futures import ProcessPoolExecutor -import os -from pathlib import Path -import subprocess as sp -from tempfile import NamedTemporaryFile -import time -import warnings -import glob -import re -from PIL import Image -from pydub import AudioSegment -from datetime import datetime - -import json -import shutil -import taglib -import torch -import torchaudio -import gradio as gr -import numpy as np -import typing as tp - -from audiocraft.data.audio_utils import convert_audio -from audiocraft.data.audio import audio_write -from audiocraft.models import AudioGen, MusicGen, MultiBandDiffusion -from audiocraft.utils import ui -import random, string - -version = "2.0.0a" - -theme = gr.themes.Base( - primary_hue="lime", - secondary_hue="lime", - neutral_hue="neutral", -).set( - button_primary_background_fill_hover='*primary_500', - button_primary_background_fill_hover_dark='*primary_500', - button_secondary_background_fill_hover='*primary_500', - button_secondary_background_fill_hover_dark='*primary_500' -) - -MODEL = None # Last used model -MODELS = None -UNLOAD_MODEL = False -MOVE_TO_CPU = False -IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '') -print(IS_BATCHED) -MAX_BATCH_SIZE = 12 -BATCHED_DURATION = 15 -INTERRUPTING = False -MBD = None -# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform -_old_call = sp.call - - -def generate_random_string(length): - characters = string.ascii_letters + string.digits - return ''.join(random.choice(characters) for _ in range(length)) - - -def resize_video(input_path, output_path, target_width, target_height): - ffmpeg_cmd = [ - 'ffmpeg', - '-y', - '-i', input_path, - '-vf', f'scale={target_width}:{target_height}', - '-c:a', 'copy', - output_path - ] - sp.run(ffmpeg_cmd) - - -def _call_nostderr(*args, **kwargs): - # Avoid ffmpeg vomiting on the logs. - kwargs['stderr'] = sp.DEVNULL - kwargs['stdout'] = sp.DEVNULL - _old_call(*args, **kwargs) - - -sp.call = _call_nostderr -# Preallocating the pool of processes. 
-pool = ProcessPoolExecutor(4) -pool.__enter__() - - -def interrupt(): - global INTERRUPTING - INTERRUPTING = True - - -class FileCleaner: - def __init__(self, file_lifetime: float = 3600): - self.file_lifetime = file_lifetime - self.files = [] - - def add(self, path: tp.Union[str, Path]): - self._cleanup() - self.files.append((time.time(), Path(path))) - - def _cleanup(self): - now = time.time() - for time_added, path in list(self.files): - if now - time_added > self.file_lifetime: - if path.exists(): - path.unlink() - self.files.pop(0) - else: - break - - -file_cleaner = FileCleaner() - - -def make_waveform(*args, **kwargs): - # Further remove some warnings. - be = time.time() - with warnings.catch_warnings(): - warnings.simplefilter('ignore') - height = kwargs.pop('height') - width = kwargs.pop('width') - if height < 256: - height = 256 - if width < 256: - width = 256 - waveform_video = gr.make_waveform(*args, **kwargs) - out = f"{generate_random_string(12)}.mp4" - image = kwargs.get('bg_image', None) - if image is None: - resize_video(waveform_video, out, 900, 300) - else: - resize_video(waveform_video, out, width, height) - print("Make a video took", time.time() - be) - return out - - -def load_model(version='GrandaddyShmax/musicgen-melody', custom_model=None, base_model='GrandaddyShmax/musicgen-medium', gen_type="music"): - global MODEL, MODELS - print("Loading model", version) - if MODELS is None: - if version == 'GrandaddyShmax/musicgen-custom': - MODEL = MusicGen.get_pretrained(base_model) - file_path = os.path.abspath("models/" + str(custom_model) + ".pt") - MODEL.lm.load_state_dict(torch.load(file_path)) - else: - if gen_type == "music": - MODEL = MusicGen.get_pretrained(version) - elif gen_type == "audio": - MODEL = AudioGen.get_pretrained(version) - - return - - else: - t1 = time.monotonic() - if MODEL is not None: - MODEL.to('cpu') # move to cache - print("Previous model moved to CPU in %.2fs" % (time.monotonic() - t1)) - t1 = time.monotonic() - if version != 'GrandaddyShmax/musicgen-custom' and MODELS.get(version) is None: - print("Loading model %s from disk" % version) - if gen_type == "music": - result = MusicGen.get_pretrained(version) - elif gen_type == "audio": - result = AudioGen.get_pretrained(version) - MODELS[version] = result - print("Model loaded in %.2fs" % (time.monotonic() - t1)) - MODEL = result - return - result = MODELS[version].to('cuda') - print("Cached model loaded in %.2fs" % (time.monotonic() - t1)) - MODEL = result - -def get_audio_info(audio_path): - if audio_path is not None: - if audio_path.name.endswith(".wav") or audio_path.name.endswith(".mp4") or audio_path.name.endswith(".json"): - if not audio_path.name.endswith(".json"): - with taglib.File(audio_path.name, save_on_exit=False) as song: - if 'COMMENT' not in song.tags: - return "No tags found. Either the file is not generated by MusicGen+ V1.2.7 and higher or the tags are corrupted. 
(Discord removes metadata from mp4 and wav files, so you can't use them)" - json_string = song.tags['COMMENT'][0] - data = json.loads(json_string) - global_prompt = str("\nGlobal Prompt: " + (data['global_prompt'] if data['global_prompt'] != "" else "none")) if 'global_prompt' in data else "" - bpm = str("\nBPM: " + data['bpm']) if 'bpm' in data else "" - key = str("\nKey: " + data['key']) if 'key' in data else "" - scale = str("\nScale: " + data['scale']) if 'scale' in data else "" - prompts = str("\nPrompts: " + (data['texts'] if data['texts'] != "['']" else "none")) if 'texts' in data else "" - duration = str("\nDuration: " + data['duration']) if 'duration' in data else "" - overlap = str("\nOverlap: " + data['overlap']) if 'overlap' in data else "" - seed = str("\nSeed: " + data['seed']) if 'seed' in data else "" - audio_mode = str("\nAudio Mode: " + data['audio_mode']) if 'audio_mode' in data else "" - input_length = str("\nInput Length: " + data['input_length']) if 'input_length' in data else "" - channel = str("\nChannel: " + data['channel']) if 'channel' in data else "" - sr_select = str("\nSample Rate: " + data['sr_select']) if 'sr_select' in data else "" - gen_type = str(data['generator'] + "gen-") if 'generator' in data else "" - model = str("\nModel: " + gen_type + data['model']) if 'model' in data else "" - custom_model = str("\nCustom Model: " + data['custom_model']) if 'custom_model' in data else "" - base_model = str("\nBase Model: " + data['base_model']) if 'base_model' in data else "" - decoder = str("\nDecoder: " + data['decoder']) if 'decoder' in data else "" - topk = str("\nTopk: " + data['topk']) if 'topk' in data else "" - topp = str("\nTopp: " + data['topp']) if 'topp' in data else "" - temperature = str("\nTemperature: " + data['temperature']) if 'temperature' in data else "" - cfg_coef = str("\nClassifier Free Guidance: " + data['cfg_coef']) if 'cfg_coef' in data else "" - version = str("Version: " + data['version']) if 'version' in data else "Version: Unknown" - info = str(version + global_prompt + bpm + key + scale + prompts + duration + overlap + seed + audio_mode + input_length + channel + sr_select + model + custom_model + base_model + decoder + topk + topp + temperature + cfg_coef) - if info == "": - return "No tags found. Either the file is not generated by MusicGen+ V1.2.7 and higher or the tags are corrupted. (Discord removes metadata from mp4 and wav files, so you can't use them)" - return info - else: - with open(audio_path.name) as json_file: - data = json.load(json_file) - #if 'global_prompt' not in data: - #return "No tags found. Either the file is not generated by MusicGen+ V1.2.8a and higher or the tags are corrupted." 
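                # The JSON sidecar is expected to look roughly like this (illustrative;
                # every value is stored as a string, which is why the fields below are
                # concatenated without casting):
                #   {"version": "2.0.0a", "global_prompt": "...", "bpm": "120",
                #    "texts": "['epic trailer music']", "duration": "30",
                #    "model": "large", "seed": "42", ...}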
- global_prompt = str("\nGlobal Prompt: " + (data['global_prompt'] if data['global_prompt'] != "" else "none")) if 'global_prompt' in data else "" - bpm = str("\nBPM: " + data['bpm']) if 'bpm' in data else "" - key = str("\nKey: " + data['key']) if 'key' in data else "" - scale = str("\nScale: " + data['scale']) if 'scale' in data else "" - prompts = str("\nPrompts: " + (data['texts'] if data['texts'] != "['']" else "none")) if 'texts' in data else "" - duration = str("\nDuration: " + data['duration']) if 'duration' in data else "" - overlap = str("\nOverlap: " + data['overlap']) if 'overlap' in data else "" - seed = str("\nSeed: " + data['seed']) if 'seed' in data else "" - audio_mode = str("\nAudio Mode: " + data['audio_mode']) if 'audio_mode' in data else "" - input_length = str("\nInput Length: " + data['input_length']) if 'input_length' in data else "" - channel = str("\nChannel: " + data['channel']) if 'channel' in data else "" - sr_select = str("\nSample Rate: " + data['sr_select']) if 'sr_select' in data else "" - gen_type = str(data['generator'] + "gen-") if 'generator' in data else "" - model = str("\nModel: " + gen_type + data['model']) if 'model' in data else "" - custom_model = str("\nCustom Model: " + data['custom_model']) if 'custom_model' in data else "" - base_model = str("\nBase Model: " + data['base_model']) if 'base_model' in data else "" - decoder = str("\nDecoder: " + data['decoder']) if 'decoder' in data else "" - topk = str("\nTopk: " + data['topk']) if 'topk' in data else "" - topp = str("\nTopp: " + data['topp']) if 'topp' in data else "" - temperature = str("\nTemperature: " + data['temperature']) if 'temperature' in data else "" - cfg_coef = str("\nClassifier Free Guidance: " + data['cfg_coef']) if 'cfg_coef' in data else "" - version = str("Version: " + data['version']) if 'version' in data else "Version: Unknown" - info = str(version + global_prompt + bpm + key + scale + prompts + duration + overlap + seed + audio_mode + input_length + channel + sr_select + model + custom_model + base_model + decoder + topk + topp + temperature + cfg_coef) - if info == "": - return "No tags found. Either the file is not generated by MusicGen+ V1.2.7 and higher or the tags are corrupted." 
- return info - else: - return "Only .wav ,.mp4 and .json files are supported" - else: - return None - - -def info_to_params(audio_path): - if audio_path is not None: - if audio_path.name.endswith(".wav") or audio_path.name.endswith(".mp4") or audio_path.name.endswith(".json"): - if not audio_path.name.endswith(".json"): - with taglib.File(audio_path.name, save_on_exit=False) as song: - if 'COMMENT' not in song.tags: - return "Default", False, "", 120, "C", "Major", "large", None, "medium", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, "sample", 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000" - json_string = song.tags['COMMENT'][0] - data = json.loads(json_string) - struc_prompt = (False if data['bpm'] == "none" else True) if 'bpm' in data else False - global_prompt = data['global_prompt'] if 'global_prompt' in data else "" - bpm = (120 if data['bpm'] == "none" else int(data['bpm'])) if 'bpm' in data else 120 - key = ("C" if data['key'] == "none" else data['key']) if 'key' in data else "C" - scale = ("Major" if data['scale'] == "none" else data['scale']) if 'scale' in data else "Major" - model = data['model'] if 'model' in data else "large" - custom_model = (data['custom_model'] if data['custom_model'] in get_available_models() else None) if 'custom_model' in data else None - base_model = data['base_model'] if 'base_model' in data else "medium" - decoder = data['decoder'] if 'decoder' in data else "Default" - if 'texts' not in data: - unique_prompts = 1 - text = ["", "", "", "", "", "", "", "", "", ""] - repeat = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - else: - s = data['texts'] - s = re.findall(r"'(.*?)'", s) - text = [] - repeat = [] - i = 0 - for elem in s: - if elem.strip(): - if i == 0 or elem != s[i-1]: - text.append(elem) - repeat.append(1) - else: - repeat[-1] += 1 - i += 1 - text.extend([""] * (10 - len(text))) - repeat.extend([1] * (10 - len(repeat))) - unique_prompts = len([t for t in text if t]) - audio_mode = ("sample" if data['audio_mode'] == "none" else data['audio_mode']) if 'audio_mode' in data else "sample" - duration = int(data['duration']) if 'duration' in data else 10 - topk = float(data['topk']) if 'topk' in data else 250 - topp = float(data['topp']) if 'topp' in data else 0 - temperature = float(data['temperature']) if 'temperature' in data else 1.0 - cfg_coef = float(data['cfg_coef']) if 'cfg_coef' in data else 5.0 - seed = int(data['seed']) if 'seed' in data else -1 - overlap = int(data['overlap']) if 'overlap' in data else 12 - channel = data['channel'] if 'channel' in data else "stereo" - sr_select = data['sr_select'] if 'sr_select' in data else "48000" - return decoder, struc_prompt, global_prompt, bpm, key, scale, model, custom_model, base_model, unique_prompts, text[0], text[1], text[2], text[3], text[4], text[5], text[6], text[7], text[8], text[9], repeat[0], repeat[1], repeat[2], repeat[3], repeat[4], repeat[5], repeat[6], repeat[7], repeat[8], repeat[9], audio_mode, duration, topk, topp, temperature, cfg_coef, seed, overlap, channel, sr_select - else: - with open(audio_path.name) as json_file: - data = json.load(json_file) - struc_prompt = (False if data['bpm'] == "none" else True) if 'bpm' in data else False - global_prompt = data['global_prompt'] if 'global_prompt' in data else "" - bpm = (120 if data['bpm'] == "none" else int(data['bpm'])) if 'bpm' in data else 120 - key = ("C" if data['key'] == "none" else data['key']) if 'key' in data else "C" - scale = ("Major" if data['scale'] == "none" else data['scale']) if 'scale' in data else 
"Major" - model = data['model'] if 'model' in data else "large" - custom_model = (data['custom_model'] if data['custom_model'] in get_available_models() else None) if 'custom_model' in data else None - base_model = data['base_model'] if 'base_model' in data else "medium" - decoder = data['decoder'] if 'decoder' in data else "Default" - if 'texts' not in data: - unique_prompts = 1 - text = ["", "", "", "", "", "", "", "", "", ""] - repeat = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - else: - s = data['texts'] - s = re.findall(r"'(.*?)'", s) - text = [] - repeat = [] - i = 0 - for elem in s: - if elem.strip(): - if i == 0 or elem != s[i-1]: - text.append(elem) - repeat.append(1) - else: - repeat[-1] += 1 - i += 1 - text.extend([""] * (10 - len(text))) - repeat.extend([1] * (10 - len(repeat))) - unique_prompts = len([t for t in text if t]) - audio_mode = ("sample" if data['audio_mode'] == "none" else data['audio_mode']) if 'audio_mode' in data else "sample" - duration = int(data['duration']) if 'duration' in data else 10 - topk = float(data['topk']) if 'topk' in data else 250 - topp = float(data['topp']) if 'topp' in data else 0 - temperature = float(data['temperature']) if 'temperature' in data else 1.0 - cfg_coef = float(data['cfg_coef']) if 'cfg_coef' in data else 5.0 - seed = int(data['seed']) if 'seed' in data else -1 - overlap = int(data['overlap']) if 'overlap' in data else 12 - channel = data['channel'] if 'channel' in data else "stereo" - sr_select = data['sr_select'] if 'sr_select' in data else "48000" - return decoder, struc_prompt, global_prompt, bpm, key, scale, model, custom_model, base_model, unique_prompts, text[0], text[1], text[2], text[3], text[4], text[5], text[6], text[7], text[8], text[9], repeat[0], repeat[1], repeat[2], repeat[3], repeat[4], repeat[5], repeat[6], repeat[7], repeat[8], repeat[9], audio_mode, duration, topk, topp, temperature, cfg_coef, seed, overlap, channel, sr_select - else: - return "Default", False, "", 120, "C", "Major", "large", None, "medium", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, "sample", 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000" - else: - return "Default", False, "", 120, "C", "Major", "large", None, "medium", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, "sample", 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000" - - -def info_to_params_a(audio_path): - if audio_path is not None: - if audio_path.name.endswith(".wav") or audio_path.name.endswith(".mp4") or audio_path.name.endswith(".json"): - if not audio_path.name.endswith(".json"): - with taglib.File(audio_path.name, save_on_exit=False) as song: - if 'COMMENT' not in song.tags: - return "Default", False, "", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000" - json_string = song.tags['COMMENT'][0] - data = json.loads(json_string) - struc_prompt = (False if data['global_prompt'] == "" else True) if 'global_prompt' in data else False - global_prompt = data['global_prompt'] if 'global_prompt' in data else "" - decoder = data['decoder'] if 'decoder' in data else "Default" - if 'texts' not in data: - unique_prompts = 1 - text = ["", "", "", "", "", "", "", "", "", ""] - repeat = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - else: - s = data['texts'] - s = re.findall(r"'(.*?)'", s) - text = [] - repeat = [] - i = 0 - for elem in s: - if elem.strip(): - if i == 0 or elem != s[i-1]: - text.append(elem) - repeat.append(1) - else: - repeat[-1] += 1 - i += 1 - text.extend([""] * (10 - 
len(text))) - repeat.extend([1] * (10 - len(repeat))) - unique_prompts = len([t for t in text if t]) - duration = int(data['duration']) if 'duration' in data else 10 - topk = float(data['topk']) if 'topk' in data else 250 - topp = float(data['topp']) if 'topp' in data else 0 - temperature = float(data['temperature']) if 'temperature' in data else 1.0 - cfg_coef = float(data['cfg_coef']) if 'cfg_coef' in data else 5.0 - seed = int(data['seed']) if 'seed' in data else -1 - overlap = int(data['overlap']) if 'overlap' in data else 12 - channel = data['channel'] if 'channel' in data else "stereo" - sr_select = data['sr_select'] if 'sr_select' in data else "48000" - return decoder, struc_prompt, global_prompt, unique_prompts, text[0], text[1], text[2], text[3], text[4], text[5], text[6], text[7], text[8], text[9], repeat[0], repeat[1], repeat[2], repeat[3], repeat[4], repeat[5], repeat[6], repeat[7], repeat[8], repeat[9], duration, topk, topp, temperature, cfg_coef, seed, overlap, channel, sr_select - else: - with open(audio_path.name) as json_file: - data = json.load(json_file) - struc_prompt = (False if data['global_prompt'] == "" else True) if 'global_prompt' in data else False - global_prompt = data['global_prompt'] if 'global_prompt' in data else "" - decoder = data['decoder'] if 'decoder' in data else "Default" - if 'texts' not in data: - unique_prompts = 1 - text = ["", "", "", "", "", "", "", "", "", ""] - repeat = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - else: - s = data['texts'] - s = re.findall(r"'(.*?)'", s) - text = [] - repeat = [] - i = 0 - for elem in s: - if elem.strip(): - if i == 0 or elem != s[i-1]: - text.append(elem) - repeat.append(1) - else: - repeat[-1] += 1 - i += 1 - text.extend([""] * (10 - len(text))) - repeat.extend([1] * (10 - len(repeat))) - unique_prompts = len([t for t in text if t]) - duration = int(data['duration']) if 'duration' in data else 10 - topk = float(data['topk']) if 'topk' in data else 250 - topp = float(data['topp']) if 'topp' in data else 0 - temperature = float(data['temperature']) if 'temperature' in data else 1.0 - cfg_coef = float(data['cfg_coef']) if 'cfg_coef' in data else 5.0 - seed = int(data['seed']) if 'seed' in data else -1 - overlap = int(data['overlap']) if 'overlap' in data else 12 - channel = data['channel'] if 'channel' in data else "stereo" - sr_select = data['sr_select'] if 'sr_select' in data else "48000" - return decoder, struc_prompt, global_prompt, unique_prompts, text[0], text[1], text[2], text[3], text[4], text[5], text[6], text[7], text[8], text[9], repeat[0], repeat[1], repeat[2], repeat[3], repeat[4], repeat[5], repeat[6], repeat[7], repeat[8], repeat[9], duration, topk, topp, temperature, cfg_coef, seed, overlap, channel, sr_select - - else: - return "Default", False, "", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000" - else: - return "Default", False, "", 1, "", "", "", "", "", "", "", "", "", "", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 250, 0, 1.0, 5.0, -1, 12, "stereo", "48000" - - -def make_pseudo_stereo (filename, sr_select, pan, delay): - if pan: - temp = AudioSegment.from_wav(filename) - if sr_select != "32000": - temp = temp.set_frame_rate(int(sr_select)) - left = temp.pan(-0.5) - 5 - right = temp.pan(0.6) - 5 - temp = left.overlay(right, position=5) - temp.export(filename, format="wav") - if delay: - waveform, sample_rate = torchaudio.load(filename) # load mono WAV file - delay_seconds = 0.01 # set delay 10ms - delay_samples = int(delay_seconds * 
sample_rate) # Calculating delay value in number of samples - stereo_waveform = torch.stack([waveform[0], torch.cat((torch.zeros(delay_samples), waveform[0][:-delay_samples]))]) # Generate a stereo file with original mono audio and delayed version - torchaudio.save(filename, stereo_waveform, sample_rate) - return - - -def normalize_audio(audio_data): - audio_data = audio_data.astype(np.float32) - max_value = np.max(np.abs(audio_data)) - audio_data /= max_value - return audio_data - - -def load_diffusion(): - global MBD - if MBD is None: - print("loading MBD") - MBD = MultiBandDiffusion.get_mbd_musicgen() - - -def unload_diffusion(): - global MBD - if MBD is not None: - print("unloading MBD") - MBD = None - - -def _do_predictions(gen_type, texts, melodies, sample, trim_start, trim_end, duration, image, height, width, background, bar1, bar2, channel, sr_select, progress=False, **gen_kwargs): - if gen_type == "music": - maximum_size = 29.5 - elif gen_type == "audio": - maximum_size = 9.5 - cut_size = 0 - input_length = 0 - sampleP = None - if sample is not None: - globalSR, sampleM = sample[0], sample[1] - sampleM = normalize_audio(sampleM) - sampleM = torch.from_numpy(sampleM).t() - if sampleM.dim() == 1: - sampleM = sampleM.unsqueeze(0) - sample_length = sampleM.shape[sampleM.dim() - 1] / globalSR - if trim_start >= sample_length: - trim_start = sample_length - 0.5 - if trim_end >= sample_length: - trim_end = sample_length - 0.5 - if trim_start + trim_end >= sample_length: - tmp = sample_length - 0.5 - trim_start = tmp / 2 - trim_end = tmp / 2 - sampleM = sampleM[..., int(globalSR * trim_start):int(globalSR * (sample_length - trim_end))] - sample_length = sample_length - (trim_start + trim_end) - if sample_length > maximum_size: - cut_size = sample_length - maximum_size - sampleP = sampleM[..., :int(globalSR * cut_size)] - sampleM = sampleM[..., int(globalSR * cut_size):] - if sample_length >= duration: - duration = sample_length + 0.5 - input_length = sample_length - global MODEL - MODEL.set_generation_params(duration=(duration - cut_size), **gen_kwargs) - print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies], [None if sample is None else (sample[0], sample[1].shape)]) - be = time.time() - processed_melodies = [] - if gen_type == "music": - target_sr = 32000 - elif gen_type == "audio": - target_sr = 16000 - target_ac = 1 - - for melody in melodies: - if melody is None: - processed_melodies.append(None) - else: - sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t() - if melody.dim() == 1: - melody = melody[None] - melody = melody[..., :int(sr * duration)] - melody = convert_audio(melody, sr, target_sr, target_ac) - processed_melodies.append(melody) - - if sample is not None: - if sampleP is None: - if gen_type == "music": - outputs = MODEL.generate_continuation( - prompt=sampleM, - prompt_sample_rate=globalSR, - descriptions=texts, - progress=progress, - return_tokens=USE_DIFFUSION - ) - elif gen_type == "audio": - outputs = MODEL.generate_continuation( - prompt=sampleM, - prompt_sample_rate=globalSR, - descriptions=texts, - progress=progress - ) - else: - if sampleP.dim() > 1: - sampleP = convert_audio(sampleP, globalSR, target_sr, target_ac) - sampleP = sampleP.to(MODEL.device).float().unsqueeze(0) - if gen_type == "music": - outputs = MODEL.generate_continuation( - prompt=sampleM, - prompt_sample_rate=globalSR, - descriptions=texts, - progress=progress, - return_tokens=USE_DIFFUSION - ) - elif gen_type == "audio": 
- outputs = MODEL.generate_continuation( - prompt=sampleM, - prompt_sample_rate=globalSR, - descriptions=texts, - progress=progress - ) - outputs = torch.cat([sampleP, outputs], 2) - - elif any(m is not None for m in processed_melodies): - if gen_type == "music": - outputs = MODEL.generate_with_chroma( - descriptions=texts, - melody_wavs=processed_melodies, - melody_sample_rate=target_sr, - progress=progress, - return_tokens=USE_DIFFUSION - ) - elif gen_type == "audio": - outputs = MODEL.generate_with_chroma( - descriptions=texts, - melody_wavs=processed_melodies, - melody_sample_rate=target_sr, - progress=progress - ) - else: - if gen_type == "music": - outputs = MODEL.generate(texts, progress=progress, return_tokens=USE_DIFFUSION) - elif gen_type == "audio": - outputs = MODEL.generate(texts, progress=progress) - - if USE_DIFFUSION: - print("outputs: " + str(outputs)) - outputs_diffusion = MBD.tokens_to_wav(outputs[1]) - outputs = torch.cat([outputs[0], outputs_diffusion], dim=0) - outputs = outputs.detach().cpu().float() - backups = outputs - if channel == "stereo": - outputs = convert_audio(outputs, target_sr, int(sr_select), 2) - elif channel == "mono" and sr_select != "32000": - outputs = convert_audio(outputs, target_sr, int(sr_select), 1) - out_files = [] - out_audios = [] - out_backup = [] - for output in outputs: - with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file: - audio_write( - file.name, output, (MODEL.sample_rate if channel == "stereo effect" else int(sr_select)), strategy="loudness", - loudness_headroom_db=16, loudness_compressor=True, add_suffix=False) - - if channel == "stereo effect": - make_pseudo_stereo(file.name, sr_select, pan=True, delay=True); - - out_files.append(pool.submit(make_waveform, file.name, bg_image=image, bg_color=background, bars_color=(bar1, bar2), fg_alpha=1.0, bar_count=75, height=height, width=width)) - out_audios.append(file.name) - file_cleaner.add(file.name) - print(f'wav: {file.name}') - for backup in backups: - with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file: - audio_write( - file.name, backup, MODEL.sample_rate, strategy="loudness", - loudness_headroom_db=16, loudness_compressor=True, add_suffix=False) - out_backup.append(file.name) - file_cleaner.add(file.name) - res = [out_file.result() for out_file in out_files] - res_audio = out_audios - res_backup = out_backup - for file in res: - file_cleaner.add(file) - print(f'video: {file}') - print("batch finished", len(texts), time.time() - be) - print("Tempfiles currently stored: ", len(file_cleaner.files)) - if MOVE_TO_CPU: - MODEL.to('cpu') - if UNLOAD_MODEL: - MODEL = None - torch.cuda.empty_cache() - torch.cuda.ipc_collect() - return res, res_audio, res_backup, input_length - - -def predict_batched(texts, melodies): - max_text_length = 512 - texts = [text[:max_text_length] for text in texts] - load_model('melody') - res = _do_predictions(texts, melodies, BATCHED_DURATION) - return res - - -def add_tags(filename, tags): - json_string = None - - data = { - "global_prompt": tags[0], - "bpm": tags[1], - "key": tags[2], - "scale": tags[3], - "texts": tags[4], - "duration": tags[5], - "overlap": tags[6], - "seed": tags[7], - "audio_mode": tags[8], - "input_length": tags[9], - "channel": tags[10], - "sr_select": tags[11], - "model": tags[12], - "custom_model": tags[13], - "base_model": tags[14], - "decoder": tags[15], - "topk": tags[16], - "topp": tags[17], - "temperature": tags[18], - "cfg_coef": tags[19], - "generator": tags[20], - "version": version - } - - 
json_string = json.dumps(data) - - if os.path.exists(filename): - with taglib.File(filename, save_on_exit=True) as song: - song.tags = {'COMMENT': json_string } - - json_file = open(tags[7] + '.json', 'w') - json_file.write(json_string) - json_file.close() - - return json_file.name; - - -def save_outputs(mp4, wav_tmp, tags, gen_type): - # mp4: .mp4 file name in root running folder of app.py - # wav_tmp: temporary wav file located in %TEMP% folder - # seed - used seed - # exanple BgnJtr4Pn1AJ.mp4, C:\Users\Alex\AppData\Local\Temp\tmp4ermrebs.wav, 195123182343465 - # procedure read generated .mp4 and wav files, rename it by using seed as name, - # and will store it to ./output/today_date/wav and ./output/today_date/mp4 folders. - # if file with same seed number already exist its make postfix in name like seed(n) - # where is n - consiqunce number 1-2-3-4 and so on - # then we store generated mp4 and wav into destination folders. - - current_date = datetime.now().strftime("%Y%m%d") - wav_directory = os.path.join(os.getcwd(), 'output', current_date, gen_type,'wav') - mp4_directory = os.path.join(os.getcwd(), 'output', current_date, gen_type,'mp4') - json_directory = os.path.join(os.getcwd(), 'output', current_date, gen_type,'json') - os.makedirs(wav_directory, exist_ok=True) - os.makedirs(mp4_directory, exist_ok=True) - os.makedirs(json_directory, exist_ok=True) - - filename = str(tags[7]) + '.wav' - target = os.path.join(wav_directory, filename) - counter = 1 - while os.path.exists(target): - filename = str(tags[7]) + f'({counter})' + '.wav' - target = os.path.join(wav_directory, filename) - counter += 1 - - shutil.copyfile(wav_tmp, target); # make copy of original file - json_file = add_tags(target, tags); - - wav_target=target; - target=target.replace('wav', 'mp4'); - mp4_target=target; - - mp4=r'./' +mp4; - shutil.copyfile(mp4, target); # make copy of original file - _ = add_tags(target, tags); - - target=target.replace('mp4', 'json'); # change the extension to json - json_target=target; # store the json target - - with open(target, 'w') as f: # open a writable file object - shutil.copyfile(json_file, target); # make copy of original file - - os.remove(json_file) - - return wav_target, mp4_target, json_target; - - -def clear_cash(): - # delete all temporary files genegated my system - current_date = datetime.now().date() - current_directory = os.getcwd() - files = glob.glob(os.path.join(current_directory, '*.mp4')) - for file in files: - creation_date = datetime.fromtimestamp(os.path.getctime(file)).date() - if creation_date == current_date: - os.remove(file) - - temp_directory = os.environ.get('TEMP') - files = glob.glob(os.path.join(temp_directory, 'tmp*.mp4')) - for file in files: - creation_date = datetime.fromtimestamp(os.path.getctime(file)).date() - if creation_date == current_date: - os.remove(file) - - files = glob.glob(os.path.join(temp_directory, 'tmp*.wav')) - for file in files: - creation_date = datetime.fromtimestamp(os.path.getctime(file)).date() - if creation_date == current_date: - os.remove(file) - - files = glob.glob(os.path.join(temp_directory, 'tmp*.png')) - for file in files: - creation_date = datetime.fromtimestamp(os.path.getctime(file)).date() - if creation_date == current_date: - os.remove(file) - return - - -def s2t(seconds, seconds2): - # convert seconds to time format - # seconds - time in seconds - # return time in format 00:00 - m, s = divmod(seconds, 60) - m2, s2 = divmod(seconds2, 60) - if seconds != 0 and seconds < seconds2: - s = s + 1 - return 
("%02d:%02d - %02d:%02d" % (m, s, m2, s2)) - - -def calc_time(gen_type, s, duration, overlap, d0, d1, d2, d3, d4, d5, d6, d7, d8, d9): - # calculate the time of generation - # overlap - overlap in seconds - # d0-d9 - drag - # return time in seconds - d_amount = [int(d0), int(d1), int(d2), int(d3), int(d4), int(d5), int(d6), int(d7), int(d8), int(d9)] - calc = [] - tracks = [] - time = 0 - s = s - 1 - max_time = duration - max_limit = 0 - if gen_type == "music": - max_limit = 30 - elif gen_type == "audio": - max_limit = 10 - track_add = max_limit - overlap - tracks.append(max_limit + ((d_amount[0] - 1) * track_add)) - for i in range(1, 10): - tracks.append(d_amount[i] * track_add) - - if tracks[0] >= max_time or s == 0: - calc.append(s2t(time, max_time)) - time = max_time - else: - calc.append(s2t(time, tracks[0])) - time = tracks[0] - - for i in range(1, 10): - if time + tracks[i] >= max_time or i == s: - calc.append(s2t(time, max_time)) - time = max_time - else: - calc.append(s2t(time, time + tracks[i])) - time = time + tracks[i] - - return calc[0], calc[1], calc[2], calc[3], calc[4], calc[5], calc[6], calc[7], calc[8], calc[9] - - -def predict_full(gen_type, model, decoder, custom_model, base_model, prompt_amount, struc_prompt, bpm, key, scale, global_prompt, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, audio, mode, trim_start, trim_end, duration, topk, topp, temperature, cfg_coef, seed, overlap, image, height, width, background, bar1, bar2, channel, sr_select, progress=gr.Progress()): - global INTERRUPTING - global USE_DIFFUSION - INTERRUPTING = False - - if gen_type == "audio": - custom_model = None - base_model = "medium" - - if temperature < 0: - raise gr.Error("Temperature must be >= 0.") - if topk < 0: - raise gr.Error("Topk must be non-negative.") - if topp < 0: - raise gr.Error("Topp must be non-negative.") - - if trim_start < 0: - trim_start = 0 - if trim_end < 0: - trim_end = 0 - - topk = int(topk) - - if decoder == "MultiBand_Diffusion": - USE_DIFFUSION = True - load_diffusion() - else: - USE_DIFFUSION = False - unload_diffusion() - - if gen_type == "music": - model_shrt = model - model = "GrandaddyShmax/musicgen-" + model - elif gen_type == "audio": - model_shrt = model - model = "GrandaddyShmax/audiogen-" + model - base_model_shrt = base_model - base_model = "GrandaddyShmax/musicgen-" + base_model - - if MODEL is None or MODEL.name != (model): - load_model(model, custom_model, base_model, gen_type) - else: - if MOVE_TO_CPU: - MODEL.to('cuda') - - if seed < 0: - seed = random.randint(0, 0xffff_ffff_ffff) - torch.manual_seed(seed) - - def _progress(generated, to_generate): - progress((min(generated, to_generate), to_generate)) - if INTERRUPTING: - raise gr.Error("Interrupted.") - MODEL.set_custom_progress_callback(_progress) - - audio_mode = "none" - melody = None - sample = None - if audio: - audio_mode = mode - if mode == "sample": - sample = audio - elif mode == "melody": - melody = audio - - base_model = "none" if model != "custom" else base_model - custom_model = "none" if model != "custom" else custom_model - - text_cat = [p0, p1, p2, p3, p4, p5, p6, p7, p8, p9] - drag_cat = [d0, d1, d2, d3, d4, d5, d6, d7, d8, d9] - texts = [] - raw_texts = [] - ind = 0 - ind2 = 0 - while ind < prompt_amount: - for ind2 in range(int(drag_cat[ind])): - if not struc_prompt: - texts.append(text_cat[ind]) - global_prompt = "none" - bpm = "none" - key = "none" - scale = "none" - raw_texts.append(text_cat[ind]) - else: - if gen_type == "music": - bpm_str = 
str(bpm) + " bpm" - key_str = ", " + str(key) + " " + str(scale) - global_str = (", " + str(global_prompt)) if str(global_prompt) != "" else "" - elif gen_type == "audio": - bpm_str = "" - key_str = "" - global_str = (str(global_prompt)) if str(global_prompt) != "" else "" - texts_str = (", " + str(text_cat[ind])) if str(text_cat[ind]) != "" else "" - texts.append(bpm_str + key_str + global_str + texts_str) - raw_texts.append(text_cat[ind]) - ind2 = 0 - ind = ind + 1 - - outs, outs_audio, outs_backup, input_length = _do_predictions( - gen_type, [texts], [melody], sample, trim_start, trim_end, duration, image, height, width, background, bar1, bar2, channel, sr_select, progress=True, - top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef, extend_stride=MODEL.max_duration-overlap) - tags = [str(global_prompt), str(bpm), str(key), str(scale), str(raw_texts), str(duration), str(overlap), str(seed), str(audio_mode), str(input_length), str(channel), str(sr_select), str(model_shrt), str(custom_model), str(base_model_shrt), str(decoder), str(topk), str(topp), str(temperature), str(cfg_coef), str(gen_type)] - wav_target, mp4_target, json_target = save_outputs(outs[0], outs_audio[0], tags, gen_type); - # Removes the temporary files. - for out in outs: - os.remove(out) - for out in outs_audio: - os.remove(out) - - return mp4_target, wav_target, outs_backup[0], [mp4_target, wav_target, json_target], seed - - -max_textboxes = 10 - - -def get_available_models(): - return sorted([re.sub('.pt$', '', item.name) for item in list(Path('models/').glob('*')) if item.name.endswith('.pt')]) - - -def toggle_audio_src(choice): - if choice == "mic": - return gr.update(source="microphone", value=None, label="Microphone") - else: - return gr.update(source="upload", value=None, label="File") - - -def ui_full(launch_kwargs): - with gr.Blocks(title='AudioCraft Plus', theme=theme) as interface: - gr.Markdown( - """ - # AudioCraft Plus - v2.0.0a - - ### An All-in-One AudioCraft WebUI - - Thanks to: facebookresearch, Camenduru, rkfg, oobabooga, AlexHK and GrandaddyShmax - """ - ) - with gr.Tab("MusicGen"): - gr.Markdown( - """ - ### MusicGen - """ - ) - with gr.Row(): - with gr.Column(): - with gr.Tab("Generation"): - with gr.Accordion("Structure Prompts", open=False): - with gr.Column(): - with gr.Row(): - struc_prompts = gr.Checkbox(label="Enable", value=False, interactive=True, container=False) - bpm = gr.Number(label="BPM", value=120, interactive=True, scale=1, precision=0) - key = gr.Dropdown(["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "Bb", "B"], label="Key", value="C", interactive=True) - scale = gr.Dropdown(["Major", "Minor"], label="Scale", value="Major", interactive=True) - with gr.Row(): - global_prompt = gr.Text(label="Global Prompt", interactive=True, scale=3) - with gr.Row(): - s = gr.Slider(1, max_textboxes, value=1, step=1, label="Prompts:", interactive=True, scale=2) - #s_mode = gr.Radio(["segmentation", "batch"], value="segmentation", interactive=True, scale=1, label="Generation Mode") - with gr.Column(): - textboxes = [] - prompts = [] - repeats = [] - calcs = [] - with gr.Row(): - text0 = gr.Text(label="Input Text", interactive=True, scale=4) - prompts.append(text0) - drag0 = gr.Number(label="Repeat", value=1, interactive=True, scale=1) - repeats.append(drag0) - calc0 = gr.Text(interactive=False, value="00:00 - 00:00", scale=1, label="Time") - calcs.append(calc0) - for i in range(max_textboxes): - with gr.Row(visible=False) as t: - text = gr.Text(label="Input Text", 
interactive=True, scale=3) - repeat = gr.Number(label="Repeat", minimum=1, value=1, interactive=True, scale=1) - calc = gr.Text(interactive=False, value="00:00 - 00:00", scale=1, label="Time") - textboxes.append(t) - prompts.append(text) - repeats.append(repeat) - calcs.append(calc) - to_calc = gr.Button("Calculate Timings", variant="secondary") - with gr.Row(): - duration = gr.Slider(minimum=1, maximum=300, value=10, step=1, label="Duration", interactive=True) - with gr.Row(): - overlap = gr.Slider(minimum=1, maximum=29, value=12, step=1, label="Overlap", interactive=True) - with gr.Row(): - seed = gr.Number(label="Seed", value=-1, scale=4, precision=0, interactive=True) - gr.Button('\U0001f3b2\ufe0f', scale=1).click(fn=lambda: -1, outputs=[seed], queue=False) - reuse_seed = gr.Button('\u267b\ufe0f', scale=1) - - with gr.Tab("Audio"): - with gr.Row(): - with gr.Column(): - input_type = gr.Radio(["file", "mic"], value="file", label="Input Type (optional)", interactive=True) - mode = gr.Radio(["melody", "sample"], label="Input Audio Mode (optional)", value="sample", interactive=True) - with gr.Row(): - trim_start = gr.Number(label="Trim Start", value=0, interactive=True) - trim_end = gr.Number(label="Trim End", value=0, interactive=True) - audio = gr.Audio(source="upload", type="numpy", label="Input Audio (optional)", interactive=True) - - with gr.Tab("Customization"): - with gr.Row(): - with gr.Column(): - background = gr.ColorPicker(value="#0f0f0f", label="background color", interactive=True, scale=0) - bar1 = gr.ColorPicker(value="#84cc16", label="bar color start", interactive=True, scale=0) - bar2 = gr.ColorPicker(value="#10b981", label="bar color end", interactive=True, scale=0) - with gr.Column(): - image = gr.Image(label="Background Image", type="filepath", interactive=True, scale=4) - with gr.Row(): - height = gr.Number(label="Height", value=512, interactive=True) - width = gr.Number(label="Width", value=768, interactive=True) - - with gr.Tab("Settings"): - with gr.Row(): - channel = gr.Radio(["mono", "stereo", "stereo effect"], label="Output Audio Channels", value="stereo", interactive=True, scale=1) - sr_select = gr.Dropdown(["11025", "16000", "22050", "24000", "32000", "44100", "48000"], label="Output Audio Sample Rate", value="48000", interactive=True) - with gr.Row(): - model = gr.Radio(["melody", "small", "medium", "large", "custom"], label="Model", value="large", interactive=True, scale=1) - with gr.Column(): - dropdown = gr.Dropdown(choices=get_available_models(), value=("No models found" if len(get_available_models()) < 1 else get_available_models()[0]), label='Custom Model (models folder)', elem_classes='slim-dropdown', interactive=True) - ui.create_refresh_button(dropdown, lambda: None, lambda: {'choices': get_available_models()}, 'refresh-button') - basemodel = gr.Radio(["small", "medium", "melody", "large"], label="Base Model", value="medium", interactive=True, scale=1) - with gr.Row(): - decoder = gr.Radio(["Default", "MultiBand_Diffusion"], label="Decoder", value="Default", interactive=True) - with gr.Row(): - topk = gr.Number(label="Top-k", value=250, interactive=True) - topp = gr.Number(label="Top-p", value=0, interactive=True) - temperature = gr.Number(label="Temperature", value=1.0, interactive=True) - cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True) - with gr.Row(): - submit = gr.Button("Generate", variant="primary") - # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license. 
- _ = gr.Button("Interrupt").click(fn=interrupt, queue=False) - with gr.Column() as c: - with gr.Tab("Output"): - output = gr.Video(label="Generated Music", scale=0) - with gr.Row(): - audio_only = gr.Audio(type="numpy", label="Audio Only", interactive=False) - backup_only = gr.Audio(type="numpy", label="Backup Audio", interactive=False, visible=False) - send_audio = gr.Button("Send to Input Audio") - seed_used = gr.Number(label='Seed used', value=-1, interactive=False) - download = gr.File(label="Generated Files", interactive=False) - with gr.Tab("Wiki"): - gr.Markdown( - """ - - **[Generate (button)]:** - Generates the music with the given settings and prompts. - - - **[Interrupt (button)]:** - Stops the music generation as soon as it can, providing an incomplete output. - - --- - - ### Generation Tab: - - #### Structure Prompts: - - This feature helps reduce repetetive prompts by allowing you to set global prompts - that will be used for all prompt segments. - - - **[Structure Prompts (checkbox)]:** - Enable/Disable the structure prompts feature. - - - **[BPM (number)]:** - Beats per minute of the generated music. - - - **[Key (dropdown)]:** - The key of the generated music. - - - **[Scale (dropdown)]:** - The scale of the generated music. - - - **[Global Prompt (text)]:** - Here write the prompt that you wish to be used for all prompt segments. - - #### Multi-Prompt: - - This feature allows you to control the music, adding variation to different time segments. - You have up to 10 prompt segments. the first prompt will always be 30s long - the other prompts will be [30s - overlap]. - for example if the overlap is 10s, each prompt segment will be 20s. - - - **[Prompt Segments (number)]:** - Amount of unique prompt to generate throughout the music generation. - - - **[Prompt/Input Text (prompt)]:** - Here describe the music you wish the model to generate. - - - **[Repeat (number)]:** - Write how many times this prompt will repeat (instead of wasting another prompt segment on the same prompt). - - - **[Time (text)]:** - The time of the prompt segment. - - - **[Calculate Timings (button)]:** - Calculates the timings of the prompt segments. - - - **[Duration (number)]:** - How long you want the generated music to be (in seconds). - - - **[Overlap (number)]:** - How much each new segment will reference the previous segment (in seconds). - For example, if you choose 20s: Each new segment after the first one will reference the previous segment 20s - and will generate only 10s of new music. The model can only process 30s of music. - - - **[Seed (number)]:** - Your generated music id. If you wish to generate the exact same music, - place the exact seed with the exact prompts - (This way you can also extend specific song that was generated short). - - - **[Random Seed (button)]:** - Gives "-1" as a seed, which counts as a random seed. - - - **[Copy Previous Seed (button)]:** - Copies the seed from the output seed (if you don't feel like doing it manualy). - - --- - - ### Audio Tab: - - - **[Input Type (selection)]:** - `File` mode allows you to upload an audio file to use as input - `Mic` mode allows you to use your microphone as input - - - **[Input Audio Mode (selection)]:** - `Melody` mode only works with the melody model: it conditions the music generation to reference the melody - `Sample` mode works with any model: it gives a music sample to the model to generate its continuation. 
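The two input-audio modes map onto the two MusicGen calls already used in `_do_predictions` above (`generate_with_chroma` for melody conditioning, `generate_continuation` for sample continuation). A minimal sketch of that distinction, assuming the public `audiocraft` API; the checkpoint name and the placeholder waveform are illustrative, not the app's exact configuration:

```python
import torch
from audiocraft.models import MusicGen

model = MusicGen.get_pretrained('facebook/musicgen-melody')  # assumed stock checkpoint
model.set_generation_params(duration=10)

descriptions = ["lofi chillhop with warm keys"]
reference = torch.randn(1, 32000 * 5) * 0.1  # placeholder 5 s mono clip at 32 kHz

# "melody" mode: the clip only steers the broad melody of the new music.
wav = model.generate_with_chroma(
    descriptions=descriptions,
    melody_wavs=[reference],
    melody_sample_rate=32000,
)

# "sample" mode: the clip is treated as the beginning and is continued.
wav = model.generate_continuation(
    prompt=reference,
    prompt_sample_rate=32000,
    descriptions=descriptions,
)
```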
- - - **[Trim Start and Trim End (numbers)]:** - `Trim Start` set how much you'd like to trim the input audio from the start - `Trim End` same as the above but from the end - - - **[Input Audio (audio file)]:** - Input here the audio you wish to use with "melody" or "sample" mode. - - --- - - ### Customization Tab: - - - **[Background Color (color)]:** - Works only if you don't upload image. Color of the background of the waveform. - - - **[Bar Color Start (color)]:** - First color of the waveform bars. - - - **[Bar Color End (color)]:** - Second color of the waveform bars. - - - **[Background Image (image)]:** - Background image that you wish to be attached to the generated video along with the waveform. - - - **[Height and Width (numbers)]:** - Output video resolution, only works with image. - (minimum height and width is 256). - - --- - - ### Settings Tab: - - - **[Output Audio Channels (selection)]:** - With this you can select the amount of channels that you wish for your output audio. - `mono` is a straightforward single channel audio - `stereo` is a dual channel audio but it will sound more or less like mono - `stereo effect` this one is also dual channel but uses tricks to simulate a stereo audio. - - - **[Output Audio Sample Rate (dropdown)]:** - The output audio sample rate, the model default is 32000. - - - **[Model (selection)]:** - Here you can choose which model you wish to use: - `melody` model is based on the medium model with a unique feature that lets you use melody conditioning - `small` model is trained on 300M parameters - `medium` model is trained on 1.5B parameters - `large` model is trained on 3.3B parameters - `custom` model runs the custom model that you provided. - - - **[Custom Model (selection)]:** - This dropdown will show you models that are placed in the `models` folder - you must select `custom` in the model options in order to use it. - - - **[Refresh (button)]:** - Refreshes the dropdown list for custom model. - - - **[Base Model (selection)]:** - Choose here the model that your custom model is based on. - - - **[Decoder (selection)]:** - Choose here the decoder that you wish to use: - `Default` is the default decoder - `MultiBand_Diffusion` is a decoder that uses diffusion to generate the audio. - - - **[Top-k (number)]:** - is a parameter used in text generation models, including music generation models. It determines the number of most likely next tokens to consider at each step of the generation process. The model ranks all possible tokens based on their predicted probabilities, and then selects the top-k tokens from the ranked list. The model then samples from this reduced set of tokens to determine the next token in the generated sequence. A smaller value of k results in a more focused and deterministic output, while a larger value of k allows for more diversity in the generated music. - - - **[Top-p (number)]:** - also known as nucleus sampling or probabilistic sampling, is another method used for token selection during text generation. Instead of specifying a fixed number like top-k, top-p considers the cumulative probability distribution of the ranked tokens. It selects the smallest possible set of tokens whose cumulative probability exceeds a certain threshold (usually denoted as p). The model then samples from this set to choose the next token. This approach ensures that the generated output maintains a balance between diversity and coherence, as it allows for a varying number of tokens to be considered based on their probabilities. 
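Because Top-k and Top-p are described at length here, a short generic sketch of how such filtering is commonly applied to a logits vector before sampling may help build intuition; this is an illustrative implementation, not AudioCraft's internal sampler:

```python
import torch

def sample_next_token(logits: torch.Tensor, top_k: int = 250,
                      top_p: float = 0.0, temperature: float = 1.0) -> int:
    """Sample one token id from a 1-D logits vector with top-k / top-p filtering."""
    logits = logits / max(temperature, 1e-5)     # temperature: <1 sharpens, >1 flattens
    probs = torch.softmax(logits, dim=-1)

    if top_k > 0:                                # keep only the k most probable tokens
        kth_best = torch.topk(probs, min(top_k, probs.numel())).values[-1]
        probs = torch.where(probs < kth_best, torch.zeros_like(probs), probs)

    if top_p > 0.0:                              # nucleus: smallest set with cumulative prob > p
        sorted_probs, sorted_idx = torch.sort(probs, descending=True)
        cumulative = torch.cumsum(sorted_probs, dim=-1)
        remove = cumulative > top_p
        remove[1:] = remove[:-1].clone()         # shift so the token that crosses p is kept
        remove[0] = False
        probs[sorted_idx[remove]] = 0.0

    probs = probs / probs.sum()                  # renormalize the surviving mass
    return int(torch.multinomial(probs, num_samples=1))

next_id = sample_next_token(torch.randn(2048))   # e.g. over a 2048-entry codebook
```

In this sketch a Top-p of 0 simply skips the nucleus step, which mirrors the UI treating 0 as the disabled default.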
- - - **[Temperature (number)]:** - is a parameter that controls the randomness of the generated output. It is applied during the sampling process, where a higher temperature value results in more random and diverse outputs, while a lower temperature value leads to more deterministic and focused outputs. In the context of music generation, a higher temperature can introduce more variability and creativity into the generated music, but it may also lead to less coherent or structured compositions. On the other hand, a lower temperature can produce more repetitive and predictable music. - - - **[Classifier Free Guidance (number)]:** - refers to a technique used in some music generation models where a separate classifier network is trained to provide guidance or control over the generated music. This classifier is trained on labeled data to recognize specific musical characteristics or styles. During the generation process, the output of the generator model is evaluated by the classifier, and the generator is encouraged to produce music that aligns with the desired characteristics or style. This approach allows for more fine-grained control over the generated music, enabling users to specify certain attributes they want the model to capture. - """ - ) - with gr.Tab("AudioGen"): - gr.Markdown( - """ - ### AudioGen - """ - ) - with gr.Row(): - with gr.Column(): - with gr.Tab("Generation"): - with gr.Accordion("Structure Prompts", open=False): - with gr.Row(): - struc_prompts_a = gr.Checkbox(label="Enable", value=False, interactive=True, container=False) - global_prompt_a = gr.Text(label="Global Prompt", interactive=True, scale=3) - with gr.Row(): - s_a = gr.Slider(1, max_textboxes, value=1, step=1, label="Prompts:", interactive=True, scale=2) - with gr.Column(): - textboxes_a = [] - prompts_a = [] - repeats_a = [] - calcs_a = [] - with gr.Row(): - text0_a = gr.Text(label="Input Text", interactive=True, scale=4) - prompts_a.append(text0_a) - drag0_a = gr.Number(label="Repeat", value=1, interactive=True, scale=1) - repeats_a.append(drag0_a) - calc0_a = gr.Text(interactive=False, value="00:00 - 00:00", scale=1, label="Time") - calcs_a.append(calc0_a) - for i in range(max_textboxes): - with gr.Row(visible=False) as t_a: - text_a = gr.Text(label="Input Text", interactive=True, scale=3) - repeat_a = gr.Number(label="Repeat", minimum=1, value=1, interactive=True, scale=1) - calc_a = gr.Text(interactive=False, value="00:00 - 00:00", scale=1, label="Time") - textboxes_a.append(t_a) - prompts_a.append(text_a) - repeats_a.append(repeat_a) - calcs_a.append(calc_a) - to_calc_a = gr.Button("Calculate Timings", variant="secondary") - with gr.Row(): - duration_a = gr.Slider(minimum=1, maximum=300, value=10, step=1, label="Duration", interactive=True) - with gr.Row(): - overlap_a = gr.Slider(minimum=1, maximum=9, value=2, step=1, label="Overlap", interactive=True) - with gr.Row(): - seed_a = gr.Number(label="Seed", value=-1, scale=4, precision=0, interactive=True) - gr.Button('\U0001f3b2\ufe0f', scale=1).click(fn=lambda: -1, outputs=[seed_a], queue=False) - reuse_seed_a = gr.Button('\u267b\ufe0f', scale=1) - - with gr.Tab("Audio"): - with gr.Row(): - with gr.Column(): - input_type_a = gr.Radio(["file", "mic"], value="file", label="Input Type (optional)", interactive=True) - mode_a = gr.Radio(["sample"], label="Input Audio Mode (optional)", value="sample", interactive=False, visible=False) - with gr.Row(): - trim_start_a = gr.Number(label="Trim Start", value=0, interactive=True) - trim_end_a = 
gr.Number(label="Trim End", value=0, interactive=True) - audio_a = gr.Audio(source="upload", type="numpy", label="Input Audio (optional)", interactive=True) - - with gr.Tab("Customization"): - with gr.Row(): - with gr.Column(): - background_a = gr.ColorPicker(value="#0f0f0f", label="background color", interactive=True, scale=0) - bar1_a = gr.ColorPicker(value="#84cc16", label="bar color start", interactive=True, scale=0) - bar2_a = gr.ColorPicker(value="#10b981", label="bar color end", interactive=True, scale=0) - with gr.Column(): - image_a = gr.Image(label="Background Image", type="filepath", interactive=True, scale=4) - with gr.Row(): - height_a = gr.Number(label="Height", value=512, interactive=True) - width_a = gr.Number(label="Width", value=768, interactive=True) - - with gr.Tab("Settings"): - with gr.Row(): - channel_a = gr.Radio(["mono", "stereo", "stereo effect"], label="Output Audio Channels", value="stereo", interactive=True, scale=1) - sr_select_a = gr.Dropdown(["11025", "16000", "22050", "24000", "32000", "44100", "48000"], label="Output Audio Sample Rate", value="48000", interactive=True) - with gr.Row(): - model_a = gr.Radio(["medium"], label="Model", value="medium", interactive=False, visible=False) - decoder_a = gr.Radio(["Default"], label="Decoder", value="Default", interactive=False, visible=False) - with gr.Row(): - topk_a = gr.Number(label="Top-k", value=250, interactive=True) - topp_a = gr.Number(label="Top-p", value=0, interactive=True) - temperature_a = gr.Number(label="Temperature", value=1.0, interactive=True) - cfg_coef_a = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True) - with gr.Row(): - submit_a = gr.Button("Generate", variant="primary") - _ = gr.Button("Interrupt").click(fn=interrupt, queue=False) - with gr.Column(): - with gr.Tab("Output"): - output_a = gr.Video(label="Generated Audio", scale=0) - with gr.Row(): - audio_only_a = gr.Audio(type="numpy", label="Audio Only", interactive=False) - backup_only_a = gr.Audio(type="numpy", label="Backup Audio", interactive=False, visible=False) - send_audio_a = gr.Button("Send to Input Audio") - seed_used_a = gr.Number(label='Seed used', value=-1, interactive=False) - download_a = gr.File(label="Generated Files", interactive=False) - with gr.Tab("Wiki"): - gr.Markdown( - """ - - **[Generate (button)]:** - Generates the audio with the given settings and prompts. - - - **[Interrupt (button)]:** - Stops the audio generation as soon as it can, providing an incomplete output. - - --- - - ### Generation Tab: - - #### Structure Prompts: - - This feature helps reduce repetetive prompts by allowing you to set global prompts - that will be used for all prompt segments. - - - **[Structure Prompts (checkbox)]:** - Enable/Disable the structure prompts feature. - - - **[Global Prompt (text)]:** - Here write the prompt that you wish to be used for all prompt segments. - - #### Multi-Prompt: - - This feature allows you to control the audio, adding variation to different time segments. - You have up to 10 prompt segments. the first prompt will always be 10s long - the other prompts will be [10s - overlap]. - for example if the overlap is 2s, each prompt segment will be 8s. - - - **[Prompt Segments (number)]:** - Amount of unique prompt to generate throughout the audio generation. - - - **[Prompt/Input Text (prompt)]:** - Here describe the audio you wish the model to generate. 
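To make the segment arithmetic above concrete (a 10 s model window for AudioGen, with every later segment contributing `10 - overlap` seconds of new audio), here is a small sketch that mirrors the idea behind the app's `calc_time`, simplified by ignoring per-prompt repeats:

```python
def segment_spans(num_segments: int, duration: float, overlap: float, window: float = 10.0):
    """Return the (start, end) time span covered by each prompt segment, capped at duration."""
    new_audio = window - overlap                 # seconds of new audio per later segment
    spans, t = [], 0.0
    for i in range(num_segments):
        length = window if i == 0 else new_audio
        end = min(t + length, duration)
        spans.append((t, end))
        t = end
        if t >= duration:
            break
    return spans

# 3 prompts, 24 s total, 2 s overlap -> segments cover 0-10 s, 10-18 s and 18-24 s
print(segment_spans(3, 24, 2))
```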
- - - **[Repeat (number)]:** - Write how many times this prompt will repeat (instead of wasting another prompt segment on the same prompt). - - - **[Time (text)]:** - The time of the prompt segment. - - - **[Calculate Timings (button)]:** - Calculates the timings of the prompt segments. - - - **[Duration (number)]:** - How long you want the generated audio to be (in seconds). - - - **[Overlap (number)]:** - How much each new segment will reference the previous segment (in seconds). - For example, if you choose 2s: Each new segment after the first one will reference the previous segment 2s - and will generate only 8s of new audio. The model can only process 10s of music. - - - **[Seed (number)]:** - Your generated audio id. If you wish to generate the exact same audio, - place the exact seed with the exact prompts - (This way you can also extend specific song that was generated short). - - - **[Random Seed (button)]:** - Gives "-1" as a seed, which counts as a random seed. - - - **[Copy Previous Seed (button)]:** - Copies the seed from the output seed (if you don't feel like doing it manualy). - - --- - - ### Audio Tab: - - - **[Input Type (selection)]:** - `File` mode allows you to upload an audio file to use as input - `Mic` mode allows you to use your microphone as input - - - **[Trim Start and Trim End (numbers)]:** - `Trim Start` set how much you'd like to trim the input audio from the start - `Trim End` same as the above but from the end - - - **[Input Audio (audio file)]:** - Input here the audio you wish to use. - - --- - - ### Customization Tab: - - - **[Background Color (color)]:** - Works only if you don't upload image. Color of the background of the waveform. - - - **[Bar Color Start (color)]:** - First color of the waveform bars. - - - **[Bar Color End (color)]:** - Second color of the waveform bars. - - - **[Background Image (image)]:** - Background image that you wish to be attached to the generated video along with the waveform. - - - **[Height and Width (numbers)]:** - Output video resolution, only works with image. - (minimum height and width is 256). - - --- - - ### Settings Tab: - - - **[Output Audio Channels (selection)]:** - With this you can select the amount of channels that you wish for your output audio. - `mono` is a straightforward single channel audio - `stereo` is a dual channel audio but it will sound more or less like mono - `stereo effect` this one is also dual channel but uses tricks to simulate a stereo audio. - - - **[Output Audio Sample Rate (dropdown)]:** - The output audio sample rate, the model default is 32000. - - - **[Top-k (number)]:** - is a parameter used in text generation models, including music generation models. It determines the number of most likely next tokens to consider at each step of the generation process. The model ranks all possible tokens based on their predicted probabilities, and then selects the top-k tokens from the ranked list. The model then samples from this reduced set of tokens to determine the next token in the generated sequence. A smaller value of k results in a more focused and deterministic output, while a larger value of k allows for more diversity in the generated music. - - - **[Top-p (number)]:** - also known as nucleus sampling or probabilistic sampling, is another method used for token selection during text generation. Instead of specifying a fixed number like top-k, top-p considers the cumulative probability distribution of the ranked tokens. 
It selects the smallest possible set of tokens whose cumulative probability exceeds a certain threshold (usually denoted as p). The model then samples from this set to choose the next token. This approach ensures that the generated output maintains a balance between diversity and coherence, as it allows for a varying number of tokens to be considered based on their probabilities. - - - **[Temperature (number)]:** - is a parameter that controls the randomness of the generated output. It is applied during the sampling process, where a higher temperature value results in more random and diverse outputs, while a lower temperature value leads to more deterministic and focused outputs. In the context of music generation, a higher temperature can introduce more variability and creativity into the generated music, but it may also lead to less coherent or structured compositions. On the other hand, a lower temperature can produce more repetitive and predictable music. - - - **[Classifier Free Guidance (number)]:** - refers to a technique used in some music generation models where a separate classifier network is trained to provide guidance or control over the generated music. This classifier is trained on labeled data to recognize specific musical characteristics or styles. During the generation process, the output of the generator model is evaluated by the classifier, and the generator is encouraged to produce music that aligns with the desired characteristics or style. This approach allows for more fine-grained control over the generated music, enabling users to specify certain attributes they want the model to capture. - """ - ) - with gr.Tab("Audio Info"): - gr.Markdown( - """ - ### Audio Info - """ - ) - with gr.Row(): - with gr.Column(): - in_audio = gr.File(type="file", label="Input Any Audio", interactive=True) - with gr.Row(): - send_gen = gr.Button("Send to MusicGen", variant="primary") - send_gen_a = gr.Button("Send to AudioGen", variant="primary") - with gr.Column(): - info = gr.Textbox(label="Audio Info", lines=10, interactive=False) - with gr.Tab("Changelog"): - gr.Markdown( - """ - ## Changelog: - - ### v2.0.0a - - - Forgot to move all the update to app.py from temp2.py... 
oops - - - - ### v2.0.0 - - - Changed name from MusicGen+ to AudioCraft Plus - - - Complete overhaul of the repo "backend" with the latest changes from the main facebookresearch repo - - - Added a new decoder: MultiBand_Diffusion - - - Added AudioGen: a new tab for generating audio - - - - ### v1.2.8c - - - Implemented Reverse compatibility for audio info tab with previous versions - - - - ### v1.2.8b - - - Fixed the error when loading default models - - - - ### v1.2.8a - - - Adapted Audio info tab to work with the new structure prompts feature - - - Now custom models actually work, make sure you select the correct base model - - - - ### v1.2.8 - - - Now you will also recieve json file with metadata of generated audio - - - Added error messages in Audio Info tab - - - Added structure prompts: you can select bpm, key and global prompt for all prompts - - - Added time display next to each prompt, can be calculated with "Calculate Timings" button - - - - ### v1.2.7 - - - When sending generated audio to Input Audio, it will send a backup audio with default settings - (best for continuos generation) - - - Added Metadata to generated audio (Thanks to AlexHK ♥) - - - Added Audio Info tab that will display the metadata of the input audio - - - Added "send to Text2Audio" button in Audio Info tab - - - Generated audio is now stored in the "output" folder (Thanks to AlexHK ♥) - - - Added an output area with generated files and download buttons - - - Enhanced Stereo effect (Thanks to AlexHK ♥) - - - - ### v1.2.6 - - - Added option to generate in stereo (instead of only mono) - - - Added dropdown for selecting output sample rate (model default is 32000) - - - - ### v1.2.5a - - - Added file cleaner (This comes from the main facebookresearch repo) - - - Reorganized a little, moved audio to a seperate tab - - - - ### v1.2.5 - - - Gave a unique lime theme to the webui - - - Added additional output for audio only - - - Added button to send generated audio to Input Audio - - - Added option to trim Input Audio - - - - ### v1.2.4 - - - Added mic input (This comes from the main facebookresearch repo) - - - - ### v1.2.3 - - - Added option to change video size to fit the image you upload - - - - ### v1.2.2 - - - Added Wiki, Changelog and About tabs - - - - ### v1.2.1 - - - Added tabs and organized the entire interface - - - Added option to attach image to the output video - - - Added option to load fine-tuned models (Yet to be tested) - - - - ### v1.2.0 - - - Added Multi-Prompt - - - - ### v1.1.3 - - - Added customization options for generated waveform - - - - ### v1.1.2 - - - Removed sample length limit: now you can input audio of any length as music sample - - - - ### v1.1.1 - - - Improved music sample audio quality when using music continuation - - - - ### v1.1.0 - - - Rebuilt the repo on top of the latest structure of the main MusicGen repo - - - Improved Music continuation feature - - - - ### v1.0.0 - Stable Version - - - Added Music continuation - """ - ) - with gr.Tab("About"): - gen_type = gr.Text(value="music", interactive=False, visible=False) - gen_type_a = gr.Text(value="audio", interactive=False, visible=False) - gr.Markdown( - """ - This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft), a simple and controllable model for music generation - presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284) - - ## MusicGen+ is an extended version of the original MusicGen by facebookresearch. 
- - ### Repo: https://github.com/GrandaddyShmax/audiocraft_plus/tree/plus - - --- - - ### This project was possible thanks to: - - #### GrandaddyShmax - https://github.com/GrandaddyShmax - - #### Camenduru - https://github.com/camenduru - - #### rkfg - https://github.com/rkfg - - #### oobabooga - https://github.com/oobabooga - - #### AlexHK - https://github.com/alanhk147 - """ - ) - - send_gen.click(info_to_params, inputs=[in_audio], outputs=[decoder, struc_prompts, global_prompt, bpm, key, scale, model, dropdown, basemodel, s, prompts[0], prompts[1], prompts[2], prompts[3], prompts[4], prompts[5], prompts[6], prompts[7], prompts[8], prompts[9], repeats[0], repeats[1], repeats[2], repeats[3], repeats[4], repeats[5], repeats[6], repeats[7], repeats[8], repeats[9], mode, duration, topk, topp, temperature, cfg_coef, seed, overlap, channel, sr_select], queue=False) - reuse_seed.click(fn=lambda x: x, inputs=[seed_used], outputs=[seed], queue=False) - send_audio.click(fn=lambda x: x, inputs=[backup_only], outputs=[audio], queue=False) - submit.click(predict_full, inputs=[gen_type, model, decoder, dropdown, basemodel, s, struc_prompts, bpm, key, scale, global_prompt, prompts[0], prompts[1], prompts[2], prompts[3], prompts[4], prompts[5], prompts[6], prompts[7], prompts[8], prompts[9], repeats[0], repeats[1], repeats[2], repeats[3], repeats[4], repeats[5], repeats[6], repeats[7], repeats[8], repeats[9], audio, mode, trim_start, trim_end, duration, topk, topp, temperature, cfg_coef, seed, overlap, image, height, width, background, bar1, bar2, channel, sr_select], outputs=[output, audio_only, backup_only, download, seed_used]) - input_type.change(toggle_audio_src, input_type, [audio], queue=False, show_progress=False) - to_calc.click(calc_time, inputs=[gen_type, s, duration, overlap, repeats[0], repeats[1], repeats[2], repeats[3], repeats[4], repeats[5], repeats[6], repeats[7], repeats[8], repeats[9]], outputs=[calcs[0], calcs[1], calcs[2], calcs[3], calcs[4], calcs[5], calcs[6], calcs[7], calcs[8], calcs[9]], queue=False) - - send_gen_a.click(info_to_params_a, inputs=[in_audio], outputs=[decoder_a, struc_prompts_a, global_prompt_a, s_a, prompts_a[0], prompts_a[1], prompts_a[2], prompts_a[3], prompts_a[4], prompts_a[5], prompts_a[6], prompts_a[7], prompts_a[8], prompts_a[9], repeats_a[0], repeats_a[1], repeats_a[2], repeats_a[3], repeats_a[4], repeats_a[5], repeats_a[6], repeats_a[7], repeats_a[8], repeats_a[9], duration_a, topk_a, topp_a, temperature_a, cfg_coef_a, seed_a, overlap_a, channel_a, sr_select_a], queue=False) - reuse_seed_a.click(fn=lambda x: x, inputs=[seed_used_a], outputs=[seed_a], queue=False) - send_audio_a.click(fn=lambda x: x, inputs=[backup_only_a], outputs=[audio_a], queue=False) - submit_a.click(predict_full, inputs=[gen_type_a, model_a, decoder_a, dropdown, basemodel, s_a, struc_prompts_a, bpm, key, scale, global_prompt_a, prompts_a[0], prompts_a[1], prompts_a[2], prompts_a[3], prompts_a[4], prompts_a[5], prompts_a[6], prompts_a[7], prompts_a[8], prompts_a[9], repeats_a[0], repeats_a[1], repeats_a[2], repeats_a[3], repeats_a[4], repeats_a[5], repeats_a[6], repeats_a[7], repeats_a[8], repeats_a[9], audio_a, mode_a, trim_start_a, trim_end_a, duration_a, topk_a, topp_a, temperature_a, cfg_coef_a, seed_a, overlap_a, image_a, height_a, width_a, background_a, bar1_a, bar2_a, channel_a, sr_select_a], outputs=[output_a, audio_only_a, backup_only_a, download_a, seed_used_a]) - input_type_a.change(toggle_audio_src, input_type_a, [audio_a], queue=False, show_progress=False) - 
to_calc_a.click(calc_time, inputs=[gen_type_a, s_a, duration_a, overlap_a, repeats_a[0], repeats_a[1], repeats_a[2], repeats_a[3], repeats_a[4], repeats_a[5], repeats_a[6], repeats_a[7], repeats_a[8], repeats_a[9]], outputs=[calcs_a[0], calcs_a[1], calcs_a[2], calcs_a[3], calcs_a[4], calcs_a[5], calcs_a[6], calcs_a[7], calcs_a[8], calcs_a[9]], queue=False) - - in_audio.change(get_audio_info, in_audio, outputs=[info]) - - def variable_outputs(k): - k = int(k) - 1 - return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k) - def get_size(image): - if image is not None: - img = Image.open(image) - img_height = img.height - img_width = img.width - if (img_height%2) != 0: - img_height = img_height + 1 - if (img_width%2) != 0: - img_width = img_width + 1 - return img_height, img_width - else: - return 512, 768 - - image.change(get_size, image, outputs=[height, width]) - image_a.change(get_size, image_a, outputs=[height_a, width_a]) - s.change(variable_outputs, s, textboxes) - s_a.change(variable_outputs, s_a, textboxes_a) - interface.queue().launch(**launch_kwargs) - - -def ui_batched(launch_kwargs): - with gr.Blocks() as demo: - gr.Markdown( - """ - # MusicGen - - This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft), - a simple and controllable model for music generation - presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284). -
    - - [Duplicate Space badge] - for longer sequences, more control and no queue.

    - """ - ) - with gr.Row(): - with gr.Column(): - with gr.Row(): - text = gr.Text(label="Describe your music", lines=2, interactive=True) - with gr.Column(): - radio = gr.Radio(["file", "mic"], value="file", - label="Condition on a melody (optional) File or Mic") - melody = gr.Audio(source="upload", type="numpy", label="File", - interactive=True, elem_id="melody-input") - with gr.Row(): - submit = gr.Button("Generate") - with gr.Column(): - output = gr.Video(label="Generated Music") - audio_output = gr.Audio(label="Generated Music (wav)", type='filepath') - submit.click(predict_batched, inputs=[text, melody], - outputs=[output, audio_output], batch=True, max_batch_size=MAX_BATCH_SIZE) - radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False) - gr.Examples( - fn=predict_batched, - examples=[ - [ - "An 80s driving pop song with heavy drums and synth pads in the background", - "./assets/bach.mp3", - ], - [ - "A cheerful country song with acoustic guitars", - "./assets/bolero_ravel.mp3", - ], - [ - "90s rock song with electric guitar and heavy drums", - None, - ], - [ - "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130", - "./assets/bach.mp3", - ], - [ - "lofi slow bpm electro chill with organic samples", - None, - ], - ], - inputs=[text, melody], - outputs=[output] - ) - gr.Markdown(""" - ### More details - - The model will generate 12 seconds of audio based on the description you provided. - You can optionally provide a reference audio from which a broad melody will be extracted. - The model will then try to follow both the description and melody provided. - All samples are generated with the `melody` model. - - You can also use your own GPU or a Google Colab by following the instructions on our repo. - - See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft) - for more details. 
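Roughly the same melody-conditioned generation can be reproduced outside this demo through the public `audiocraft` API. A minimal sketch, assuming the stock `facebook/musicgen-melody` checkpoint and the bundled `./assets/bach.mp3` example referenced above:

```python
import torchaudio
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

model = MusicGen.get_pretrained('facebook/musicgen-melody')
model.set_generation_params(duration=12)            # the demo generates 12 s clips

melody, sr = torchaudio.load('./assets/bach.mp3')   # optional reference melody
wav = model.generate_with_chroma(
    descriptions=['An 80s driving pop song with heavy drums and synth pads'],
    melody_wavs=[melody],
    melody_sample_rate=sr,
)

# Write the first item of the batch as a loudness-normalized WAV (out.wav).
audio_write('out', wav[0].cpu(), model.sample_rate, strategy='loudness')
```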
- """) - - demo.queue(max_size=8 * 4).launch(**launch_kwargs) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--listen', - type=str, - default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1', - help='IP to listen on for connections to Gradio', - ) - parser.add_argument( - '--username', type=str, default='', help='Username for authentication' - ) - parser.add_argument( - '--password', type=str, default='', help='Password for authentication' - ) - parser.add_argument( - '--server_port', - type=int, - default=0, - help='Port to run the server listener on', - ) - parser.add_argument( - '--inbrowser', action='store_true', help='Open in browser' - ) - parser.add_argument( - '--share', action='store_true', help='Share the gradio UI' - ) - parser.add_argument( - '--unload_model', action='store_true', help='Unload the model after every generation to save GPU memory' - ) - - parser.add_argument( - '--unload_to_cpu', action='store_true', help='Move the model to main RAM after every generation to save GPU memory but reload faster than after full unload (see above)' - ) - - parser.add_argument( - '--cache', action='store_true', help='Cache models in RAM to quickly switch between them' - ) - - args = parser.parse_args() - UNLOAD_MODEL = args.unload_model - MOVE_TO_CPU = args.unload_to_cpu - if args.cache: - MODELS = {} - - launch_kwargs = {} - launch_kwargs['server_name'] = args.listen - - if args.username and args.password: - launch_kwargs['auth'] = (args.username, args.password) - if args.server_port: - launch_kwargs['server_port'] = args.server_port - if args.inbrowser: - launch_kwargs['inbrowser'] = args.inbrowser - if args.share: - launch_kwargs['share'] = args.share - - # Show the interface - if IS_BATCHED: - global USE_DIFFUSION - USE_DIFFUSION = False - ui_batched(launch_kwargs) - else: - ui_full(launch_kwargs) \ No newline at end of file diff --git a/spaces/PunPk/AI_FallingAsleepDriving/README.md b/spaces/PunPk/AI_FallingAsleepDriving/README.md deleted file mode 100644 index 0855483dc5cdc696af1fe1d3502b584b42fde086..0000000000000000000000000000000000000000 --- a/spaces/PunPk/AI_FallingAsleepDriving/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AI FallingAsleepDriving -emoji: 📉 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/RMXK/RVC_HFF/tensorlowest.py b/spaces/RMXK/RVC_HFF/tensorlowest.py deleted file mode 100644 index eccd4dbf3494434e59f7defaae6ab91797263b90..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/tensorlowest.py +++ /dev/null @@ -1,123 +0,0 @@ -from tensorboard.backend.event_processing import event_accumulator - -import os -from shutil import copy2 -from re import search as RSearch -import pandas as pd -from ast import literal_eval as LEval - -weights_dir = 'weights/' - -def find_biggest_tensorboard(tensordir): - try: - files = [f for f in os.listdir(tensordir) if f.endswith('.0')] - if not files: - print("No files with the '.0' extension found!") - return - - max_size = 0 - biggest_file = "" - - for file in files: - file_path = os.path.join(tensordir, file) - if os.path.isfile(file_path): - file_size = os.path.getsize(file_path) - if file_size > max_size: - max_size = file_size - biggest_file = file - - return biggest_file - - except FileNotFoundError: - print("Couldn't find your model!") - return - -def main(model_name, 
save_freq, lastmdls): - global lowestval_weight_dir, scl - - tensordir = os.path.join('logs', model_name) - lowestval_weight_dir = os.path.join(tensordir, "lowestvals") - - latest_file = find_biggest_tensorboard(tensordir) - - if latest_file is None: - print("Couldn't find a valid tensorboard file!") - return - - tfile = os.path.join(tensordir, latest_file) - - ea = event_accumulator.EventAccumulator(tfile, - size_guidance={ - event_accumulator.COMPRESSED_HISTOGRAMS: 500, - event_accumulator.IMAGES: 4, - event_accumulator.AUDIO: 4, - event_accumulator.SCALARS: 0, - event_accumulator.HISTOGRAMS: 1, - }) - - ea.Reload() - ea.Tags() - - scl = ea.Scalars('loss/g/total') - - listwstep = {} - - for val in scl: - if (val.step // save_freq) * save_freq in [val.step for val in scl]: - listwstep[float(val.value)] = (val.step // save_freq) * save_freq - - lowest_vals = sorted(listwstep.keys())[:lastmdls] - - sorted_dict = {value: step for value, step in listwstep.items() if value in lowest_vals} - - return sorted_dict - -def selectweights(model_name, file_dict, weights_dir, lowestval_weight_dir): - os.makedirs(lowestval_weight_dir, exist_ok=True) - logdir = [] - files = [] - lbldict = { - 'Values': {}, - 'Names': {} - } - weights_dir_path = os.path.join(weights_dir, "") - low_val_path = os.path.join(os.getcwd(), os.path.join(lowestval_weight_dir, "")) - - try: - file_dict = LEval(file_dict) - except Exception as e: - print(f"Error! {e}") - return f"Couldn't load tensorboard file! {e}" - - weights = [f for f in os.scandir(weights_dir)] - for key, value in file_dict.items(): - pattern = fr"^{model_name}_.*_s{value}\.pth$" - matching_weights = [f.name for f in weights if f.is_file() and RSearch(pattern, f.name)] - for weight in matching_weights: - source_path = weights_dir_path + weight - destination_path = os.path.join(lowestval_weight_dir, weight) - - copy2(source_path, destination_path) - - logdir.append(f"File = {weight} Value: {key}, Step: {value}") - - lbldict['Names'][weight] = weight - lbldict['Values'][weight] = key - - files.append(low_val_path + weight) - - print(f"File = {weight} Value: {key}, Step: {value}") - - yield ('\n'.join(logdir), files, pd.DataFrame(lbldict)) - - - return ''.join(logdir), files, pd.DataFrame(lbldict) - - -if __name__ == "__main__": - model = str(input("Enter the name of the model: ")) - sav_freq = int(input("Enter save frequency of the model: ")) - ds = main(model, sav_freq) - - if ds: selectweights(model, ds, weights_dir, lowestval_weight_dir) - \ No newline at end of file diff --git a/spaces/Rekanice/hf_minimal_sushi/README.md b/spaces/Rekanice/hf_minimal_sushi/README.md deleted file mode 100644 index 4b038ea069e1817e717615da6679be0a913f0f5d..0000000000000000000000000000000000000000 --- a/spaces/Rekanice/hf_minimal_sushi/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Hf Minimal Sushi -emoji: 🌍 -colorFrom: purple -colorTo: yellow -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/RoCobo/WiggleGAN/main.py b/spaces/RoCobo/WiggleGAN/main.py deleted file mode 100644 index 097c3b9aed9be52619095419859205b71110555c..0000000000000000000000000000000000000000 --- a/spaces/RoCobo/WiggleGAN/main.py +++ /dev/null @@ -1,136 +0,0 @@ -import argparse -import os -import torch -from WiggleGAN import WiggleGAN -#from MyACGAN import MyACGAN -#from MyGAN import MyGAN - -"""parsing and configuration""" - - -def 
parse_args(): - desc = "Pytorch implementation of GAN collections" - parser = argparse.ArgumentParser(description=desc) - - parser.add_argument('--gan_type', type=str, default='WiggleGAN', - choices=['MyACGAN', 'MyGAN', 'WiggleGAN'], - help='The type of GAN') - parser.add_argument('--dataset', type=str, default='4cam', - choices=['mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'svhn', 'stl10', 'lsun-bed', '4cam'], - help='The name of dataset') - parser.add_argument('--split', type=str, default='', help='The split flag for svhn and stl10') - parser.add_argument('--epoch', type=int, default=50, help='The number of epochs to run') - parser.add_argument('--batch_size', type=int, default=16, help='The size of batch') - parser.add_argument('--input_size', type=int, default=10, help='The size of input image') - parser.add_argument('--save_dir', type=str, default='models', - help='Directory name to save the model') - parser.add_argument('--result_dir', type=str, default='results', help='Directory name to save the generated images') - parser.add_argument('--log_dir', type=str, default='logs', help='Directory name to save training logs') - parser.add_argument('--lrG', type=float, default=0.0002) - parser.add_argument('--lrD', type=float, default=0.001) - parser.add_argument('--beta1', type=float, default=0.5) - parser.add_argument('--beta2', type=float, default=0.999) - parser.add_argument('--gpu_mode', type=str2bool, default=True) - parser.add_argument('--benchmark_mode', type=str2bool, default=True) - parser.add_argument('--cameras', type=int, default=2) - parser.add_argument('--imageDim', type=int, default=128) - parser.add_argument('--epochV', type=int, default=0) - parser.add_argument('--cIm', type=int, default=4) - parser.add_argument('--seedLoad', type=str, default="-0000") - parser.add_argument('--zGF', type=float, default=0.2) - parser.add_argument('--zDF', type=float, default=0.2) - parser.add_argument('--bF', type=float, default=0.2) - parser.add_argument('--expandGen', type=int, default=3) - parser.add_argument('--expandDis', type=int, default=3) - parser.add_argument('--wiggleDepth', type=int, default=-1) - parser.add_argument('--visdom', type=str2bool, default=True) - parser.add_argument('--lambdaL1', type=int, default=100) - parser.add_argument('--clipping', type=float, default=-1) - parser.add_argument('--depth', type=str2bool, default=True) - parser.add_argument('--recreate', type=str2bool, default=False) - parser.add_argument('--name_wiggle', type=str, default='wiggle-result') - - return check_args(parser.parse_args()) - - -"""checking arguments""" - -def str2bool(v): - if isinstance(v, bool): - return v - if v.lower() in ('yes', 'true', 't', 'y', '1'): - return True - elif v.lower() in ('no', 'false', 'f', 'n', '0'): - return False - else: - raise argparse.ArgumentTypeError('Boolean value expected.') - - -def check_args(args): - # --save_dir - if not os.path.exists(args.save_dir): - os.makedirs(args.save_dir) - - # --result_dir - if not os.path.exists(args.result_dir): - os.makedirs(args.result_dir) - - # --result_dir - if not os.path.exists(args.log_dir): - os.makedirs(args.log_dir) - - # --epoch - try: - assert args.epoch >= 1 - except: - print('number of epochs must be larger than or equal to one') - - # --batch_size - try: - assert args.batch_size >= 1 - except: - print('batch size must be larger than or equal to one') - - return args - - -"""main""" - - -def main(): - # parse arguments - args = parse_args() - if args is None: - exit() - - if args.benchmark_mode: - 
torch.backends.cudnn.benchmark = True - - # declare instance for GAN - if args.gan_type == 'WiggleGAN': - gan = WiggleGAN(args) - #elif args.gan_type == 'MyACGAN': - # gan = MyACGAN(args) - #elif args.gan_type == 'MyGAN': - # gan = MyGAN(args) - else: - raise Exception("[!] There is no option for " + args.gan_type) - - # launch the graph in a session - if (args.wiggleDepth < 0 and not args.recreate): - print(" [*] Training Starting!") - gan.train() - print(" [*] Training finished!") - else: - if not args.recreate: - print(" [*] Wiggle Started!") - gan.wiggleEf() - print(" [*] Wiggle finished!") - else: - print(" [*] Dataset recreation Started") - gan.recreate() - print(" [*] Dataset recreation finished") - - -if __name__ == '__main__': - main() diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/core/utils/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/core/utils/__init__.py deleted file mode 100644 index f2678b321c295bcceaef945111ac3524be19d6e4..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/core/utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .misc import add_prefix - -__all__ = ['add_prefix'] diff --git a/spaces/SAAZIZI/SummarizeAV/resource_loader/uploaded_media_loader.py b/spaces/SAAZIZI/SummarizeAV/resource_loader/uploaded_media_loader.py deleted file mode 100644 index cdd69f2a24d47166425d785ef2767708b44b6593..0000000000000000000000000000000000000000 --- a/spaces/SAAZIZI/SummarizeAV/resource_loader/uploaded_media_loader.py +++ /dev/null @@ -1,32 +0,0 @@ -from config import output_path_video, output_path_audio -from logger import logger -from resource_loader.video_loader_interface import VideoLoaderInterface - - -class UploadedMediaLoader(VideoLoaderInterface): - def __init__(self, uploaded_stream, original_name, media_type='video'): - self.uploaded_stream = uploaded_stream - self.original_name = original_name - self.media_type = media_type # 'video' or 'audio' - self.media_id = None - self.filename = None - self.output_path = None - self.extract_filename() - self.set_output_path() - - def extract_filename(self): - self.filename = self.original_name.split(" - ", 1)[1] - self.media_id = self.filename.rsplit(".", 1)[0] - - def set_output_path(self): - if self.media_type == 'video': - self.output_path = output_path_video - elif self.media_type == 'audio': - self.output_path = output_path_audio - else: - raise ValueError("Invalid media type") - - def download(self): - with open(f"{self.output_path}/{self.filename}", "wb") as f: - f.write(self.uploaded_stream.getvalue()) - logger.info(f"{self.media_type.capitalize()} processed: {self.original_name}") diff --git a/spaces/STF-R/docker-test3/app/templates/prediction.html b/spaces/STF-R/docker-test3/app/templates/prediction.html deleted file mode 100644 index 4366614474cbe8347297ec3d57a7b8b7bf72684d..0000000000000000000000000000000000000000 --- a/spaces/STF-R/docker-test3/app/templates/prediction.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - VIVADATA | {{iris_type}} - - - - - - - - - - - -
-    VIVADATA - Flask Demo
-    🏵️🌺🌸 It's a {{iris_type|title}} 🏵️🌺🌸
-    setosa
-    Back
-    © Vivadata 2019
    - - diff --git a/spaces/Salesforce/EDICT/my_diffusers/utils/dummy_scipy_objects.py b/spaces/Salesforce/EDICT/my_diffusers/utils/dummy_scipy_objects.py deleted file mode 100644 index 3706c57541c1b7d9004957422b52cd1e2191ae68..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_diffusers/utils/dummy_scipy_objects.py +++ /dev/null @@ -1,11 +0,0 @@ -# This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - -from ..utils import DummyObject, requires_backends - - -class LMSDiscreteScheduler(metaclass=DummyObject): - _backends = ["scipy"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["scipy"]) diff --git a/spaces/ScottRobertsXR/image-captioning-01/vit_gpt2/modeling_flax_vit_gpt2.py b/spaces/ScottRobertsXR/image-captioning-01/vit_gpt2/modeling_flax_vit_gpt2.py deleted file mode 100644 index 2e25dd918f2714ae9a2c21d14401663c371b5f38..0000000000000000000000000000000000000000 --- a/spaces/ScottRobertsXR/image-captioning-01/vit_gpt2/modeling_flax_vit_gpt2.py +++ /dev/null @@ -1,704 +0,0 @@ -from typing import Callable, Optional, Tuple - -import flax.linen as nn -import jax -import jax.numpy as jnp -from flax.core.frozen_dict import FrozenDict, unfreeze -from jax import lax -from jax.random import PRNGKey -from transformers import GPT2Config, FlaxViTModel, ViTConfig -from transformers.modeling_flax_outputs import ( - FlaxCausalLMOutputWithCrossAttentions, - FlaxSeq2SeqLMOutput, - FlaxSeq2SeqModelOutput, -) -from transformers.models.bart.modeling_flax_bart import ( - shift_tokens_right, -) -from .modeling_flax_gpt2 import ( - FlaxGPT2Module, - FlaxGPT2Model, - FlaxPreTrainedModel -) -from transformers.models.vit.modeling_flax_vit import FlaxViTModule - -from .configuration_vit_gpt2 import ViTGPT2Config - - -class FlaxViTGPT2Module(nn.Module): - config: ViTGPT2Config - dtype: jnp.dtype = jnp.float32 # the dtype of the computation - - def setup(self): - - self.encoder = FlaxViTModule(self.config.vit_config, dtype=self.dtype) - self.decoder = FlaxGPT2Module(self.config.gpt2_config, dtype=self.dtype) - - def _get_encoder_module(self): - return self.encoder - - def _get_decoder_module(self): - return self.decoder - - def __call__( - self, - pixel_values, - input_ids, - attention_mask, - position_ids, - encoder_attention_mask: Optional[jnp.ndarray] = None, - output_attentions: bool = False, - output_hidden_states: bool = False, - return_dict: bool = True, - deterministic: bool = True, - ): - encoder_outputs = self.encoder( - pixel_values=pixel_values, - deterministic=deterministic, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - decoder_outputs = self.decoder( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - encoder_hidden_states=encoder_outputs[0], - encoder_attention_mask=encoder_attention_mask, - deterministic=deterministic, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict - ) - - return FlaxSeq2SeqModelOutput( - last_hidden_state=decoder_outputs.last_hidden_state, - decoder_hidden_states=decoder_outputs.hidden_states, - decoder_attentions=decoder_outputs.attentions, - cross_attentions=decoder_outputs.cross_attentions, - encoder_last_hidden_state=encoder_outputs.last_hidden_state, - encoder_hidden_states=encoder_outputs.hidden_states, - encoder_attentions=encoder_outputs.attentions, - ) - - -class FlaxViTGPT2ForConditionalGenerationModule(nn.Module): - 
config: ViTGPT2Config - dtype: jnp.dtype = jnp.float32 - bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros - - def setup(self): - self.model = FlaxViTGPT2Module(config=self.config, dtype=self.dtype) - self.lm_head = nn.Dense( - self.model.decoder.embed_dim, - use_bias=False, - dtype=self.dtype, - kernel_init=jax.nn.initializers.normal( - self.config.gpt2_config.initializer_range, self.dtype - ), - ) - self.final_logits_bias = self.param( - "final_logits_bias", self.bias_init, (1, self.model.decoder.embed_dim) - ) - - def _get_encoder_module(self): - return self.model.encoder - - def _get_decoder_module(self): - return self.model.decoder - - def __call__( - self, - pixel_values, - input_ids, - attention_mask, - position_ids, - encoder_attention_mask: Optional[jnp.ndarray] = None, - output_attentions: bool = False, - output_hidden_states: bool = False, - return_dict: bool = True, - deterministic: bool = True, - ): - outputs = self.model( - pixel_values=pixel_values, - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - encoder_attention_mask=encoder_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - deterministic=deterministic, - ) - - hidden_states = outputs[0] - lm_logits = self.lm_head(hidden_states) - lm_logits += self.final_logits_bias - - if not return_dict: - output = (lm_logits,) + outputs[1:] - return output - - return FlaxSeq2SeqLMOutput( - logits=lm_logits, - decoder_hidden_states=outputs.decoder_hidden_states, - decoder_attentions=outputs.decoder_attentions, - cross_attentions=outputs.cross_attentions, - encoder_last_hidden_state=outputs.encoder_last_hidden_state, - encoder_hidden_states=outputs.encoder_hidden_states, - encoder_attentions=outputs.encoder_attentions, - ) - -class FlaxViTGPT2PreTrainedModel(FlaxPreTrainedModel): - config_class = ViTGPT2Config - base_model_prefix: str = "model" - module_class: nn.Module = None - - def __init__( - self, - config: ViTGPT2Config, - input_shape: Tuple = None, - seed: int = 0, - dtype: jnp.dtype = jnp.float32, - **kwargs, - ): - if input_shape is None: - input_shape = ( - (1, config.vit_config.image_size, config.vit_config.image_size, 3), - (1, 1), - ) - - module = self.module_class(config=config, dtype=dtype, **kwargs) - super().__init__( - config, module, input_shape=input_shape, seed=seed, dtype=dtype - ) - - def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict: - # init input tensors - pixel_values = jax.random.normal(rng, input_shape[0]) - # # make sure initialization pass will work for FlaxBartForSequenceClassificationModule - # input_ids = jax.ops.index_update(input_ids, (..., -1), self.config.eos_token_id) - - input_ids = jnp.zeros(input_shape[1], dtype="i4") - attention_mask = jnp.ones_like(input_ids) - - batch_size, sequence_length = input_ids.shape - position_ids = jnp.broadcast_to( - jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) - ) - - params_rng, dropout_rng = jax.random.split(rng) - rngs = {"params": params_rng, "dropout": dropout_rng} - - return self.module.init( - rngs, - pixel_values, - input_ids, - attention_mask, - position_ids, - )["params"] - - def init_cache(self, batch_size, max_length, encoder_outputs): - - input_ids = jnp.ones((batch_size, max_length), dtype="i4") - attention_mask = jnp.ones_like(input_ids) - position_ids = jnp.broadcast_to( - jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), - input_ids.shape, - ) - - def 
_decoder_forward( - module, - input_ids, - attention_mask, - position_ids, - **kwargs, - ): - decoder_module = module._get_decoder_module() - return decoder_module( - input_ids, - attention_mask, - position_ids, - **kwargs, - ) - - init_variables = self.module.init( - jax.random.PRNGKey(0), - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - encoder_hidden_states=encoder_outputs[0], - init_cache=True, - method=_decoder_forward, # we only need to call the decoder to init the cache - ) - return unfreeze(init_variables["cache"]) - - def encode( - self, - pixel_values: jnp.ndarray, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - train: bool = False, - params: dict = None, - dropout_rng: PRNGKey = None, - ): - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - return_dict = ( - return_dict if return_dict is not None else self.config.return_dict - ) - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - def _encoder_forward(module, pixel_values, **kwargs): - encode_module = module._get_encoder_module() - return encode_module(pixel_values, **kwargs) - - return self.module.apply( - {"params": params or self.params}, - pixel_values=jnp.array(pixel_values, dtype="i4"), - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - deterministic=not train, - rngs=rngs, - method=_encoder_forward, - ) - - def decode( - self, - input_ids, - encoder_outputs, - encoder_attention_mask: Optional[jnp.ndarray] = None, - attention_mask: Optional[jnp.ndarray] = None, - position_ids: Optional[jnp.ndarray] = None, - past_key_values: dict = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - train: bool = False, - params: dict = None, - dropout_rng: PRNGKey = None, - ): - - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - return_dict = ( - return_dict if return_dict is not None else self.config.return_dict - ) - - encoder_hidden_states = encoder_outputs[0] - if encoder_attention_mask is None: - batch_size, sequence_length = encoder_hidden_states.shape[:2] - encoder_attention_mask = jnp.ones((batch_size, sequence_length)) - - batch_size, sequence_length = input_ids.shape - if attention_mask is None: - attention_mask = jnp.ones((batch_size, sequence_length)) - - if position_ids is None: - if past_key_values is not None: - raise ValueError( - "Make sure to provide `position_ids` when passing `past_key_values`." - ) - - position_ids = jnp.broadcast_to( - jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) - ) - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - inputs = {"params": params or self.params} - - # if past_key_values are passed then cache is already initialized a private flag init_cache has to be - # passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that - # it can be changed by FlaxGPT2Attention module - if past_key_values: - inputs["cache"] = past_key_values - mutable = ["cache"] - else: - mutable = False - - def _decoder_forward( - module, - input_ids, - attention_mask, - position_ids, - **kwargs, - ): - decoder_module = module._get_decoder_module() - return decoder_module( - input_ids, - attention_mask, - position_ids, - **kwargs, - ) - - outputs = self.module.apply( - inputs, - input_ids=jnp.array(input_ids, dtype="i4"), - attention_mask=jnp.array(attention_mask, dtype="i4"), - position_ids=jnp.array(position_ids, dtype="i4"), - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - deterministic=not train, - rngs=rngs, - mutable=mutable, - method=_decoder_forward, - ) - - # add updated cache to model output - if past_key_values is not None and return_dict: - outputs, past = outputs - outputs["past_key_values"] = unfreeze(past["cache"]) - return outputs - elif past_key_values is not None and not return_dict: - outputs, past = outputs - outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] - - return outputs - - def __call__( - self, - pixel_values: jnp.ndarray, - input_ids: Optional[jnp.ndarray] = None, - attention_mask: Optional[jnp.ndarray] = None, - position_ids: Optional[jnp.ndarray] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - train: bool = False, - params: dict = None, - dropout_rng: PRNGKey = None, - ): - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - return_dict = ( - return_dict if return_dict is not None else self.config.return_dict - ) - - pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) - - # # prepare encoder inputs - # if encoder_attention_mask is None: - # encoder_attention_mask = jnp.ones_like(input_ids) - - # if position_ids is None: - # batch_size, sequence_length = input_ids.shape - # position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) - - # prepare decoder inputs - # if decoder_input_ids is None: - # decoder_input_ids = shift_tokens_right( - # input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id - # ) # TODO: Check how to use this - - if attention_mask is None: - attention_mask = jnp.ones_like(input_ids) - if position_ids is None: - batch_size, sequence_length = input_ids.shape - position_ids = jnp.broadcast_to( - jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) - ) - - # Handle any PRNG if needed - rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} - - return self.module.apply( - {"params": params or self.params}, - pixel_values=jnp.array(pixel_values, dtype=jnp.float32), - input_ids=jnp.array(input_ids, dtype="i4"), - attention_mask=jnp.array(attention_mask, dtype="i4"), - position_ids=jnp.array(position_ids, dtype="i4"), - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - deterministic=not train, - rngs=rngs, - ) - - -class 
FlaxViTGPT2ForConditionalGeneration(FlaxViTGPT2PreTrainedModel): - module_class = FlaxViTGPT2ForConditionalGenerationModule - dtype: jnp.dtype = jnp.float32 - - def decode( - self, - input_ids, - encoder_outputs, - encoder_attention_mask: Optional[jnp.ndarray] = None, - attention_mask: Optional[jnp.ndarray] = None, - position_ids: Optional[jnp.ndarray] = None, - past_key_values: dict = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - deterministic: bool = True, - params: dict = None, - dropout_rng: PRNGKey = None, - ): - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - return_dict = ( - return_dict if return_dict is not None else self.config.return_dict - ) - - encoder_hidden_states = encoder_outputs[0] - if encoder_attention_mask is None: - batch_size, sequence_length = encoder_hidden_states.shape[:2] - encoder_attention_mask = jnp.ones((batch_size, sequence_length)) - - batch_size, sequence_length = input_ids.shape - if attention_mask is None: - attention_mask = jnp.ones((batch_size, sequence_length)) - - if position_ids is None: - if past_key_values is not None: - raise ValueError( - "Make sure to provide `position_ids` when passing `past_key_values`." - ) - - position_ids = jnp.broadcast_to( - jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) - ) - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - inputs = {"params": params or self.params} - - # if past_key_values are passed then cache is already initialized a private flag init_cache has to be - # passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that - # it can be changed by FlaxGPT2Attention module - if past_key_values: - inputs["cache"] = past_key_values - mutable = ["cache"] - else: - mutable = False - - def _decoder_forward( - module, - input_ids, - attention_mask, - position_ids, - **kwargs, - ): - decoder_module = module._get_decoder_module() - outputs = decoder_module( - input_ids, - attention_mask, - position_ids, - **kwargs, - ) - hidden_states = outputs[0] - - if self.config.tie_word_embeddings: - shared_embedding = module.model.variables["params"]["shared"][ - "embedding" - ] - lm_logits = module.lm_head.apply( - {"params": {"kernel": shared_embedding.T}}, hidden_states - ) - else: - lm_logits = module.lm_head(hidden_states) - - lm_logits += module.final_logits_bias - return lm_logits, outputs - - outputs = self.module.apply( - inputs, - input_ids=jnp.array(input_ids, dtype="i4"), - attention_mask=jnp.array(attention_mask, dtype="i4"), - position_ids=jnp.array(position_ids, dtype="i4"), - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - deterministic=deterministic, - rngs=rngs, - mutable=mutable, - method=_decoder_forward, - ) - - if past_key_values is None: - lm_logits, outputs = outputs - else: - (lm_logits, outputs), past = outputs - - if return_dict: - outputs = FlaxCausalLMOutputWithCrossAttentions( - logits=lm_logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - cross_attentions=outputs.cross_attentions, - ) - else: - outputs = (lm_logits,) + outputs[1:] - - # add updated cache to model output - if past_key_values is not None and return_dict: - outputs["past_key_values"] = unfreeze(past["cache"]) - return outputs - elif past_key_values is not None and not return_dict: - outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] - - return outputs - - def prepare_inputs_for_generation( - self, - input_ids, - max_length, - encoder_attention_mask: Optional[jnp.DeviceArray] = None, - attention_mask: Optional[jnp.DeviceArray] = None, - encoder_outputs=None, - **kwargs, - ): - # initializing the cache - batch_size, seq_length = input_ids.shape - - past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) - # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. - # But since the decoder uses a causal mask, those positions are masked anyways. 
- # Thus we can create a single static attention_mask here, which is more efficient for compilation - extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") - if attention_mask is not None: - position_ids = attention_mask.cumsum(axis=-1) - 1 - extended_attention_mask = lax.dynamic_update_slice( - extended_attention_mask, attention_mask, (0, 0) - ) - else: - position_ids = jnp.broadcast_to( - jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length) - ) - - return { - "past_key_values": past_key_values, - "encoder_outputs": encoder_outputs, - "encoder_attention_mask": encoder_attention_mask, - "attention_mask": extended_attention_mask, - "position_ids": position_ids, - } - - def update_inputs_for_generation(self, model_outputs, model_kwargs): - model_kwargs["past_key_values"] = model_outputs.past_key_values - model_kwargs["position_ids"] = ( - model_kwargs["position_ids"][:, -1:] + 1 - ) - return model_kwargs - - @classmethod - def from_vit_gpt2_pretrained( - cls, - vit_model_name_or_path: str = None, - gpt2_model_name_or_path: str = None, - *model_args, - **kwargs, - ) -> FlaxViTGPT2PreTrainedModel: - - kwargs_gpt2 = { - argument[len("gpt2_") :]: value - for argument, value in kwargs.items() - if argument.startswith("gpt2_") - } - - kwargs_vit = { - argument[len("vit_") :]: value - for argument, value in kwargs.items() - if argument.startswith("vit_") - } - - # remove gpt2, vit kwargs from kwargs - for key in kwargs_gpt2.keys(): - del kwargs["gpt2_" + key] - for key in kwargs_vit.keys(): - del kwargs["vit_" + key] - - # Load and initialize the gpt2 and vit model - gpt2_model = kwargs_gpt2.pop("model", None) - if gpt2_model is None: - assert ( - gpt2_model_name_or_path is not None - ), "If `model` is not defined as an argument, a `gpt2_model_name_or_path` has to be defined" - - if "config" not in kwargs_gpt2: - gpt2_config = GPT2Config.from_pretrained(gpt2_model_name_or_path) - kwargs_gpt2["config"] = gpt2_config - - kwargs_gpt2["config"].add_cross_attention = True - gpt2_model = FlaxGPT2Model.from_pretrained( - gpt2_model_name_or_path, *model_args, **kwargs_gpt2 - ) - - vit_model = kwargs_vit.pop("model", None) - if vit_model is None: - assert ( - vit_model_name_or_path is not None - ), "If `model` is not defined as an argument, a `vit_model_name_or_path` has to be defined" - - if "config" not in kwargs_vit: - vit_config = ViTConfig.from_pretrained(vit_model_name_or_path) - kwargs_vit["config"] = vit_config - - vit_model = FlaxViTModel.from_pretrained( - vit_model_name_or_path, *model_args, **kwargs_vit - ) - - # instantiate config with corresponding kwargs - dtype = kwargs.pop("dtype", jnp.float32) - config = ViTGPT2Config.from_vit_gpt2_configs( - vit_model.config, gpt2_model.config, **kwargs - ) - - # init model - model = cls(config, *model_args, dtype=dtype, **kwargs) - model.params["model"]["encoder"] = vit_model.params - model.params["model"]["decoder"] = gpt2_model.params - - return model - diff --git a/spaces/Seetha/IMA-pipeline-streamlit/app.py b/spaces/Seetha/IMA-pipeline-streamlit/app.py deleted file mode 100644 index f0db4170922ebf1b9a612db7fac952e61207154a..0000000000000000000000000000000000000000 --- a/spaces/Seetha/IMA-pipeline-streamlit/app.py +++ /dev/null @@ -1,581 +0,0 @@ -# import all packages -import requests -import streamlit as st -from sklearn.model_selection import StratifiedKFold -from sklearn.model_selection import train_test_split -from sklearn.model_selection import KFold -# tokenizer -from transformers import AutoTokenizer, 
DistilBertTokenizerFast -# sequence tagging model + training-related -from transformers import DistilBertForTokenClassification, Trainer, TrainingArguments -import torch -import sys -import os -from sklearn.metrics import classification_report -from pandas import read_csv -from sklearn.linear_model import LogisticRegression -import sklearn.model_selection -from sklearn.feature_extraction.text import TfidfTransformer -from sklearn.feature_extraction.text import CountVectorizer -from sklearn.pipeline import Pipeline, FeatureUnion -import math -# from sklearn.metrics import accuracy_score -# from sklearn.metrics import precision_recall_fscore_support -import json -import re -import numpy as np -import pandas as pd -import nltk -nltk.download("punkt") -import string -from sklearn.model_selection import train_test_split -from transformers import AutoTokenizer, Trainer, TrainingArguments, AutoModelForSequenceClassification, AutoConfig -from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler -import itertools -from transformers import TextClassificationPipeline, TFAutoModelForSequenceClassification, AutoTokenizer -from transformers import pipeline -import pickle -import csv -import pdfplumber -import pathlib -import shutil -import webbrowser -from streamlit.components.v1 import html -import streamlit.components.v1 as components -from PyPDF2 import PdfReader -from huggingface_hub import HfApi -import io -from datasets import load_dataset -import time - -import huggingface_hub -from huggingface_hub import Repository -from datetime import datetime -import pathlib as Path -from requests import get -import urllib.request -# import gradio as gr -# from gradio import inputs, outputs -from datasets import load_dataset -from huggingface_hub import HfApi, list_models -import os -from huggingface_hub import HfFileSystem -from tensorflow.keras.models import Sequential, model_from_json -#import tensorflow_datasets as tfds -import tensorflow as tf -from tensorflow.keras.preprocessing.sequence import pad_sequences -import spacy -from tensorflow.keras.preprocessing.text import Tokenizer -#from spacy import en_core_web_lg -#import en_core_web_lg -#nlp = en_core_web_lg.load() -nlp = spacy.load('en_core_web_sm') - -#tfds.disable_progress_bar() -MAX_SEQUENCE_LENGTH = 500 - -# dataset = load_dataset('Seetha/Visualization', streaming=True) -# df = pd.DataFrame.from_dict(dataset['train']) -# DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/Visualization" -# DATA_FILENAME = "level2.json" -#DATA_FILE = os.path.join("data", DATA_FILENAME) -DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/visual_files" -DATA_FILENAME = "detailedResults.json" -DATA_FILENAME1 = "level2.json" - -HF_TOKEN = os.environ.get("HF_TOKEN") -#st.write("is none?", HF_TOKEN is None) - -def main(): - - st.title("Text to Causal Knowledge Graph") - st.sidebar.title("Please upload your text documents in one file here:") - k=2 - seed = 1 - k1= 5 - text_list = [] - causal_sents = [] - - uploaded_file = None - try: - uploaded_file = st.sidebar.file_uploader("Choose a file", type = "pdf") - except: - uploaded_file = PdfReader('sample_anno.pdf') - st.error("Please upload your own PDF to be analyzed") - - if uploaded_file is not None: - reader = PdfReader(uploaded_file) - for page in reader.pages: - text = page.extract_text() - text_list.append(text) - else: - st.error("Please upload your own PDF to be analyzed") - st.stop() - - text_list_final = [x.replace('\n', '') for x in text_list] - text_list_final = 
re.sub('"', '', str(text_list_final)) - - sentences = nltk.sent_tokenize(text_list_final) - - result =[] - for i in sentences: - result1 = i.lower() - result2 = re.sub(r'[^\w\s]','',result1) - result.append(result2) - - #st.write("--- %s seconds ---" % (time.time() - start_time)) - tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") #bert-base-uncased - - model_path = "checkpoint-2850" - - model = AutoModelForSequenceClassification.from_pretrained(model_path,id2label={0:'non-causal',1:'causal'}) - - #st.write('sequence classification loaded') - pipe1 = pipeline("text-classification", model=model,tokenizer=tokenizer) - for sent in result: - pred = pipe1(sent) - for lab in pred: - if lab['label'] == 'causal': #causal - causal_sents.append(sent) - - # st.write('causal sentence classification finished') - # st.write("--- %s seconds ---" % (time.time() - start_time)) - - model_name = "distilbert-base-cased" - tokenizer = DistilBertTokenizerFast.from_pretrained(model_name,low_cpu_mem_usage=True) - - model_path1 = "DistilBertforTokenclassification" - - model = DistilBertForTokenClassification.from_pretrained(model_path1,low_cpu_mem_usage=True) #len(unique_tags),, num_labels= 7, , id2label={0:'CT',1:'E',2:'C',3:'O'} - pipe = pipeline('ner', model=model, tokenizer=tokenizer,aggregation_strategy='simple') #grouped_entities=True - st.write('DistilBERT loaded') - sentence_pred = [] - class_list = [] - entity_list = [] - for k in causal_sents: - pred= pipe(k) - #st.write(pred) - #st.write('preds') - for i in pred: - sentence_pred.append(k) - class_list.append(i['word']) - entity_list.append(i['entity_group']) - - # st.write('causality extraction finished') - # st.write("--- %s seconds ---" % (time.time() - start_time)) - - # filename = 'Checkpoint-classification.sav' - # loaded_model = pickle.load(open(filename, 'rb')) - # loaded_vectorizer = pickle.load(open('vectorizefile_classification.pickle', 'rb')) - - # pipeline_test_output = loaded_vectorizer.transform(class_list) - # predicted = loaded_model.predict(pipeline_test_output) - - tokenizer = Tokenizer(num_words=100000) - tokenizer.fit_on_texts(class_list) - word_index = tokenizer.word_index - # text_embedding = np.zeros((len(word_index) + 1, 300)) - # for word, i in word_index.items(): - # text_embedding[i] = nlp(word).vector - json_file = open('model.json', 'r') - loaded_model_json = json_file.read() - json_file.close() - loaded_model = model_from_json(loaded_model_json) - # load weights into new model - loaded_model.load_weights("model.h5") - - loss = tf.keras.losses.CategoricalCrossentropy() #from_logits=True - loaded_model.compile(loss=loss,optimizer=tf.keras.optimizers.Adam(1e-4)) - - predictions = loaded_model.predict(pad_sequences(tokenizer.texts_to_sequences(class_list),maxlen=MAX_SEQUENCE_LENGTH)) - predicted = np.argmax(predictions,axis=1) - - st.write(predictions) - st.write(predicted) - # st.write('stakeholder taxonomy finished') - # st.write("--- %s seconds ---" % (time.time() - start_time)) - pred1 = predicted - level0 = [] - count =0 - for i in predicted: - if i == 3: - level0.append('Non-Performance') - count +=1 - else: - level0.append('Performance') - count +=1 - - list_pred = {0: 'Customers',1:'Employees',2:'Investors',3:'Non-performance',4:'Society',5:'Unclassified'} - pred_val = [list_pred[i] for i in pred1] - - #print('count',count) - for ind,(sent,preds) in enumerate(zip(class_list,pred_val)): - if 'customers' in sent or 'client' in sent or 'consumer' in sent or 'user' in sent: - pred_val[ind] = 'Customers' - elif 
'investor' in sent or 'finance' in sent or 'shareholder' in sent or 'stockholder' in sent or 'owners' in sent: - pred_val[ind] = 'Investors' - elif 'employee' in sent or 'worker' in sent or 'staff' in sent: - pred_val[ind] = 'Employees' - elif 'society' in sent or 'societal' in sent or 'social responsib*' in sent or 'social performance' in sent or 'community' in sent: - pred_val[ind] = 'Society' - - sent_id, unique = pd.factorize(sentence_pred) - - final_list = pd.DataFrame( - {'Id': sent_id, - 'Fullsentence': sentence_pred, - 'Component': class_list, - 'causeOrEffect': entity_list, - 'Labellevel1': level0, - 'Labellevel2': pred_val - }) - s = final_list['Component'].shift(-1) - m = s.str.startswith('##', na=False) - final_list.loc[m, 'Component'] += (' ' + s[m]) - - - final_list1 = final_list[~final_list['Component'].astype(str).str.startswith('##')] - li = [] - uni = final_list1['Id'].unique() - for i in uni: - df_new = final_list1[final_list1['Id'] == i] - uni1 = df_new['Id'].unique() - - # if 'E' not in df_new.values: - # li.append(uni1) - # out = np.concatenate(li).ravel() - # li_pan = pd.DataFrame(out,columns=['Id']) - # df3 = pd.merge(final_list1, li_pan[['Id']], on='Id', how='left', indicator=True) \ - # .query("_merge == 'left_only'") \ - # .drop("_merge",axis=1) - - df3 = final_list1 - #df = df3.groupby(['Id','Fullsentence','causeOrEffect', 'Labellevel1', 'Labellevel2'])['Component'].apply(', '.join).reset_index() - #st.write(df) - - #df = df3 - df3["causeOrEffect"].replace({"C": "cause", "E": "effect"}, inplace=True) - df_final = df3[df3['causeOrEffect'] != 'CT'] - df3['New string'] = df_final['Component'].replace(r'[##]+', ' ', regex=True) - - df_final = df_final.drop("Component",axis=1) - df_final.insert(2, "Component", df3['New string'], True) - - df_final1 = df_final[df_final['Component'].str.split().str.len().gt(1)] - #st.write(df_final[df_final['Component'].str.len() != 1]) - #df_final1.to_csv('predictions.csv') - -# buffer = io.BytesIO() -# with pd.ExcelWriter(buffer, engine="xlsxwriter") as writer: -# df_final.to_excel(writer, sheet_name="Sheet1", index=False) -# writer.close() - - count_NP_NP = 0 - count_NP_investor = 0 - count_NP_customer = 0 - count_NP_employees = 0 - count_NP_society = 0 - - count_inv_np = 0 - count_inv_investor = 0 - count_inv_customer = 0 - count_inv_employee = 0 - count_inv_society = 0 - - count_cus_np = 0 - count_cus_investor = 0 - count_cus_customer = 0 - count_cus_employee = 0 - count_cus_society = 0 - - count_emp_np = 0 - count_emp_investor = 0 - count_emp_customer = 0 - count_emp_employee = 0 - count_emp_society = 0 - - count_soc_np = 0 - count_soc_investor = 0 - count_soc_customer = 0 - count_soc_employee = 0 - count_soc_society = 0 - for i in range(0,df_final['Id'].max()): - j = df_final.loc[df_final['Id'] == i] - cause_tab = j.loc[j['causeOrEffect'] == 'cause'] - effect_tab = j.loc[j['causeOrEffect'] == 'effect'] - cause_coun_NP = (cause_tab.Labellevel2 == 'Non-performance').sum() - effect_coun_NP = (effect_tab.Labellevel2 == 'Non-performance').sum() - - if (cause_coun_NP > 0) and (effect_coun_NP > 0): - count_NP = cause_coun_NP if cause_coun_NP >= effect_coun_NP else effect_coun_NP - else: - count_NP = 0 - effect_NP_inv = (effect_tab.Labellevel2 == 'Investors').sum() - if (cause_coun_NP > 0) and (effect_NP_inv > 0): - count_NP_inv = cause_coun_NP if cause_coun_NP >= effect_NP_inv else effect_NP_inv - else: - count_NP_inv = 0 - effect_NP_cus = (effect_tab.Labellevel2 == 'Customers').sum() - if (cause_coun_NP > 0) and (effect_NP_cus > 0): - 
count_NP_cus = cause_coun_NP if cause_coun_NP >= effect_NP_cus else effect_NP_cus - else: - count_NP_cus = 0 - effect_NP_emp = (effect_tab.Labellevel2 == 'Employees').sum() - if (cause_coun_NP > 0) and (effect_NP_emp > 0): - count_NP_emp = cause_coun_NP if cause_coun_NP >= effect_NP_emp else effect_NP_emp - else: - count_NP_emp = 0 - effect_NP_soc = (effect_tab.Labellevel2 == 'Society').sum() - if (cause_coun_NP > 0) and (effect_NP_soc > 0): - count_NP_soc = cause_coun_NP if cause_coun_NP >= effect_NP_soc else effect_NP_soc - else: - count_NP_soc = 0 - - cause_coun_inv = (cause_tab.Labellevel2 == 'Investors').sum() - effect_coun_inv = (effect_tab.Labellevel2 == 'Non-performance').sum() - if (cause_coun_inv > 0) and (effect_coun_inv > 0): - count_NP_inv = cause_coun_inv if cause_coun_inv >= effect_coun_inv else effect_coun_inv - else: - count_NP_inv = 0 - - effect_inv_inv = (effect_tab.Labellevel2 == 'Investors').sum() - if (cause_coun_inv > 0) and (effect_inv_inv > 0): - count_inv_inv = cause_coun_inv if cause_coun_inv >= effect_inv_inv else effect_inv_inv - else: - count_inv_inv = 0 - effect_inv_cus = (effect_tab.Labellevel2 == 'Customers').sum() - if (cause_coun_inv > 0) and (effect_inv_cus > 0): - count_inv_cus = cause_coun_inv if cause_coun_inv >= effect_inv_cus else effect_inv_cus - else: - count_inv_cus = 0 - effect_inv_emp = (effect_tab.Labellevel2 == 'Employees').sum() - if (cause_coun_inv > 0) and (effect_inv_emp > 0): - count_inv_emp = cause_coun_inv if cause_coun_inv >= effect_inv_emp else effect_inv_emp - else: - count_inv_emp = 0 - - effect_inv_soc = (effect_tab.Labellevel2 == 'Society').sum() - if (cause_coun_inv > 0) and (effect_inv_soc > 0): - count_inv_soc = cause_coun_inv if cause_coun_inv >= effect_inv_soc else effect_inv_soc - else: - count_inv_soc = 0 - - cause_coun_cus = (cause_tab.Labellevel2 == 'Customers').sum() - effect_coun_cus = (effect_tab.Labellevel2 == 'Non-performance').sum() - if (cause_coun_cus > 0) and (effect_coun_cus > 0): - count_NP_cus = cause_coun_cus if cause_coun_cus >= effect_coun_cus else effect_coun_cus - else: - count_NP_cus = 0 - - effect_cus_inv = (effect_tab.Labellevel2 == 'Investors').sum() - if (cause_coun_cus > 0) and (effect_cus_inv > 0): - count_cus_inv = cause_coun_cus if cause_coun_cus >= effect_cus_inv else effect_cus_inv - else: - count_cus_inv = 0 - - effect_cus_cus = (effect_tab.Labellevel2 == 'Customers').sum() - if (cause_coun_cus > 0) and (effect_cus_cus > 0): - count_cus_cus = cause_coun_cus if cause_coun_cus >= effect_cus_cus else effect_cus_cus - else: - count_cus_cus = 0 - - effect_cus_emp = (effect_tab.Labellevel2 == 'Employees').sum() - if (cause_coun_cus > 0) and (effect_cus_emp > 0): - count_cus_emp = cause_coun_cus if cause_coun_cus >= effect_cus_emp else effect_cus_emp - else: - count_cus_emp = 0 - - effect_cus_soc = (effect_tab.Labellevel2 == 'Society').sum() - if (cause_coun_cus > 0) and (effect_cus_soc > 0): - count_cus_soc = cause_coun_cus if cause_coun_cus >= effect_cus_soc else effect_cus_soc - else: - count_cus_soc = 0 - - cause_coun_emp = (cause_tab.Labellevel2 == 'Employees').sum() - effect_coun_emp = (effect_tab.Labellevel2 == 'Non-performance').sum() - if (cause_coun_emp > 0) and (effect_coun_emp > 0): - count_NP_emp = cause_coun_emp if cause_coun_emp >= effect_coun_emp else effect_coun_emp - else: - count_NP_emp = 0 - - effect_emp_inv = (effect_tab.Labellevel2 == 'Investors').sum() - if (cause_coun_emp > 0) and (effect_emp_inv > 0): - count_emp_inv = cause_coun_emp if cause_coun_emp >= effect_emp_inv else 
effect_emp_inv - else: - count_emp_inv = 0 - - effect_emp_cus = (effect_tab.Labellevel2 == 'Customers').sum() - if (cause_coun_emp > 0) and (effect_emp_cus > 0): - count_emp_cus = cause_coun_emp if cause_coun_emp >= effect_emp_cus else effect_emp_cus - else: - count_emp_cus = 0 - - effect_emp_emp = (effect_tab.Labellevel2 == 'Employees').sum() - if (cause_coun_emp > 0) and (effect_emp_emp > 0): - count_emp_emp = cause_coun_emp if cause_coun_emp >= effect_emp_emp else effect_emp_emp - else: - count_emp_emp = 0 - - effect_emp_soc = (effect_tab.Labellevel2 == 'Society').sum() - if (cause_coun_emp > 0) and (effect_emp_soc > 0): - count_emp_soc = cause_coun_emp if cause_coun_emp >= effect_emp_soc else effect_emp_soc - else: - count_emp_soc = 0 - - cause_coun_soc = (cause_tab.Labellevel2 == 'Society').sum() - effect_coun_soc = (effect_tab.Labellevel2 == 'Non-performance').sum() - if (cause_coun_soc > 0) and (effect_coun_soc > 0): - count_NP_soc = cause_coun_soc if cause_coun_soc >= effect_coun_soc else effect_coun_soc - else: - count_NP_soc = 0 - - effect_soc_inv = (effect_tab.Labellevel2 == 'Investors').sum() - if (cause_coun_soc > 0) and (effect_soc_inv > 0): - count_soc_inv = cause_coun_soc if cause_coun_soc >= effect_soc_inv else effect_soc_inv - else: - count_soc_inv = 0 - - effect_soc_cus = (effect_tab.Labellevel2 == 'Customers').sum() - if (cause_coun_soc > 0) and (effect_soc_cus > 0): - count_soc_cus = cause_coun_soc if cause_coun_soc >= effect_soc_cus else effect_soc_cus - else: - count_soc_cus = 0 - - effect_soc_emp = (effect_tab.Labellevel2 == 'Employees').sum() - if (cause_coun_soc > 0) and (effect_soc_emp > 0): - count_soc_emp = cause_coun_soc if cause_coun_soc >= effect_soc_emp else effect_soc_emp - else: - count_soc_emp = 0 - - effect_soc_soc = (effect_tab.Labellevel2 == 'Society').sum() - if (cause_coun_soc > 0) and (effect_soc_soc > 0): - count_soc_soc = cause_coun_soc if cause_coun_soc >= effect_soc_soc else effect_soc_soc - else: - count_soc_soc = 0 - - count_NP_NP = count_NP_NP + count_NP - count_NP_investor = count_NP_investor + count_NP_inv - count_NP_customer = count_NP_customer + count_NP_cus - count_NP_employees = count_NP_employees + count_NP_emp - count_NP_society = count_NP_society + count_NP_soc - - count_inv_np = count_inv_np + count_NP_inv - count_inv_investor = count_inv_investor + count_inv_inv - count_inv_customer = count_inv_customer + count_inv_cus - count_inv_employee = count_inv_employee + count_inv_emp - count_inv_society = count_inv_society + count_inv_soc - - count_cus_np = count_cus_np + count_NP_cus - count_cus_investor = count_cus_investor + count_cus_inv - count_cus_customer = count_cus_customer + count_cus_cus - count_cus_employee = count_cus_employee + count_cus_emp - count_cus_society = count_cus_society + count_cus_soc - - count_emp_np = count_emp_np + count_NP_emp - count_emp_investor = count_emp_investor + count_emp_inv - count_emp_customer = count_emp_customer + count_emp_cus - count_emp_employee = count_emp_employee + count_emp_emp - count_emp_society = count_emp_society + count_emp_soc - - count_soc_np = count_soc_np + count_NP_soc - count_soc_investor = count_soc_investor + count_soc_inv - count_soc_customer = count_soc_customer + count_soc_cus - count_soc_employee = count_soc_employee + count_soc_emp - count_soc_society = count_soc_society + count_soc_soc - - df_tab = pd.DataFrame(columns = ['Non-performance', 'Investors', 'Customers', 'Employees', 'Society'],index=['Non-performance', 'Investors', 'Customers', 'Employees', 'Society'], 
dtype=object) - - df_tab.loc['Non-performance'] = [count_NP_NP, count_NP_investor, count_NP_customer, count_NP_employees, count_NP_society] - df_tab.loc['Investors'] = [count_inv_np, count_inv_investor, count_inv_customer, count_inv_employee, count_inv_society] - df_tab.loc['Customers'] = [count_cus_np, count_cus_investor, count_cus_customer, count_cus_employee, count_cus_society] - df_tab.loc['Employees'] = [count_emp_np, count_emp_investor, count_emp_customer, count_emp_employee, count_emp_society] - df_tab.loc['Society'] = [count_soc_np, count_soc_investor, count_soc_customer, count_soc_employee, count_soc_society] - - -# df_tab = pd.DataFrame({ -# 'Non-performance': [count_NP_NP, count_NP_investor, count_NP_customer, count_NP_employees, count_NP_society], -# 'Investors': [count_inv_np, count_inv_investor, count_inv_customer, count_inv_employee, count_inv_society], -# 'Customers': [count_cus_np, count_cus_investor, count_cus_customer, count_cus_employee, count_cus_society], -# 'Employees': [count_emp_np, count_emp_investor, count_emp_customer, count_emp_employee, count_emp_society], -# 'Society': [count_soc_np, count_soc_investor, count_soc_customer, count_soc_employee, count_soc_society]}, -# index=['Non-performance', 'Investors', 'Customers', 'Employees', 'Society']) - - #df_tab.to_csv('final_data.csv') - - buffer = io.BytesIO() - with pd.ExcelWriter(buffer, engine="xlsxwriter") as writer: - df_tab.to_excel(writer,sheet_name="count_result",index=False) - df_final1.to_excel(writer,sheet_name="Detailed_results",index=False) - writer.close() - #df = pd.read_csv('final_data.csv', index_col=0) -#474-515 - # Convert to JSON format - json_data = [] - for row in df_tab.index: - for col in df_tab.columns: - json_data.append({ - 'source': row, - 'target': col, - 'value': int(df_tab.loc[row, col]) - }) - - HfApi().delete_file(path_in_repo = DATA_FILENAME1 ,repo_id = 'Seetha/visual_files',token= HF_TOKEN,repo_type='dataset') - #st.write('file-deleted') - fs = HfFileSystem(token=HF_TOKEN) - with fs.open('datasets/Seetha/visual_files/level2.json', 'w') as f: - json.dump(json_data, f) - - df_final1.to_csv('predictions.csv') - csv_file = "predictions.csv" - json_file = "detailedResults.json" - - # Open the CSV file and read the data - with open(csv_file, "r") as f: - csv_data = csv.DictReader(f) - - # # Convert the CSV data to a list of dictionaries - data_list = [] - for row in csv_data: - data_list.append(dict(row)) - - # # Convert the list of dictionaries to JSON - json_data = json.dumps(data_list) - - HfApi().delete_file(path_in_repo = DATA_FILENAME ,repo_id = 'Seetha/visual_files',token= HF_TOKEN,repo_type='dataset') - #st.write('file2-deleted') - with fs.open('datasets/Seetha/visual_files/detailedResults.json','w') as fi: - #data = json.load(fi) - fi.write(json_data) - - def convert_df(df): - - #IMPORTANT: Cache the conversion to prevent computation on every rerun - - return df.to_csv().encode('utf-8') - - - - csv1 = convert_df(df_final1.astype(str)) - csv2 = convert_df(df_tab.astype(str)) - - with st.container(): - - st.download_button(label="Download the result table",data=buffer,file_name="t2cg_outputs.xlsx",mime="application/vnd.ms-excel") - st.markdown('Click this link in a separate tab to view knowledge graph', unsafe_allow_html=True) - # st.download_button(label="Download the detailed result table_csv",data=csv1,file_name='results.csv',mime='text/csv') - # st.download_button(label="Download the result table_csv",data=csv2,file_name='final_data.csv',mime='text/csv') - -#with 
st.container(): - # Execute your app - #st.title("Visualization example") -# components.html(source_code) - #html(my_html) - #webbrowser.open('https://huggingface.co/spaces/Seetha/visual-knowledgegraph') -# # embed streamlit docs in a streamlit app -# #components.iframe("https://webpages.charlotte.edu/ltotapal/") - - - -if __name__ == '__main__': - start_time = time.time() - main() diff --git a/spaces/Shiro26/MendoBERT_RE/app.py b/spaces/Shiro26/MendoBERT_RE/app.py deleted file mode 100644 index 1bf256334d6604b5048b7f1054c9cb1915120f15..0000000000000000000000000000000000000000 --- a/spaces/Shiro26/MendoBERT_RE/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import streamlit as st -from transformers import pipeline - -model = pipeline("text-classification", model="/home/user/app/MendoBERT/", tokenizer="indolem/indobert-base-uncased") -basemodel = pipeline("text-classification", model="/home/user/app/IndoLEM/", tokenizer="indolem/indobert-base-uncased") - -st.title(':blue[MendoBERT] - Relation Extraction :family:') - -if 'options' not in st.session_state: - st.session_state['options'] = "" - -def button1_callback(): - st.session_state['options'] = "Kami menyimpulkan bahwa polimorfisme @GENE$ dan AGT tidak berkontribusi pada kerentanan genetik terhadap @DISEASE$ dan retinopati pada populasi Kaukasia Mediterania." -def button2_callback(): - st.session_state['options'] = "Genotipe bayi PON1 RR dan @GENE$ CC dikaitkan dengan @DISEASE$ dalam populasi penelitian kami, yang menunjukkan kemungkinan peran variabilitas paraoxonase manusia dalam etiologi kelahiran prematur." - - -placeholder = st.empty() - -st.info("Please replace the gene and disease that you want to get their relation predicted with @GENE\$ and \@DISEASE\$", icon="ℹ️") - -st.caption('_Examples_') -st.button('Kami menyimpulkan bahwa polimorfisme \@GENE\$ dan AGT tidak berkontribusi pada kerentanan genetik terhadap \@DISEASE\$ dan retinopati pada populasi Kaukasia Mediterania.', use_container_width=True, on_click = button1_callback) -st.button('Genotipe bayi PON1 RR dan \@GENE\$ CC dikaitkan dengan \@DISEASE\$ dalam populasi penelitian kami, yang menunjukkan kemungkinan peran variabilitas paraoxonase manusia dalam etiologi kelahiran prematur.', use_container_width=True, on_click = button2_callback) - -with placeholder: - text = st.text_area('Enter some text: ', key = 'options') - -if text: - st.subheader('MendoBERT') - st.write(model(text)) - st.write("\n") - st.subheader('IndoLEM') - st.write(basemodel(text)) \ No newline at end of file diff --git a/spaces/Shredder/CONBERT-2/README.md b/spaces/Shredder/CONBERT-2/README.md deleted file mode 100644 index daf10e5da8dd31671c54c18d24b0e7d1d77b62f0..0000000000000000000000000000000000000000 --- a/spaces/Shredder/CONBERT-2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: CONBERT 2 -emoji: 👁 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Stearns/Soar/soar_io.py b/spaces/Stearns/Soar/soar_io.py deleted file mode 100644 index c05d97417b69a804b35207fb839957a91258a5d2..0000000000000000000000000000000000000000 --- a/spaces/Stearns/Soar/soar_io.py +++ /dev/null @@ -1,35 +0,0 @@ -from pysoarlib import * - -## DEFINE MAIN CONNECTOR CLASS - -class GeneralSoarIOConnector(AgentConnector): - def __init__(self, client): - AgentConnector.__init__(self, client) - client.add_print_event_handler(self.agent_print_collector) - # 
client.execute_command("output callbacks on") - # client.execute_command("output console on") - - self.agent_printout = "" - - def on_input_phase(self, input_link): - pass - - def on_init_soar(self): - self.reset() - - def on_output_event(self, command_name, root_id): - pass - - - def agent_print_collector(self, text): - self.agent_printout += text+"\n" - - def get_agent_output(self): - return self.agent_printout - - def reset(self): - self.agent_printout = "" - - - - diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/process.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/process.py deleted file mode 100644 index 489b7c13d0ce82b769ba50412ecde0889d0b7851..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/process.py +++ /dev/null @@ -1,69 +0,0 @@ -# encoding: utf-8 -""" -Utilities for working with external processes. -""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - - -import os -import shutil -import sys - -if sys.platform == 'win32': - from ._process_win32 import system, getoutput, arg_split, check_pid -elif sys.platform == 'cli': - from ._process_cli import system, getoutput, arg_split, check_pid -else: - from ._process_posix import system, getoutput, arg_split, check_pid - -from ._process_common import getoutputerror, get_output_error_code, process_handler - - -class FindCmdError(Exception): - pass - - -def find_cmd(cmd): - """Find absolute path to executable cmd in a cross platform manner. - - This function tries to determine the full path to a command line program - using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the - time it will use the version that is first on the users `PATH`. - - Warning, don't use this to find IPython command line programs as there - is a risk you will find the wrong one. Instead find those using the - following code and looking for the application itself:: - - import sys - argv = [sys.executable, '-m', 'IPython'] - - Parameters - ---------- - cmd : str - The command line program to look for. - """ - path = shutil.which(cmd) - if path is None: - raise FindCmdError('command could not be found: %s' % cmd) - return path - - -def abbrev_cwd(): - """ Return abbreviated version of cwd, e.g. 
d:mydir """ - cwd = os.getcwd().replace('\\','/') - drivepart = '' - tail = cwd - if sys.platform == 'win32': - if len(cwd) < 4: - return cwd - drivepart,tail = os.path.splitdrive(cwd) - - - parts = tail.split('/') - if len(parts) > 2: - tail = '/'.join(parts[-2:]) - - return (drivepart + ( - cwd == '/' and '/' or tail)) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/altair/utils/plugin_registry.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/altair/utils/plugin_registry.py deleted file mode 100644 index 37d3db222ef2c7920628971a92e863d9915514c6..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/altair/utils/plugin_registry.py +++ /dev/null @@ -1,228 +0,0 @@ -import sys -from typing import Any, Dict, List, Optional, Generic, TypeVar, cast -from types import TracebackType - -if sys.version_info >= (3, 8): - from importlib.metadata import entry_points -else: - from importlib_metadata import entry_points - -from toolz import curry - - -PluginType = TypeVar("PluginType") - - -class NoSuchEntryPoint(Exception): - def __init__(self, group, name): - self.group = group - self.name = name - - def __str__(self): - return f"No {self.name!r} entry point found in group {self.group!r}" - - -class PluginEnabler: - """Context manager for enabling plugins - - This object lets you use enable() as a context manager to - temporarily enable a given plugin:: - - with plugins.enable('name'): - do_something() # 'name' plugin temporarily enabled - # plugins back to original state - """ - - def __init__(self, registry: "PluginRegistry", name: str, **options): - self.registry = registry # type: PluginRegistry - self.name = name # type: str - self.options = options # type: Dict[str, Any] - self.original_state = registry._get_state() # type: Dict[str, Any] - self.registry._enable(name, **options) - - def __enter__(self) -> "PluginEnabler": - return self - - def __exit__(self, typ: type, value: Exception, traceback: TracebackType) -> None: - self.registry._set_state(self.original_state) - - def __repr__(self) -> str: - return "{}.enable({!r})".format(self.registry.__class__.__name__, self.name) - - -class PluginRegistry(Generic[PluginType]): - """A registry for plugins. - - This is a plugin registry that allows plugins to be loaded/registered - in two ways: - - 1. Through an explicit call to ``.register(name, value)``. - 2. By looking for other Python packages that are installed and provide - a setuptools entry point group. - - When you create an instance of this class, provide the name of the - entry point group to use:: - - reg = PluginRegister('my_entrypoint_group') - - """ - - # this is a mapping of name to error message to allow custom error messages - # in case an entrypoint is not found - entrypoint_err_messages = {} # type: Dict[str, str] - - # global settings is a key-value mapping of settings that are stored globally - # in the registry rather than passed to the plugins - _global_settings = {} # type: Dict[str, Any] - - def __init__(self, entry_point_group: str = "", plugin_type: type = object): - """Create a PluginRegistry for a named entry point group. - - Parameters - ========== - entry_point_group: str - The name of the entry point group. - plugin_type: object - A type that will optionally be used for runtime type checking of - loaded plugins using isinstance. 
- """ - self.entry_point_group = entry_point_group # type: str - self.plugin_type = plugin_type # type: Optional[type] - self._active = None # type: Optional[PluginType] - self._active_name = "" # type: str - self._plugins = {} # type: Dict[str, PluginType] - self._options = {} # type: Dict[str, Any] - self._global_settings = self.__class__._global_settings.copy() # type: dict - - def register(self, name: str, value: Optional[PluginType]) -> Optional[PluginType]: - """Register a plugin by name and value. - - This method is used for explicit registration of a plugin and shouldn't be - used to manage entry point managed plugins, which are auto-loaded. - - Parameters - ========== - name: str - The name of the plugin. - value: PluginType or None - The actual plugin object to register or None to unregister that plugin. - - Returns - ======= - plugin: PluginType or None - The plugin that was registered or unregistered. - """ - if value is None: - return self._plugins.pop(name, None) - else: - assert isinstance(value, self.plugin_type) # type: ignore[arg-type] # Should ideally be fixed by better annotating plugin_type - self._plugins[name] = value - return value - - def names(self) -> List[str]: - """List the names of the registered and entry points plugins.""" - exts = list(self._plugins.keys()) - e_points = importlib_metadata_get(self.entry_point_group) - more_exts = [ep.name for ep in e_points] - exts.extend(more_exts) - return sorted(set(exts)) - - def _get_state(self) -> Dict[str, Any]: - """Return a dictionary representing the current state of the registry""" - return { - "_active": self._active, - "_active_name": self._active_name, - "_plugins": self._plugins.copy(), - "_options": self._options.copy(), - "_global_settings": self._global_settings.copy(), - } - - def _set_state(self, state: Dict[str, Any]) -> None: - """Reset the state of the registry""" - assert set(state.keys()) == { - "_active", - "_active_name", - "_plugins", - "_options", - "_global_settings", - } - for key, val in state.items(): - setattr(self, key, val) - - def _enable(self, name: str, **options) -> None: - if name not in self._plugins: - try: - (ep,) = [ - ep - for ep in importlib_metadata_get(self.entry_point_group) - if ep.name == name - ] - except ValueError as err: - if name in self.entrypoint_err_messages: - raise ValueError(self.entrypoint_err_messages[name]) from err - else: - raise NoSuchEntryPoint(self.entry_point_group, name) from err - value = cast(PluginType, ep.load()) - self.register(name, value) - self._active_name = name - self._active = self._plugins[name] - for key in set(options.keys()) & set(self._global_settings.keys()): - self._global_settings[key] = options.pop(key) - self._options = options - - def enable(self, name: Optional[str] = None, **options) -> PluginEnabler: - """Enable a plugin by name. - - This can be either called directly, or used as a context manager. - - Parameters - ---------- - name : string (optional) - The name of the plugin to enable. If not specified, then use the - current active name. 
- **options : - Any additional parameters will be passed to the plugin as keyword - arguments - - Returns - ------- - PluginEnabler: - An object that allows enable() to be used as a context manager - """ - if name is None: - name = self.active - return PluginEnabler(self, name, **options) - - @property - def active(self) -> str: - """Return the name of the currently active plugin""" - return self._active_name - - @property - def options(self) -> Dict[str, Any]: - """Return the current options dictionary""" - return self._options - - def get(self) -> Optional[PluginType]: - """Return the currently active plugin.""" - if self._options: - return curry(self._active, **self._options) - else: - return self._active - - def __repr__(self) -> str: - return "{}(active={!r}, registered={!r})" "".format( - self.__class__.__name__, self._active_name, list(self.names()) - ) - - -def importlib_metadata_get(group): - ep = entry_points() - # 'select' was introduced in Python 3.10 and 'get' got deprecated - # We don't check for Python version here as by checking with hasattr we - # also get compatibility with the importlib_metadata package which had a different - # deprecation cycle for 'get' - if hasattr(ep, "select"): - return ep.select(group=group) - else: - return ep.get(group, []) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/streams/text.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/streams/text.py deleted file mode 100644 index bba2d3f7dfffa3bdbf921bdad4ca7143be97c2fd..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/streams/text.py +++ /dev/null @@ -1,143 +0,0 @@ -from __future__ import annotations - -import codecs -from dataclasses import InitVar, dataclass, field -from typing import Any, Callable, Mapping - -from ..abc import ( - AnyByteReceiveStream, - AnyByteSendStream, - AnyByteStream, - ObjectReceiveStream, - ObjectSendStream, - ObjectStream, -) - - -@dataclass(eq=False) -class TextReceiveStream(ObjectReceiveStream[str]): - """ - Stream wrapper that decodes bytes to strings using the given encoding. - - Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any completely - received unicode characters as soon as they come in. - - :param transport_stream: any bytes-based receive stream - :param encoding: character encoding to use for decoding bytes to strings (defaults to - ``utf-8``) - :param errors: handling scheme for decoding errors (defaults to ``strict``; see the - `codecs module documentation`_ for a comprehensive list of options) - - .. 
_codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects - """ - - transport_stream: AnyByteReceiveStream - encoding: InitVar[str] = "utf-8" - errors: InitVar[str] = "strict" - _decoder: codecs.IncrementalDecoder = field(init=False) - - def __post_init__(self, encoding: str, errors: str) -> None: - decoder_class = codecs.getincrementaldecoder(encoding) - self._decoder = decoder_class(errors=errors) - - async def receive(self) -> str: - while True: - chunk = await self.transport_stream.receive() - decoded = self._decoder.decode(chunk) - if decoded: - return decoded - - async def aclose(self) -> None: - await self.transport_stream.aclose() - self._decoder.reset() - - @property - def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: - return self.transport_stream.extra_attributes - - -@dataclass(eq=False) -class TextSendStream(ObjectSendStream[str]): - """ - Sends strings to the wrapped stream as bytes using the given encoding. - - :param AnyByteSendStream transport_stream: any bytes-based send stream - :param str encoding: character encoding to use for encoding strings to bytes (defaults to - ``utf-8``) - :param str errors: handling scheme for encoding errors (defaults to ``strict``; see the - `codecs module documentation`_ for a comprehensive list of options) - - .. _codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects - """ - - transport_stream: AnyByteSendStream - encoding: InitVar[str] = "utf-8" - errors: str = "strict" - _encoder: Callable[..., tuple[bytes, int]] = field(init=False) - - def __post_init__(self, encoding: str) -> None: - self._encoder = codecs.getencoder(encoding) - - async def send(self, item: str) -> None: - encoded = self._encoder(item, self.errors)[0] - await self.transport_stream.send(encoded) - - async def aclose(self) -> None: - await self.transport_stream.aclose() - - @property - def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: - return self.transport_stream.extra_attributes - - -@dataclass(eq=False) -class TextStream(ObjectStream[str]): - """ - A bidirectional stream that decodes bytes to strings on receive and encodes strings to bytes on - send. - - Extra attributes will be provided from both streams, with the receive stream providing the - values in case of a conflict. - - :param AnyByteStream transport_stream: any bytes-based stream - :param str encoding: character encoding to use for encoding/decoding strings to/from bytes - (defaults to ``utf-8``) - :param str errors: handling scheme for encoding errors (defaults to ``strict``; see the - `codecs module documentation`_ for a comprehensive list of options) - - .. 
_codecs module documentation: https://docs.python.org/3/library/codecs.html#codec-objects - """ - - transport_stream: AnyByteStream - encoding: InitVar[str] = "utf-8" - errors: InitVar[str] = "strict" - _receive_stream: TextReceiveStream = field(init=False) - _send_stream: TextSendStream = field(init=False) - - def __post_init__(self, encoding: str, errors: str) -> None: - self._receive_stream = TextReceiveStream( - self.transport_stream, encoding=encoding, errors=errors - ) - self._send_stream = TextSendStream( - self.transport_stream, encoding=encoding, errors=errors - ) - - async def receive(self) -> str: - return await self._receive_stream.receive() - - async def send(self, item: str) -> None: - await self._send_stream.send(item) - - async def send_eof(self) -> None: - await self.transport_stream.send_eof() - - async def aclose(self) -> None: - await self._send_stream.aclose() - await self._receive_stream.aclose() - - @property - def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: - return { - **self._send_stream.extra_attributes, - **self._receive_stream.extra_attributes, - } diff --git a/spaces/TH5314/newbing/src/lib/bots/bing/types.ts b/spaces/TH5314/newbing/src/lib/bots/bing/types.ts deleted file mode 100644 index 02cd5e8b01e3529642d28dc1539bf958f4ac420b..0000000000000000000000000000000000000000 --- a/spaces/TH5314/newbing/src/lib/bots/bing/types.ts +++ /dev/null @@ -1,259 +0,0 @@ -export type Author = 'user' | 'system' | 'bot' - -export type BotId = 'bing' - -export enum BingConversationStyle { - Creative = 'Creative', - Balanced = 'Balanced', - Precise = 'Precise' -} - -export enum ErrorCode { - CONVERSATION_LIMIT = 'CONVERSATION_LIMIT', - BING_UNAUTHORIZED = 'BING_UNAUTHORIZED', - BING_FORBIDDEN = 'BING_FORBIDDEN', - BING_CAPTCHA = 'BING_CAPTCHA', - THROTTLE_LIMIT = 'THROTTLE_LIMIT', - NOTFOUND_ERROR = 'NOT_FOUND_ERROR', - UNKOWN_ERROR = 'UNKOWN_ERROR', - NETWORK_ERROR = 'NETWORK_ERROR', -} - -export class ChatError extends Error { - code: ErrorCode - constructor(message: string, code: ErrorCode) { - super(message) - this.code = code - } -} - -export type ChatMessageModel = { - id: string - author: Author - text: string - error?: ChatError - throttling?: Throttling - sourceAttributions?: SourceAttribution[] - suggestedResponses?: SuggestedResponse[] -} - -export interface ConversationModel { - messages: ChatMessageModel[] -} - -export type Event = - | { - type: 'UPDATE_ANSWER' - data: { - text: string - spokenText?: string - sourceAttributions?: SourceAttribution[] - suggestedResponses?: SuggestedResponse[] - throttling?: Throttling - } - } - | { - type: 'DONE' - } - | { - type: 'ERROR' - error: ChatError - } - -export interface SendMessageParams { - prompt: string - imageUrl?: string - options: T - onEvent: (event: Event) => void - signal?: AbortSignal -} - -export interface ConversationResponse { - conversationId: string - clientId: string - conversationSignature: string - result: { - value: string - message?: string - } -} - -export interface Telemetry { - metrics?: null - startTime: string -} - -export interface ChatUpdateArgument { - messages?: ChatResponseMessage[] - throttling?: Throttling - requestId: string - result: null -} - -export type ChatUpdateCompleteResponse = { - type: 2 - invocationId: string - item: ChatResponseItem -} | { - type: 1 - target: string - arguments: ChatUpdateArgument[] -} | { - type: 3 - invocationId: string -} | { - type: 6 | 7 -} - -export interface ChatRequestResult { - value: string - serviceVersion: string - error?: string -} - 
-export interface ChatResponseItem { - messages: ChatResponseMessage[] - firstNewMessageIndex: number - suggestedResponses: null - conversationId: string - requestId: string - conversationExpiryTime: string - telemetry: Telemetry - result: ChatRequestResult - throttling: Throttling -} -export enum InvocationEventType { - Invocation = 1, - StreamItem = 2, - Completion = 3, - StreamInvocation = 4, - CancelInvocation = 5, - Ping = 6, - Close = 7, -} - -// https://github.com/bytemate/bingchat-api/blob/main/src/lib.ts - -export interface ConversationInfo { - conversationId: string - clientId: string - conversationSignature: string - invocationId: number - conversationStyle: BingConversationStyle - prompt: string - imageUrl?: string -} - -export interface BingChatResponse { - conversationSignature: string - conversationId: string - clientId: string - invocationId: number - conversationExpiryTime: Date - response: string - details: ChatResponseMessage -} - -export interface Throttling { - maxNumLongDocSummaryUserMessagesInConversation: number - maxNumUserMessagesInConversation: number - numLongDocSummaryUserMessagesInConversation: number - numUserMessagesInConversation: number -} - -export interface ChatResponseMessage { - text: string - spokenText?: string - author: string - createdAt: Date - timestamp: Date - messageId: string - requestId: string - offense: string - adaptiveCards: AdaptiveCard[] - sourceAttributions: SourceAttribution[] - feedback: Feedback - contentOrigin: string - messageType?: string - contentType?: string - privacy: null - suggestedResponses: SuggestedResponse[] -} - -export interface AdaptiveCard { - type: string - version: string - body: Body[] -} - -export interface Body { - type: string - text: string - wrap: boolean - size?: string -} - -export interface Feedback { - tag: null - updatedOn: null - type: string -} - -export interface SourceAttribution { - providerDisplayName: string - seeMoreUrl: string - searchQuery: string -} - -export interface SuggestedResponse { - text: string - author?: Author - createdAt?: Date - timestamp?: Date - messageId?: string - messageType?: string - offense?: string - feedback?: Feedback - contentOrigin?: string - privacy?: null -} - -export interface KBlobRequest { - knowledgeRequest: KnowledgeRequestContext - imageBase64?: string -} - -export interface KBlobResponse { - blobId: string - processedBlobId?: string -} - -export interface KnowledgeRequestContext { - imageInfo: ImageInfo; - knowledgeRequest: KnowledgeRequest; -} - -export interface ImageInfo { - url?: string; -} - -export interface KnowledgeRequest { - invokedSkills: string[]; - subscriptionId: string; - invokedSkillsRequestData: InvokedSkillsRequestData; - convoData: ConvoData; -} - -export interface ConvoData { - convoid: string; - convotone: BingConversationStyle; -} - -export interface InvokedSkillsRequestData { - enableFaceBlur: boolean; -} - -export interface FileItem { - url: string; - status?: 'loading' | 'error' | 'loaded' -} diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py deleted file mode 100644 index 264d564dbda676b52f446c0d25433a15939a78a3..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py +++ /dev/null @@ -1,519 +0,0 @@ 
-""" -This module uses ctypes to bind a whole bunch of functions and constants from -SecureTransport. The goal here is to provide the low-level API to -SecureTransport. These are essentially the C-level functions and constants, and -they're pretty gross to work with. - -This code is a bastardised version of the code found in Will Bond's oscrypto -library. An enormous debt is owed to him for blazing this trail for us. For -that reason, this code should be considered to be covered both by urllib3's -license and by oscrypto's: - - Copyright (c) 2015-2016 Will Bond - - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the "Software"), - to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, - and/or sell copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. -""" -from __future__ import absolute_import - -import platform -from ctypes import ( - CDLL, - CFUNCTYPE, - POINTER, - c_bool, - c_byte, - c_char_p, - c_int32, - c_long, - c_size_t, - c_uint32, - c_ulong, - c_void_p, -) -from ctypes.util import find_library - -from ...packages.six import raise_from - -if platform.system() != "Darwin": - raise ImportError("Only macOS is supported") - -version = platform.mac_ver()[0] -version_info = tuple(map(int, version.split("."))) -if version_info < (10, 8): - raise OSError( - "Only OS X 10.8 and newer are supported, not %s.%s" - % (version_info[0], version_info[1]) - ) - - -def load_cdll(name, macos10_16_path): - """Loads a CDLL by name, falling back to known path on 10.16+""" - try: - # Big Sur is technically 11 but we use 10.16 due to the Big Sur - # beta being labeled as 10.16. 
- if version_info >= (10, 16): - path = macos10_16_path - else: - path = find_library(name) - if not path: - raise OSError # Caught and reraised as 'ImportError' - return CDLL(path, use_errno=True) - except OSError: - raise_from(ImportError("The library %s failed to load" % name), None) - - -Security = load_cdll( - "Security", "/System/Library/Frameworks/Security.framework/Security" -) -CoreFoundation = load_cdll( - "CoreFoundation", - "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation", -) - - -Boolean = c_bool -CFIndex = c_long -CFStringEncoding = c_uint32 -CFData = c_void_p -CFString = c_void_p -CFArray = c_void_p -CFMutableArray = c_void_p -CFDictionary = c_void_p -CFError = c_void_p -CFType = c_void_p -CFTypeID = c_ulong - -CFTypeRef = POINTER(CFType) -CFAllocatorRef = c_void_p - -OSStatus = c_int32 - -CFDataRef = POINTER(CFData) -CFStringRef = POINTER(CFString) -CFArrayRef = POINTER(CFArray) -CFMutableArrayRef = POINTER(CFMutableArray) -CFDictionaryRef = POINTER(CFDictionary) -CFArrayCallBacks = c_void_p -CFDictionaryKeyCallBacks = c_void_p -CFDictionaryValueCallBacks = c_void_p - -SecCertificateRef = POINTER(c_void_p) -SecExternalFormat = c_uint32 -SecExternalItemType = c_uint32 -SecIdentityRef = POINTER(c_void_p) -SecItemImportExportFlags = c_uint32 -SecItemImportExportKeyParameters = c_void_p -SecKeychainRef = POINTER(c_void_p) -SSLProtocol = c_uint32 -SSLCipherSuite = c_uint32 -SSLContextRef = POINTER(c_void_p) -SecTrustRef = POINTER(c_void_p) -SSLConnectionRef = c_uint32 -SecTrustResultType = c_uint32 -SecTrustOptionFlags = c_uint32 -SSLProtocolSide = c_uint32 -SSLConnectionType = c_uint32 -SSLSessionOption = c_uint32 - - -try: - Security.SecItemImport.argtypes = [ - CFDataRef, - CFStringRef, - POINTER(SecExternalFormat), - POINTER(SecExternalItemType), - SecItemImportExportFlags, - POINTER(SecItemImportExportKeyParameters), - SecKeychainRef, - POINTER(CFArrayRef), - ] - Security.SecItemImport.restype = OSStatus - - Security.SecCertificateGetTypeID.argtypes = [] - Security.SecCertificateGetTypeID.restype = CFTypeID - - Security.SecIdentityGetTypeID.argtypes = [] - Security.SecIdentityGetTypeID.restype = CFTypeID - - Security.SecKeyGetTypeID.argtypes = [] - Security.SecKeyGetTypeID.restype = CFTypeID - - Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef] - Security.SecCertificateCreateWithData.restype = SecCertificateRef - - Security.SecCertificateCopyData.argtypes = [SecCertificateRef] - Security.SecCertificateCopyData.restype = CFDataRef - - Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p] - Security.SecCopyErrorMessageString.restype = CFStringRef - - Security.SecIdentityCreateWithCertificate.argtypes = [ - CFTypeRef, - SecCertificateRef, - POINTER(SecIdentityRef), - ] - Security.SecIdentityCreateWithCertificate.restype = OSStatus - - Security.SecKeychainCreate.argtypes = [ - c_char_p, - c_uint32, - c_void_p, - Boolean, - c_void_p, - POINTER(SecKeychainRef), - ] - Security.SecKeychainCreate.restype = OSStatus - - Security.SecKeychainDelete.argtypes = [SecKeychainRef] - Security.SecKeychainDelete.restype = OSStatus - - Security.SecPKCS12Import.argtypes = [ - CFDataRef, - CFDictionaryRef, - POINTER(CFArrayRef), - ] - Security.SecPKCS12Import.restype = OSStatus - - SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t)) - SSLWriteFunc = CFUNCTYPE( - OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t) - ) - - Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc] - 
Security.SSLSetIOFuncs.restype = OSStatus - - Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t] - Security.SSLSetPeerID.restype = OSStatus - - Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef] - Security.SSLSetCertificate.restype = OSStatus - - Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean] - Security.SSLSetCertificateAuthorities.restype = OSStatus - - Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef] - Security.SSLSetConnection.restype = OSStatus - - Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t] - Security.SSLSetPeerDomainName.restype = OSStatus - - Security.SSLHandshake.argtypes = [SSLContextRef] - Security.SSLHandshake.restype = OSStatus - - Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)] - Security.SSLRead.restype = OSStatus - - Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)] - Security.SSLWrite.restype = OSStatus - - Security.SSLClose.argtypes = [SSLContextRef] - Security.SSLClose.restype = OSStatus - - Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)] - Security.SSLGetNumberSupportedCiphers.restype = OSStatus - - Security.SSLGetSupportedCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - POINTER(c_size_t), - ] - Security.SSLGetSupportedCiphers.restype = OSStatus - - Security.SSLSetEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - c_size_t, - ] - Security.SSLSetEnabledCiphers.restype = OSStatus - - Security.SSLGetNumberEnabledCiphers.argtype = [SSLContextRef, POINTER(c_size_t)] - Security.SSLGetNumberEnabledCiphers.restype = OSStatus - - Security.SSLGetEnabledCiphers.argtypes = [ - SSLContextRef, - POINTER(SSLCipherSuite), - POINTER(c_size_t), - ] - Security.SSLGetEnabledCiphers.restype = OSStatus - - Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)] - Security.SSLGetNegotiatedCipher.restype = OSStatus - - Security.SSLGetNegotiatedProtocolVersion.argtypes = [ - SSLContextRef, - POINTER(SSLProtocol), - ] - Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus - - Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)] - Security.SSLCopyPeerTrust.restype = OSStatus - - Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef] - Security.SecTrustSetAnchorCertificates.restype = OSStatus - - Security.SecTrustSetAnchorCertificatesOnly.argstypes = [SecTrustRef, Boolean] - Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus - - Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)] - Security.SecTrustEvaluate.restype = OSStatus - - Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef] - Security.SecTrustGetCertificateCount.restype = CFIndex - - Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex] - Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef - - Security.SSLCreateContext.argtypes = [ - CFAllocatorRef, - SSLProtocolSide, - SSLConnectionType, - ] - Security.SSLCreateContext.restype = SSLContextRef - - Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean] - Security.SSLSetSessionOption.restype = OSStatus - - Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol] - Security.SSLSetProtocolVersionMin.restype = OSStatus - - Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol] - 
Security.SSLSetProtocolVersionMax.restype = OSStatus - - try: - Security.SSLSetALPNProtocols.argtypes = [SSLContextRef, CFArrayRef] - Security.SSLSetALPNProtocols.restype = OSStatus - except AttributeError: - # Supported only in 10.12+ - pass - - Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p] - Security.SecCopyErrorMessageString.restype = CFStringRef - - Security.SSLReadFunc = SSLReadFunc - Security.SSLWriteFunc = SSLWriteFunc - Security.SSLContextRef = SSLContextRef - Security.SSLProtocol = SSLProtocol - Security.SSLCipherSuite = SSLCipherSuite - Security.SecIdentityRef = SecIdentityRef - Security.SecKeychainRef = SecKeychainRef - Security.SecTrustRef = SecTrustRef - Security.SecTrustResultType = SecTrustResultType - Security.SecExternalFormat = SecExternalFormat - Security.OSStatus = OSStatus - - Security.kSecImportExportPassphrase = CFStringRef.in_dll( - Security, "kSecImportExportPassphrase" - ) - Security.kSecImportItemIdentity = CFStringRef.in_dll( - Security, "kSecImportItemIdentity" - ) - - # CoreFoundation time! - CoreFoundation.CFRetain.argtypes = [CFTypeRef] - CoreFoundation.CFRetain.restype = CFTypeRef - - CoreFoundation.CFRelease.argtypes = [CFTypeRef] - CoreFoundation.CFRelease.restype = None - - CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef] - CoreFoundation.CFGetTypeID.restype = CFTypeID - - CoreFoundation.CFStringCreateWithCString.argtypes = [ - CFAllocatorRef, - c_char_p, - CFStringEncoding, - ] - CoreFoundation.CFStringCreateWithCString.restype = CFStringRef - - CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding] - CoreFoundation.CFStringGetCStringPtr.restype = c_char_p - - CoreFoundation.CFStringGetCString.argtypes = [ - CFStringRef, - c_char_p, - CFIndex, - CFStringEncoding, - ] - CoreFoundation.CFStringGetCString.restype = c_bool - - CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex] - CoreFoundation.CFDataCreate.restype = CFDataRef - - CoreFoundation.CFDataGetLength.argtypes = [CFDataRef] - CoreFoundation.CFDataGetLength.restype = CFIndex - - CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef] - CoreFoundation.CFDataGetBytePtr.restype = c_void_p - - CoreFoundation.CFDictionaryCreate.argtypes = [ - CFAllocatorRef, - POINTER(CFTypeRef), - POINTER(CFTypeRef), - CFIndex, - CFDictionaryKeyCallBacks, - CFDictionaryValueCallBacks, - ] - CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef - - CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef] - CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef - - CoreFoundation.CFArrayCreate.argtypes = [ - CFAllocatorRef, - POINTER(CFTypeRef), - CFIndex, - CFArrayCallBacks, - ] - CoreFoundation.CFArrayCreate.restype = CFArrayRef - - CoreFoundation.CFArrayCreateMutable.argtypes = [ - CFAllocatorRef, - CFIndex, - CFArrayCallBacks, - ] - CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef - - CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p] - CoreFoundation.CFArrayAppendValue.restype = None - - CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef] - CoreFoundation.CFArrayGetCount.restype = CFIndex - - CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex] - CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p - - CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( - CoreFoundation, "kCFAllocatorDefault" - ) - CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll( - CoreFoundation, "kCFTypeArrayCallBacks" - ) - CoreFoundation.kCFTypeDictionaryKeyCallBacks = 
c_void_p.in_dll( - CoreFoundation, "kCFTypeDictionaryKeyCallBacks" - ) - CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( - CoreFoundation, "kCFTypeDictionaryValueCallBacks" - ) - - CoreFoundation.CFTypeRef = CFTypeRef - CoreFoundation.CFArrayRef = CFArrayRef - CoreFoundation.CFStringRef = CFStringRef - CoreFoundation.CFDictionaryRef = CFDictionaryRef - -except (AttributeError): - raise ImportError("Error initializing ctypes") - - -class CFConst(object): - """ - A class object that acts as essentially a namespace for CoreFoundation - constants. - """ - - kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) - - -class SecurityConst(object): - """ - A class object that acts as essentially a namespace for Security constants. - """ - - kSSLSessionOptionBreakOnServerAuth = 0 - - kSSLProtocol2 = 1 - kSSLProtocol3 = 2 - kTLSProtocol1 = 4 - kTLSProtocol11 = 7 - kTLSProtocol12 = 8 - # SecureTransport does not support TLS 1.3 even if there's a constant for it - kTLSProtocol13 = 10 - kTLSProtocolMaxSupported = 999 - - kSSLClientSide = 1 - kSSLStreamType = 0 - - kSecFormatPEMSequence = 10 - - kSecTrustResultInvalid = 0 - kSecTrustResultProceed = 1 - # This gap is present on purpose: this was kSecTrustResultConfirm, which - # is deprecated. - kSecTrustResultDeny = 3 - kSecTrustResultUnspecified = 4 - kSecTrustResultRecoverableTrustFailure = 5 - kSecTrustResultFatalTrustFailure = 6 - kSecTrustResultOtherError = 7 - - errSSLProtocol = -9800 - errSSLWouldBlock = -9803 - errSSLClosedGraceful = -9805 - errSSLClosedNoNotify = -9816 - errSSLClosedAbort = -9806 - - errSSLXCertChainInvalid = -9807 - errSSLCrypto = -9809 - errSSLInternal = -9810 - errSSLCertExpired = -9814 - errSSLCertNotYetValid = -9815 - errSSLUnknownRootCert = -9812 - errSSLNoRootCert = -9813 - errSSLHostNameMismatch = -9843 - errSSLPeerHandshakeFail = -9824 - errSSLPeerUserCancelled = -9839 - errSSLWeakPeerEphemeralDHKey = -9850 - errSSLServerAuthCompleted = -9841 - errSSLRecordOverflow = -9847 - - errSecVerifyFailed = -67808 - errSecNoTrustSettings = -25263 - errSecItemNotFound = -25300 - errSecInvalidTrustSettings = -25262 - - # Cipher suites. We only pick the ones our default cipher string allows. 
- # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9 - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8 - TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F - TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 - TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B - TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 - TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D - TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C - TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D - TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C - TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 - TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F - TLS_AES_128_GCM_SHA256 = 0x1301 - TLS_AES_256_GCM_SHA384 = 0x1302 - TLS_AES_128_CCM_8_SHA256 = 0x1305 - TLS_AES_128_CCM_SHA256 = 0x1304 diff --git a/spaces/TeamTonic/MultiMed/app.py b/spaces/TeamTonic/MultiMed/app.py deleted file mode 100644 index 29b1ad119a6d38fc2fca1fa616c0f221e9751208..0000000000000000000000000000000000000000 --- a/spaces/TeamTonic/MultiMed/app.py +++ /dev/null @@ -1,490 +0,0 @@ -# Welcome to Team Tonic's MultiMed - -from gradio_client import Client -import os -import numpy as np -import base64 -import gradio as gr -import requests -import json -import dotenv -from scipy.io.wavfile import write -import PIL -from openai import OpenAI -import time -dotenv.load_dotenv() - -seamless_client = Client("facebook/seamless_m4t") -HuggingFace_Token = os.getenv("HuggingFace_Token") - -def check_hallucination(assertion,citation): - API_URL = "https://api-inference.huggingface.co/models/vectara/hallucination_evaluation_model" - headers = {"Authorization": f"Bearer {HuggingFace_Token}"} - payload = {"inputs" : f"{assertion} [SEP] {citation}"} - - response = requests.post(API_URL, headers=headers, json=payload,timeout=120) - output = response.json() - output = output[0][0]["score"] - - return f"**hullicination score:** {output}" - - - -def process_speech(input_language, audio_input): - """ - processing sound using seamless_m4t - """ - if audio_input is None : - return "no audio or audio did not save yet \nplease try again ! 
" - print(f"audio : {audio_input}") - print(f"audio type : {type(audio_input)}") - out = seamless_client.predict( - "S2TT", - "file", - None, - audio_input, #audio_name - "", - input_language,# source language - input_language,# target language - api_name="/run", - ) - out = out[1] # get the text - try : - return f"{out}" - except Exception as e : - return f"{e}" - - - - -def process_image(image) : - img_name = f"{np.random.randint(0, 100)}.jpg" - PIL.Image.fromarray(image.astype('uint8'), 'RGB').save(img_name) - image = open(img_name, "rb").read() - base64_image = base64_image = base64.b64encode(image).decode('utf-8') - openai_api_key = os.getenv('OPENAI_API_KEY') - # oai_org = os.getenv('OAI_ORG') - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}" - } - - payload = { - "model": "gpt-4-vision-preview", - "messages": [ - { - "role": "user", - "content": [ - { - "type": "text", - "text": "You are clinical consultant discussion training cases with students at TonicUniversity. Assess and describe the photo in minute detail. Explain why each area or item in the photograph would be inappropriate to describe if required. Pay attention to anatomy, symptoms and remedies. Propose a course of action based on your assessment. Exclude any other commentary:" - }, - { - "type": "image_url", - "image_url": { - "url": f"data:image/jpeg;base64,{base64_image}" - } - } - ] - } - ], - "max_tokens": 1200 - } - - response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload) - - try : - out = response.json() - out = out["choices"][0]["message"]["content"] - - return out - except Exception as e : - return f"{e}" - - -def query_vectara(text): - user_message = text - - # Read authentication parameters from the .env file - CUSTOMER_ID = os.getenv('CUSTOMER_ID') - CORPUS_ID = os.getenv('CORPUS_ID') - API_KEY = os.getenv('API_KEY') - - # Define the headers - api_key_header = { - "customer-id": CUSTOMER_ID, - "x-api-key": API_KEY - } - - # Define the request body in the structure provided in the example - request_body = { - "query": [ - { - "query": user_message, - "queryContext": "", - "start": 1, - "numResults": 50, - "contextConfig": { - "charsBefore": 0, - "charsAfter": 0, - "sentencesBefore": 2, - "sentencesAfter": 2, - "startTag": "%START_SNIPPET%", - "endTag": "%END_SNIPPET%", - }, - "rerankingConfig": { - "rerankerId": 272725718, - "mmrConfig": { - "diversityBias": 0.35 - } - }, - "corpusKey": [ - { - "customerId": CUSTOMER_ID, - "corpusId": CORPUS_ID, - "semantics": 0, - "metadataFilter": "", - "lexicalInterpolationConfig": { - "lambda": 0 - }, - "dim": [] - } - ], - "summary": [ - { - "maxSummarizedResults": 5, - "responseLang": "auto", - "summarizerPromptName": "vectara-summary-ext-v1.2.0" - } - ] - } - ] - } - - # Make the API request using Gradio - response = requests.post( - "https://api.vectara.io/v1/query", - json=request_body, # Use json to automatically serialize the request body - verify=True, - headers=api_key_header - ) - - if response.status_code == 200: - query_data = response.json() - if query_data: - sources_info = [] - - # Extract the summary. - summary = query_data['responseSet'][0]['summary'][0]['text'] - - # Iterate over all response sets - for response_set in query_data.get('responseSet', []): - # Extract sources - # Limit to top 5 sources. 
- for source in response_set.get('response', [])[:5]: - source_metadata = source.get('metadata', []) - source_info = {} - - for metadata in source_metadata: - metadata_name = metadata.get('name', '') - metadata_value = metadata.get('value', '') - - if metadata_name == 'title': - source_info['title'] = metadata_value - elif metadata_name == 'author': - source_info['author'] = metadata_value - elif metadata_name == 'pageNumber': - source_info['page number'] = metadata_value - - if source_info: - sources_info.append(source_info) - - result = {"summary": summary, "sources": sources_info} - return f"{json.dumps(result, indent=2)}" - else: - return "No data found in the response." - else: - return f"Error: {response.status_code}" - - -def convert_to_markdown(vectara_response_json): - vectara_response = json.loads(vectara_response_json) - if vectara_response: - summary = vectara_response.get('summary', 'No summary available') - sources_info = vectara_response.get('sources', []) - - # Format the summary as Markdown - markdown_summary = f' {summary}\n\n' - - # Format the sources as a numbered list - markdown_sources = "" - for i, source_info in enumerate(sources_info): - author = source_info.get('author', 'Unknown author') - title = source_info.get('title', 'Unknown title') - page_number = source_info.get('page number', 'Unknown page number') - markdown_sources += f"{i+1}. {title} by {author}, Page {page_number}\n" - - return f"{markdown_summary}**Sources:**\n{markdown_sources}" - else: - return "No data found in the response." -# Main function to handle the Gradio interface logic - -def process_summary_with_openai(summary): - """ - This function takes a summary text as input and processes it with OpenAI's GPT model. - """ - try: - # Ensure that the OpenAI client is properly initialized - client = OpenAI(api_key=os.getenv('OPENAI_API_KEY')) - - # Create the prompt for OpenAI's completion - prompt = "You are clinical consultant discussion training cases with students at TonicUniversity. Assess and describe the proper options in minute detail. Propose a course of action based on your assessment. You will recieve a summary assessment in a language, respond ONLY in the original language. 
Exclude any other commentary:" - - # Call the OpenAI API with the prompt and the summary - completion = client.chat.completions.create( - model="gpt-4-1106-preview", # Make sure to use the correct model name - messages=[ - {"role": "system", "content": prompt}, - {"role": "user", "content": summary} - ] - ) - - # Extract the content from the completion - final_summary = completion.choices[0].message.content - return final_summary - except Exception as e: - return str(e) - - -def process_and_query(text=None): - try: - # augment the prompt before feeding it to vectara - text = "the user asks the following to his health adviser " + text - # If an image is provided, process it with OpenAI and use the response as the text query for Vectara - # if image is not None: - # text = process_image(image) - # return "**Summary:** "+text - # if audio is not None: - # text = process_speech(audio) - # # augment the prompt before feeding it to vectara - # text = "the user asks the following to his health adviser " + text - - - - # Use the text to query Vectara - vectara_response_json = query_vectara(text) - - # Convert the Vectara response to Markdown - markdown_output = convert_to_markdown(vectara_response_json) - - # Process the summary with OpenAI - final_response = process_summary_with_openai(markdown_output) - - # Return the processed summary along with the full output - return f"**Summary**: {final_response}\n\n**Full output**:\n{markdown_output}" - except Exception as e: - return str(e) - - completion = client.chat.completions.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "system", "content": prompt}, - {"role": "user", "content": markdown_output_final} - ] - ) - final_response= completion.choices[0].message.content - return f"**Summary**: {final_response}\n\n**Full output**:\n{markdown_output}" - except Exception as e: - return str(e) - - -# Define the Gradio interface -# iface = gr.Interface( -# fn=process_and_query, -# inputs=[ -# gr.Textbox(label="Input Text"), -# gr.Image(label="Upload Image"), -# gr.Audio(label="talk in french", -# sources=["microphone"]), -# ], -# outputs=[gr.Markdown(label="Output Text")], -# title="👋🏻Welcome to ⚕🗣️😷MultiMed - Access Chat ⚕🗣️😷", -# description=''' -# ### How To Use ⚕🗣️😷MultiMed⚕: -# #### 🗣️📝Interact with ⚕🗣️😷MultiMed⚕ in any language using audio or text! -# #### 🗣️📝 This is an educational and accessible conversational tool to improve wellness and sanitation in support of public health. -# #### 📚🌟💼 The knowledge base is composed of publicly available medical and health sources in multiple languages. We also used [Kelvalya/MedAware](https://huggingface.co/datasets/keivalya/MedQuad-MedicalQnADataset) that we processed and converted to HTML. The quality of the answers depends on the quality of the dataset, so if you want to see some data represented here, do [get in touch](https://discord.gg/GWpVpekp). You can also use 😷MultiMed⚕️ on your own data & in your own way by cloning this space. 🧬🔬🔍 Simply click here: Duplicate Space
-# #### Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community on 👻Discord: [Discord](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)" -# ''', -# theme='ParityError/Anime', -# examples=[ -# ["What is the proper treatment for buccal herpes?"], -# ["Male, 40 presenting with swollen glands and a rash"], -# ["How does cellular metabolism work TCA cycle"], -# ["What special care must be provided to children with chicken pox?"], -# ["When and how often should I wash my hands ?"], -# ["بکل ہرپس کا صحیح علاج کیا ہے؟"], -# ["구강 헤르페스의 적절한 치료법은 무엇입니까?"], -# ["Je, ni matibabu gani sahihi kwa herpes ya buccal?"], -# ], -# ) - -welcome_message = """ -# 👋🏻Welcome to ⚕🗣️😷MultiMed - Access Chat ⚕🗣️😷 -### How To Use ⚕🗣️😷MultiMed⚕: -#### 🗣️📝Interact with ⚕🗣️😷MultiMed⚕ in any language using audio or text! -#### 🗣️📝 This is an educational and accessible conversational tool to improve wellness and sanitation in support of public health. -#### 📚🌟💼 The knowledge base is composed of publicly available medical and health sources in multiple languages. We also used [Kelvalya/MedAware](https://huggingface.co/datasets/keivalya/MedQuad-MedicalQnADataset) that we processed and converted to HTML. The quality of the answers depends on the quality of the dataset, so if you want to see some data represented here, do [get in touch](https://discord.gg/GWpVpekp). You can also use 😷MultiMed⚕️ on your own data & in your own way by cloning this space. 🧬🔬🔍 Simply click here: Duplicate Space
-#### Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community on 👻Discord: [Discord](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)" -""" - - -languages = [ - "Afrikaans", - "Amharic", - "Modern Standard Arabic", - "Moroccan Arabic", - "Egyptian Arabic", - "Assamese", - "Asturian", - "North Azerbaijani", - "Belarusian", - "Bengali", - "Bosnian", - "Bulgarian", - "Catalan", - "Cebuano", - "Czech", - "Central Kurdish", - "Mandarin Chinese", - "Welsh", - "Danish", - "German", - "Greek", - "English", - "Estonian", - "Basque", - "Finnish", - "French", - "West Central Oromo", - "Irish", - "Galician", - "Gujarati", - "Hebrew", - "Hindi", - "Croatian", - "Hungarian", - "Armenian", - "Igbo", - "Indonesian", - "Icelandic", - "Italian", - "Javanese", - "Japanese", - "Kamba", - "Kannada", - "Georgian", - "Kazakh", - "Kabuverdianu", - "Halh Mongolian", - "Khmer", - "Kyrgyz", - "Korean", - "Lao", - "Lithuanian", - "Luxembourgish", - "Ganda", - "Luo", - "Standard Latvian", - "Maithili", - "Malayalam", - "Marathi", - "Macedonian", - "Maltese", - "Meitei", - "Burmese", - "Dutch", - "Norwegian Nynorsk", - "Norwegian Bokmål", - "Nepali", - "Nyanja", - "Occitan", - "Odia", - "Punjabi", - "Southern Pashto", - "Western Persian", - "Polish", - "Portuguese", - "Romanian", - "Russian", - "Slovak", - "Slovenian", - "Shona", - "Sindhi", - "Somali", - "Spanish", - "Serbian", - "Swedish", - "Swahili", - "Tamil", - "Telugu", - "Tajik", - "Tagalog", - "Thai", - "Turkish", - "Ukrainian", - "Urdu", - "Northern Uzbek", - "Vietnamese", - "Xhosa", - "Yoruba", - "Cantonese", - "Colloquial Malay", - "Standard Malay", - "Zulu" -] - - -with gr.Blocks(theme='ParityError/Anime') as iface : - gr.Markdown(welcome_message) - with gr.Tab("text summarization"): - text_input = gr.Textbox(label="input text",lines=5) - text_output = gr.Markdown(label="output text") - text_button = gr.Button("process text") - gr.Examples([ - ["What is the proper treatment for buccal herpes?"], - ["Male, 40 presenting with swollen glands and a rash"], - ["How does cellular metabolism work TCA cycle"], - ["What special care must be provided to children with chicken pox?"], - ["When and how often should I wash my hands?"], - ["بکل ہرپس کا صحیح علاج کیا ہے؟"], - ["구강 헤르페스의 적절한 치료법은 무엇입니까?"], - ["Je, ni matibabu gani sahihi kwa herpes ya buccal?"], - ],inputs=[text_input]) - with gr.Tab("image identification"): - image_input = gr.Image(label="upload image") - image_output = gr.Markdown(label="output text") - image_button = gr.Button("process image") - image_button.click(process_image, inputs=image_input, outputs=image_output) - gr.Examples(["sick person.jpeg"],inputs=[image_input]) - with gr.Tab("speech to text"): - input_language = gr.Dropdown(languages, label="select the language",value="English",interactive=True) - audio_input = gr.Audio(label="speak",type="filepath",sources="microphone") - audio_output = gr.Markdown(label="output text") - audio_button = gr.Button("process audio") - audio_button.click(process_speech, inputs=[input_language,audio_input], outputs=audio_output) - gr.Examples([["English","sample_input.mp3"]],inputs=[input_language,audio_input]) - with gr.Tab("hallucination check"): - assertion = gr.Textbox(label="assertion") - citation = gr.Textbox(label="citation text") - 
hullucination_output = gr.Markdown(label="output text") - audio_button = gr.Button("check hallucination") - gr.Examples([["i am drunk","sarah is pregnant"]],inputs=[assertion,citation]) - text_button.click(process_and_query, inputs=text_input, outputs=text_output) - audio_button.click(check_hallucination,inputs=[assertion,citation],outputs=hullucination_output) - - - - -iface.queue().launch(show_error=True,debug=True) diff --git a/spaces/Thanarit/GPT-Detection-Demo/app.py b/spaces/Thanarit/GPT-Detection-Demo/app.py deleted file mode 100644 index b0de7e28071553c6b86ec9a73b9881b4d540c0bc..0000000000000000000000000000000000000000 --- a/spaces/Thanarit/GPT-Detection-Demo/app.py +++ /dev/null @@ -1,54 +0,0 @@ -import streamlit as st -from transformers import pipeline -from ModelDriver import * -import numpy as np - -# Add a title -st.title('GPT Detection Demo') -st.write("This is a demo for GPT detection. You can use this demo to test the model. The model is trained on two datasets: OpenGPT and CSAbstract. You can choose the model and dataset in the sidebar.") -st.write("Reference on how we built Roberta Sentinel: https://arxiv.org/abs/2305.07969") - -# Add 4 options for 4 models -ModelOption = st.sidebar.selectbox( - 'Which Model do you want to use?', - ('RobertaSentinel', 'RobertaClassifier'), -) - -DatasetOption = st.sidebar.selectbox( - 'Which Dataset the model was trained on?', - ('OpenGPT', 'CSAbstract'), -) - - -text = st.text_area('Enter text here (max 500 words)', '') - -if st.button('Generate'): - if ModelOption == 'RobertaSentinel': - if DatasetOption == 'OpenGPT': - result = RobertaSentinelOpenGPTInference(text) - st.write("Model: RobertaSentinelOpenGPT") - elif DatasetOption == 'CSAbstract': - result = RobertaSentinelCSAbstractInference(text) - st.write("Model: RobertaSentinelCSAbstract") - - elif ModelOption == 'RobertaClassifier': - if DatasetOption == 'OpenGPT': - result = RobertaClassifierOpenGPTInference(text) - st.write("Model: RobertaClassifierOpenGPT") - elif DatasetOption == 'CSAbstract': - result = RobertaClassifierCSAbstractInference(text) - st.write("Model: RobertaClassifierCSAbstract") - - Prediction = "Human Written" if not np.argmax(result) else "Machine Generated" - - st.write(f"Prediction: {Prediction} ") - st.write(f"Probabilty:", max(result)) - - - - - - - - - diff --git a/spaces/ThomasSimonini/Unity-MLAgents-Pyramids/index.html b/spaces/ThomasSimonini/Unity-MLAgents-Pyramids/index.html deleted file mode 100644 index 37f4404c8056925b6523479300dec8d2406d5c96..0000000000000000000000000000000000000000 --- a/spaces/ThomasSimonini/Unity-MLAgents-Pyramids/index.html +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - Unity-MLAgents-Pyramids - - - - -
- - - - - diff --git a/spaces/Tirendaz/Text-Classification/app.py b/spaces/Tirendaz/Text-Classification/app.py deleted file mode 100644 index 125b46d586913e6a24ccb8585102a076baf95d9d..0000000000000000000000000000000000000000 --- a/spaces/Tirendaz/Text-Classification/app.py +++ /dev/null @@ -1,23 +0,0 @@ -import gradio as gr -from transformers import pipeline - -classifier = pipeline("sentiment-analysis", model="Tirendaz/my_distilbert_model") - -def text_classification(text): - result= classifier(text) - sentiment_label = result[0]['label'] - sentiment_score = result[0]['score'] - formatted_output = f"This sentiment is {sentiment_label} with the probability {sentiment_score*100:.2f}%" - return formatted_output - -examples=["This is wonderful movie!", "The movie was really bad; I didn't like it."] - -io = gr.Interface(fn=text_classification, - inputs= gr.Textbox(lines=2, label="Text", placeholder="Enter title here..."), - outputs=gr.Textbox(lines=2, label="Text Classification Result"), - title="Text Classification", - description="Enter a text and see the text classification result!", - examples=examples) - -io.launch() - diff --git a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/Vercel.py b/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/Vercel.py deleted file mode 100644 index e5df9cf017e4c1a265f5c9d5e48eb5c10a56e60a..0000000000000000000000000000000000000000 --- a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/Vercel.py +++ /dev/null @@ -1,162 +0,0 @@ -import os -import json -import base64 -import execjs -import queue -import threading - -from curl_cffi import requests -from ...typing import sha256, Dict, get_type_hints - -url = 'https://play.vercel.ai' -supports_stream = True -needs_auth = False - -models = { - 'claude-instant-v1': 'anthropic:claude-instant-v1', - 'claude-v1': 'anthropic:claude-v1', - 'alpaca-7b': 'replicate:replicate/alpaca-7b', - 'stablelm-tuned-alpha-7b': 'replicate:stability-ai/stablelm-tuned-alpha-7b', - 'bloom': 'huggingface:bigscience/bloom', - 'bloomz': 'huggingface:bigscience/bloomz', - 'flan-t5-xxl': 'huggingface:google/flan-t5-xxl', - 'flan-ul2': 'huggingface:google/flan-ul2', - 'gpt-neox-20b': 'huggingface:EleutherAI/gpt-neox-20b', - 'oasst-sft-4-pythia-12b-epoch-3.5': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', - 'santacoder': 'huggingface:bigcode/santacoder', - 'command-medium-nightly': 'cohere:command-medium-nightly', - 'command-xlarge-nightly': 'cohere:command-xlarge-nightly', - 'code-cushman-001': 'openai:code-cushman-001', - 'code-davinci-002': 'openai:code-davinci-002', - 'gpt-3.5-turbo': 'openai:gpt-3.5-turbo', - 'text-ada-001': 'openai:text-ada-001', - 'text-babbage-001': 'openai:text-babbage-001', - 'text-curie-001': 'openai:text-curie-001', - 'text-davinci-002': 'openai:text-davinci-002', - 'text-davinci-003': 'openai:text-davinci-003' -} -model = models.keys() - -vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-instant-v1'}, 'anthropic:claude-v1': {'id': 'anthropic:claude-v1', 'provider': 'anthropic', 
'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-v1'}, 'replicate:replicate/alpaca-7b': {'id': 'replicate:replicate/alpaca-7b', 'provider': 'replicate', 'providerHumanName': 'Replicate', 'makerHumanName': 'Stanford', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21', 'name': 'alpaca-7b'}, 'replicate:stability-ai/stablelm-tuned-alpha-7b': {'id': 'replicate:stability-ai/stablelm-tuned-alpha-7b', 'provider': 'replicate', 'makerHumanName': 'StabilityAI', 'providerHumanName': 'Replicate', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b', 'name': 'stablelm-tuned-alpha-7b'}, 'huggingface:bigscience/bloom': {'id': 'huggingface:bigscience/bloom', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.", 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloom'}, 'huggingface:bigscience/bloomz': {'id': 'huggingface:bigscience/bloomz', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': 'We recommend using the model to perform tasks expressed in natural language. 
For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloomz'}, 'huggingface:google/flan-t5-xxl': {'id': 'huggingface:google/flan-t5-xxl', 'provider': 'huggingface', 'makerHumanName': 'Google', 'providerHumanName': 'HuggingFace', 'name': 'flan-t5-xxl', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}}, 'huggingface:google/flan-ul2': {'id': 'huggingface:google/flan-ul2', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'Google', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'flan-ul2'}, 'huggingface:EleutherAI/gpt-neox-20b': {'id': 'huggingface:EleutherAI/gpt-neox-20b', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'EleutherAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-neox-20b'}, 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'OpenAssistant', 'parameters': {'maximumLength': {'value': 200, 'range': [50, 1024]}, 'typicalP': {'value': 0.2, 'range': [0.1, 0.99]}, 'repetitionPenalty': {'value': 1, 'range': [0.1, 2]}}, 'name': 'oasst-sft-4-pythia-12b-epoch-3.5'}, 'huggingface:bigcode/santacoder': { - 'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. 
# the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 
'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}} - - -# based on https://github.com/ading2210/vercel-llm-api // modified -class Client: - def __init__(self): - self.session = requests.Session() - self.headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8', - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'en-US,en;q=0.5', - 'Te': 'trailers', - 'Upgrade-Insecure-Requests': '1' - } - self.session.headers.update(self.headers) - - def get_token(self): - b64 = self.session.get('https://sdk.vercel.ai/openai.jpeg').text - data = json.loads(base64.b64decode(b64)) - - code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % ( - data['c'], data['a']) - - token_string = json.dumps(separators=(',', ':'), - obj={'r': execjs.compile(code).call('token'), 't': data['t']}) - - return base64.b64encode(token_string.encode()).decode() - - def get_default_params(self, model_id): - return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()} - - def generate(self, model_id: str, prompt: str, params: dict = {}): - if not ':' in model_id: - model_id = models[model_id] - - defaults = 
self.get_default_params(model_id) - - payload = defaults | params | { - 'prompt': prompt, - 'model': model_id, - } - - headers = self.headers | { - 'Accept-Encoding': 'gzip, deflate, br', - 'Custom-Encoding': self.get_token(), - 'Host': 'sdk.vercel.ai', - 'Origin': 'https://sdk.vercel.ai', - 'Referrer': 'https://sdk.vercel.ai', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-origin', - } - - chunks_queue = queue.Queue() - error = None - response = None - - def callback(data): - chunks_queue.put(data.decode()) - - def request_thread(): - nonlocal response, error - for _ in range(3): - try: - response = self.session.post('https://sdk.vercel.ai/api/generate', - json=payload, headers=headers, content_callback=callback) - response.raise_for_status() - - except Exception as e: - if _ == 2: - error = e - - else: - continue - - thread = threading.Thread(target=request_thread, daemon=True) - thread.start() - - text = '' - index = 0 - while True: - try: - chunk = chunks_queue.get(block=True, timeout=0.1) - - except queue.Empty: - if error: - raise error - - elif response: - break - - else: - continue - - text += chunk - lines = text.split('\n') - - if len(lines) - 1 > index: - new = lines[index:-1] - for word in new: - yield json.loads(word) - index = len(lines) - 1 - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - yield 'Vercel is currently not working.' - return - - conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n' - - for message in messages: - conversation += '%s: %s\n' % (message['role'], message['content']) - - conversation += 'assistant: ' - - completion = Client().generate(model, conversation) - - for token in completion: - yield token - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/Vijish/Crop-CLIP/README.md b/spaces/Vijish/Crop-CLIP/README.md deleted file mode 100644 index bdcae9f45efba75036d8cd67008634852f99dcfc..0000000000000000000000000000000000000000 --- a/spaces/Vijish/Crop-CLIP/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Crop CLIP -emoji: 🦀 -colorFrom: gray -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
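For context, the deleted Vercel provider above (`spaces/VickyKira/NASAGPT/g4f/Provider/Providers/Vercel.py`) exposes `_create_completion` as a generator that streams response chunks. A minimal, hypothetical consumption sketch follows; the model name and the role/content message shape are taken from that file, and since the deleted version short-circuits with a "Vercel is currently not working." message, the sketch only illustrates the interface rather than a live call.

```python
# Hypothetical usage sketch (not part of any deleted file): drive the provider's
# streaming generator and join the yielded chunks into a single reply string.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello."},
]

# _create_completion(model, messages, stream, **kwargs) yields chunks as they arrive.
reply = "".join(str(chunk) for chunk in _create_completion("gpt-3.5-turbo", messages, stream=True))
print(reply)
```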
diff --git a/spaces/Violette/Protogen_x3.4_Official_Release/README.md b/spaces/Violette/Protogen_x3.4_Official_Release/README.md deleted file mode 100644 index f956b53e422114f032bad59a721dd2d563081a37..0000000000000000000000000000000000000000 --- a/spaces/Violette/Protogen_x3.4_Official_Release/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Protogen X3.4 Official Release -emoji: 📈 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/__init__.py b/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/__init__.py deleted file mode 100644 index ab1fb1c8289535cf9397bb9805c0cba3666ad26f..0000000000000000000000000000000000000000 --- a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -from minigpt4.common.registry import registry -from minigpt4.tasks.base_task import BaseTask -from minigpt4.tasks.image_text_pretrain import ImageTextPretrainTask - - -def setup_task(cfg): - assert "task" in cfg.run_cfg, "Task name must be provided." - - task_name = cfg.run_cfg.task - task = registry.get_task_class(task_name).setup_task(cfg=cfg) - assert task is not None, "Task {} not properly registered.".format(task_name) - - return task - - -__all__ = [ - "BaseTask", - "ImageTextPretrainTask", -] diff --git a/spaces/Voicelab/vlT5-keywords-generation/README.md b/spaces/Voicelab/vlT5-keywords-generation/README.md deleted file mode 100644 index bb0ec20c0d1e6457305ec098f8f8437c1d529252..0000000000000000000000000000000000000000 --- a/spaces/Voicelab/vlT5-keywords-generation/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: VlT5 Keywords Generation -emoji: 🌍 -colorFrom: pink -colorTo: pink -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/WorldlineChanger/sayashi-vits-uma-genshin-honkai/utils.py b/spaces/WorldlineChanger/sayashi-vits-uma-genshin-honkai/utils.py deleted file mode 100644 index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000 --- a/spaces/WorldlineChanger/sayashi-vits-uma-genshin-honkai/utils.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -import librosa -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = 
v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_audio_to_torch(full_path, target_sampling_rate): - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return torch.FloatTensor(audio.astype(np.float32)) - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = 
os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/XPMaster/KSA_Weather_Prediction/README.md b/spaces/XPMaster/KSA_Weather_Prediction/README.md deleted file mode 100644 index eb0dad165265158f4093d00781e64bb2dc852b4f..0000000000000000000000000000000000000000 --- a/spaces/XPMaster/KSA_Weather_Prediction/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: KSA Weather Prediction -emoji: ⚡ -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/metrics.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/metrics.py deleted file mode 100644 index 46fdddf3de2cf8d987ecb4c7d7cb3503afa995ad..0000000000000000000000000000000000000000 --- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/metrics.py +++ /dev/null @@ -1,361 +0,0 @@ -"Implements various metrics to measure training accuracy" -from .torch_core import * -from .callback import * -from .layers import * -from .basic_train import LearnerCallback - -__all__ = ['error_rate', 'accuracy', 'accuracy_thresh', 'dice', 'exp_rmspe', 'fbeta','FBeta', 'mse', 'mean_squared_error', - 'mae', 'mean_absolute_error', 'rmse', 'root_mean_squared_error', 'msle', 'mean_squared_logarithmic_error', - 'explained_variance', 'r2_score', 'top_k_accuracy', 'KappaScore', 'ConfusionMatrix', 'MatthewsCorreff', - 'Precision', 'Recall', 'R2Score', 'ExplainedVariance', 'ExpRMSPE', 'RMSE', 'Perplexity', 'AUROC', 'auc_roc_score', - 'roc_curve', 'MultiLabelFbeta', 'foreground_acc'] - -def fbeta(y_pred:Tensor, y_true:Tensor, thresh:float=0.2, beta:float=2, eps:float=1e-9, sigmoid:bool=True)->Rank0Tensor: - "Computes the f_beta between `preds` and `targets`" - beta2 = beta ** 2 - if sigmoid: y_pred = y_pred.sigmoid() - y_pred = (y_pred>thresh).float() - y_true = y_true.float() 
- TP = (y_pred*y_true).sum(dim=1) - prec = TP/(y_pred.sum(dim=1)+eps) - rec = TP/(y_true.sum(dim=1)+eps) - res = (prec*rec)/(prec*beta2+rec+eps)*(1+beta2) - return res.mean() - -def accuracy(input:Tensor, targs:Tensor)->Rank0Tensor: - "Computes accuracy with `targs` when `input` is bs * n_classes." - n = targs.shape[0] - input = input.argmax(dim=-1).view(n,-1) - targs = targs.view(n,-1) - return (input==targs).float().mean() - -def accuracy_thresh(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True)->Rank0Tensor: - "Computes accuracy when `y_pred` and `y_true` are the same size." - if sigmoid: y_pred = y_pred.sigmoid() - return ((y_pred>thresh)==y_true.byte()).float().mean() - -def top_k_accuracy(input:Tensor, targs:Tensor, k:int=5)->Rank0Tensor: - "Computes the Top-k accuracy (target is in the top k predictions)." - input = input.topk(k=k, dim=-1)[1] - targs = targs.unsqueeze(dim=-1).expand_as(input) - return (input == targs).max(dim=-1)[0].float().mean() - -def foreground_acc(input, target, void_code): - "Computes non-background accuracy, e.g. camvid for multiclass segmentation" - target = target.squeeze(1) - mask = target != void_code - return (input.argmax(dim=1)[mask]==target[mask]).float().mean() - -def error_rate(input:Tensor, targs:Tensor)->Rank0Tensor: - "1 - `accuracy`" - return 1 - accuracy(input, targs) - -def dice(input:Tensor, targs:Tensor, iou:bool=False, eps:float=1e-8)->Rank0Tensor: - "Dice coefficient metric for binary target. If iou=True, returns iou metric, classic for segmentation problems." - n = targs.shape[0] - input = input.argmax(dim=1).view(n,-1) - targs = targs.view(n,-1) - intersect = (input * targs).sum().float() - union = (input+targs).sum().float() - if not iou: return (2. * intersect / union if union > 0 else union.new([1.]).squeeze()) - else: return (intersect / (union-intersect+eps) if union > 0 else union.new([1.]).squeeze()) - -def psnr(input:Tensor, targs:Tensor)->Rank0Tensor: - return 10 * (1. / mean_squared_error(input, targs)).log10() - -def exp_rmspe(pred:Tensor, targ:Tensor)->Rank0Tensor: - "Exp RMSE between `pred` and `targ`." - pred,targ = flatten_check(pred,targ) - pred, targ = torch.exp(pred), torch.exp(targ) - pct_var = (targ - pred)/targ - return torch.sqrt((pct_var**2).mean()) - -def mean_absolute_error(pred:Tensor, targ:Tensor)->Rank0Tensor: - "Mean absolute error between `pred` and `targ`." - pred,targ = flatten_check(pred,targ) - return torch.abs(targ - pred).mean() - -def mean_squared_error(pred:Tensor, targ:Tensor)->Rank0Tensor: - "Mean squared error between `pred` and `targ`." - pred,targ = flatten_check(pred,targ) - return F.mse_loss(pred, targ) - -def root_mean_squared_error(pred:Tensor, targ:Tensor)->Rank0Tensor: - "Root mean squared error between `pred` and `targ`." - pred,targ = flatten_check(pred,targ) - return torch.sqrt(F.mse_loss(pred, targ)) - -def mean_squared_logarithmic_error(pred:Tensor, targ:Tensor)->Rank0Tensor: - "Mean squared logarithmic error between `pred` and `targ`." - pred,targ = flatten_check(pred,targ) - return F.mse_loss(torch.log(1 + pred), torch.log(1 + targ)) - -def explained_variance(pred:Tensor, targ:Tensor)->Rank0Tensor: - "Explained variance between `pred` and `targ`." - pred,targ = flatten_check(pred,targ) - var_pct = torch.var(targ - pred) / torch.var(targ) - return 1 - var_pct - -def r2_score(pred:Tensor, targ:Tensor)->Rank0Tensor: - "R2 score (coefficient of determination) between `pred` and `targ`." 
- pred,targ = flatten_check(pred,targ) - u = torch.sum((targ - pred) ** 2) - d = torch.sum((targ - targ.mean()) ** 2) - return 1 - u / d - -class RegMetrics(Callback): - "Stores predictions and targets to perform calculations on epoch end." - def on_epoch_begin(self, **kwargs): - self.targs, self.preds = Tensor([]), Tensor([]) - - def on_batch_end(self, last_output:Tensor, last_target:Tensor, **kwargs): - assert last_output.numel() == last_target.numel(), "Expected same numbers of elements in pred & targ" - self.preds = torch.cat((self.preds, last_output.cpu())) - self.targs = torch.cat((self.targs, last_target.cpu())) - -class R2Score(RegMetrics): - "Computes the R2 score (coefficient of determination)." - def on_epoch_end(self, last_metrics, **kwargs): - return add_metrics(last_metrics, r2_score(self.preds, self.targs)) - -class ExplainedVariance(RegMetrics): - "Computes the explained variance." - def on_epoch_end(self, last_metrics, **kwargs): - return add_metrics(last_metrics, explained_variance(self.preds, self.targs)) - -class RMSE(RegMetrics): - "Computes the root mean squared error." - def on_epoch_end(self, last_metrics, **kwargs): - return add_metrics(last_metrics, root_mean_squared_error(self.preds, self.targs)) - -class ExpRMSPE(RegMetrics): - "Computes the exponential of the root mean square error." - def on_epoch_end(self, last_metrics, **kwargs): - return add_metrics(last_metrics, exp_rmspe(self.preds, self.targs)) - -# Aliases -mse = mean_squared_error -mae = mean_absolute_error -msle = mean_squared_logarithmic_error -rmse = root_mean_squared_error - -class ConfusionMatrix(Callback): - "Computes the confusion matrix." - - def on_train_begin(self, **kwargs): - self.n_classes = 0 - - def on_epoch_begin(self, **kwargs): - self.cm = None - - def on_batch_end(self, last_output:Tensor, last_target:Tensor, **kwargs): - preds = last_output.argmax(-1).view(-1).cpu() - targs = last_target.cpu() - if self.n_classes == 0: - self.n_classes = last_output.shape[-1] - self.x = torch.arange(0, self.n_classes) - cm = ((preds==self.x[:, None]) & (targs==self.x[:, None, None])).sum(dim=2, dtype=torch.float32) - if self.cm is None: self.cm = cm - else: self.cm += cm - - def on_epoch_end(self, **kwargs): - self.metric = self.cm - -@dataclass -class CMScores(ConfusionMatrix): - "Base class for metrics which rely on the calculation of the precision and/or recall score." - average:Optional[str]="binary" # `binary`, `micro`, `macro`, `weigthed` or None - pos_label:int=1 # 0 or 1 - eps:float=1e-9 - - def _recall(self): - rec = torch.diag(self.cm) / self.cm.sum(dim=1) - if self.average is None: return rec - else: - if self.average == "micro": weights = self._weights(avg="weighted") - else: weights = self._weights(avg=self.average) - return (rec * weights).sum() - - def _precision(self): - prec = torch.diag(self.cm) / self.cm.sum(dim=0) - if self.average is None: return prec - else: - weights = self._weights(avg=self.average) - return (prec * weights).sum() - - def _weights(self, avg:str): - if self.n_classes != 2 and avg == "binary": - avg = self.average = "macro" - warn("average=`binary` was selected for a non binary case. Value for average has now been set to `macro` instead.") - if avg == "binary": - if self.pos_label not in (0, 1): - self.pos_label = 1 - warn("Invalid value for pos_label. 
It has now been set to 1.") - if self.pos_label == 1: return Tensor([0,1]) - else: return Tensor([1,0]) - elif avg == "micro": return self.cm.sum(dim=0) / self.cm.sum() - elif avg == "macro": return torch.ones((self.n_classes,)) / self.n_classes - elif avg == "weighted": return self.cm.sum(dim=1) / self.cm.sum() - - -class Recall(CMScores): - "Computes the Recall." - def on_epoch_end(self, last_metrics, **kwargs): - return add_metrics(last_metrics, self._recall()) - -class Precision(CMScores): - "Computes the Precision." - def on_epoch_end(self, last_metrics, **kwargs): - return add_metrics(last_metrics, self._precision()) - -@dataclass -class FBeta(CMScores): - "Computes the F`beta` score." - beta:float=2 - - def on_train_begin(self, **kwargs): - self.n_classes = 0 - self.beta2 = self.beta ** 2 - self.avg = self.average - if self.average != "micro": self.average = None - - def on_epoch_end(self, last_metrics, **kwargs): - prec = self._precision() - rec = self._recall() - metric = (1 + self.beta2) * prec * rec / (prec * self.beta2 + rec + self.eps) - metric[metric != metric] = 0 # removing potential "nan"s - if self.avg: metric = (self._weights(avg=self.avg) * metric).sum() - return add_metrics(last_metrics, metric) - - def on_train_end(self, **kwargs): self.average = self.avg - -@dataclass -class KappaScore(ConfusionMatrix): - "Computes the rate of agreement (Cohens Kappa)." - weights:Optional[str]=None # None, `linear`, or `quadratic` - - def on_epoch_end(self, last_metrics, **kwargs): - sum0 = self.cm.sum(dim=0) - sum1 = self.cm.sum(dim=1) - expected = torch.einsum('i,j->ij', (sum0, sum1)) / sum0.sum() - if self.weights is None: - w = torch.ones((self.n_classes, self.n_classes)) - w[self.x, self.x] = 0 - elif self.weights == "linear" or self.weights == "quadratic": - w = torch.zeros((self.n_classes, self.n_classes)) - w += torch.arange(self.n_classes, dtype=torch.float) - w = torch.abs(w - torch.t(w)) if self.weights == "linear" else (w - torch.t(w)) ** 2 - else: raise ValueError('Unknown weights. Expected None, "linear", or "quadratic".') - k = torch.sum(w * self.cm) / torch.sum(w * expected) - return add_metrics(last_metrics, 1-k) - -@dataclass -class MatthewsCorreff(ConfusionMatrix): - "Computes the Matthews correlation coefficient." - def on_epoch_end(self, last_metrics, **kwargs): - t_sum = self.cm.sum(dim=1) - p_sum = self.cm.sum(dim=0) - n_correct = torch.trace(self.cm) - n_samples = p_sum.sum() - cov_ytyp = n_correct * n_samples - torch.dot(t_sum, p_sum) - cov_ypyp = n_samples ** 2 - torch.dot(p_sum, p_sum) - cov_ytyt = n_samples ** 2 - torch.dot(t_sum, t_sum) - return add_metrics(last_metrics, cov_ytyp / torch.sqrt(cov_ytyt * cov_ypyp)) - -class Perplexity(Callback): - "Perplexity metric for language models." - def on_epoch_begin(self, **kwargs): self.loss,self.len = 0.,0 - - def on_batch_end(self, last_output, last_target, **kwargs): - self.loss += last_target.size(1) * CrossEntropyFlat()(last_output, last_target) - self.len += last_target.size(1) - - def on_epoch_end(self, last_metrics, **kwargs): - return add_metrics(last_metrics, torch.exp(self.loss / self.len)) - -def auc_roc_score(input:Tensor, targ:Tensor): - "Computes the area under the receiver operator characteristic (ROC) curve using the trapezoid method. Restricted binary classification tasks." 
- fpr, tpr = roc_curve(input, targ) - d = fpr[1:] - fpr[:-1] - sl1, sl2 = [slice(None)], [slice(None)] - sl1[-1], sl2[-1] = slice(1, None), slice(None, -1) - return (d * (tpr[tuple(sl1)] + tpr[tuple(sl2)]) / 2.).sum(-1) - -def roc_curve(input:Tensor, targ:Tensor): - "Computes the receiver operator characteristic (ROC) curve by determining the true positive ratio (TPR) and false positive ratio (FPR) for various classification thresholds. Restricted binary classification tasks." - targ = (targ == 1) - desc_score_indices = torch.flip(input.argsort(-1), [-1]) - input = input[desc_score_indices] - targ = targ[desc_score_indices] - d = input[1:] - input[:-1] - distinct_value_indices = torch.nonzero(d).transpose(0,1)[0] - threshold_idxs = torch.cat((distinct_value_indices, LongTensor([len(targ) - 1]).to(targ.device))) - tps = torch.cumsum(targ * 1, dim=-1)[threshold_idxs] - fps = (1 + threshold_idxs - tps) - if tps[0] != 0 or fps[0] != 0: - fps = torch.cat((LongTensor([0]), fps)) - tps = torch.cat((LongTensor([0]), tps)) - fpr, tpr = fps.float() / fps[-1], tps.float() / tps[-1] - return fpr, tpr - -@dataclass -class AUROC(Callback): - "Computes the area under the curve (AUC) score based on the receiver operator characteristic (ROC) curve. Restricted to binary classification tasks." - def on_epoch_begin(self, **kwargs): - self.targs, self.preds = LongTensor([]), Tensor([]) - - def on_batch_end(self, last_output:Tensor, last_target:Tensor, **kwargs): - last_output = F.softmax(last_output, dim=1)[:,-1] - self.preds = torch.cat((self.preds, last_output.cpu())) - self.targs = torch.cat((self.targs, last_target.cpu().long())) - - def on_epoch_end(self, last_metrics, **kwargs): - return add_metrics(last_metrics, auc_roc_score(self.preds, self.targs)) - -class MultiLabelFbeta(LearnerCallback): - "Computes the fbeta score for multilabel classification" - # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html - _order = -20 - def __init__(self, learn, beta=2, eps=1e-15, thresh=0.3, sigmoid=True, average="micro"): - super().__init__(learn) - self.eps, self.thresh, self.sigmoid, self.average, self.beta2 = \ - eps, thresh, sigmoid, average, beta**2 - - def on_train_begin(self, **kwargs): - self.c = self.learn.data.c - if self.average != "none": self.learn.recorder.add_metric_names([f'{self.average}_fbeta']) - else: self.learn.recorder.add_metric_names([f"fbeta_{c}" for c in self.learn.data.classes]) - - def on_epoch_begin(self, **kwargs): - dvc = self.learn.data.device - self.tp = torch.zeros(self.c).to(dvc) - self.total_pred = torch.zeros(self.c).to(dvc) - self.total_targ = torch.zeros(self.c).to(dvc) - - def on_batch_end(self, last_output, last_target, **kwargs): - pred, targ = (last_output.sigmoid() if self.sigmoid else last_output) > self.thresh, last_target.byte() - m = pred*targ - self.tp += m.sum(0).float() - self.total_pred += pred.sum(0).float() - self.total_targ += targ.sum(0).float() - - def fbeta_score(self, precision, recall): - return (1 + self.beta2)*(precision*recall)/((self.beta2*precision + recall) + self.eps) - - def on_epoch_end(self, last_metrics, **kwargs): - self.total_pred += self.eps - self.total_targ += self.eps - if self.average == "micro": - precision, recall = self.tp.sum() / self.total_pred.sum(), self.tp.sum() / self.total_targ.sum() - res = self.fbeta_score(precision, recall) - elif self.average == "macro": - res = self.fbeta_score((self.tp / self.total_pred), (self.tp / self.total_targ)).mean() - elif self.average == "weighted": - scores = 
self.fbeta_score((self.tp / self.total_pred), (self.tp / self.total_targ)) - res = (scores*self.total_targ).sum() / self.total_targ.sum() - elif self.average == "none": - res = listify(self.fbeta_score((self.tp / self.total_pred), (self.tp / self.total_targ))) - else: - raise Exception("Choose one of the average types: [micro, macro, weighted, none]") - - return add_metrics(last_metrics, res) diff --git a/spaces/XzJosh/Taffy-Bert-VITS2/transforms.py b/spaces/XzJosh/Taffy-Bert-VITS2/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Taffy-Bert-VITS2/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - 
left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * 
theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/XzJosh/Wenjing-Bert-VITS2/server.py b/spaces/XzJosh/Wenjing-Bert-VITS2/server.py deleted file mode 100644 index c736ca4f95fec853950eef6654ef79856beffc0a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Wenjing-Bert-VITS2/server.py +++ /dev/null @@ -1,123 +0,0 @@ -from flask import Flask, request, Response -from io import BytesIO -import torch -from av import open as avopen - -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -from scipy.io import wavfile - -# Flask Init -app = Flask(__name__) -app.config['JSON_AS_ASCII'] = False -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - print([f"{p}{t}" for p, t in zip(phone, tone)]) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert = get_bert(norm_text, word2ph, language_str) - - assert bert.shape[-1] == len(phone) - - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - - return bert, phone, tone, language - -def infer(text, sdp_ratio, noise_scale, noise_scale_w,length_scale,sid): - bert, phones, tones, lang_ids = get_text(text,"ZH", hps,) - with torch.no_grad(): - x_tst=phones.to(dev).unsqueeze(0) - tones=tones.to(dev).unsqueeze(0) - lang_ids=lang_ids.to(dev).unsqueeze(0) - bert = bert.to(dev).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(dev) - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(dev) - audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids,bert, sdp_ratio=sdp_ratio - , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy() - return audio - -def replace_punctuation(text, i=2): - punctuation = ",。?!" 
- for char in punctuation: - text = text.replace(char, char * i) - return text - -def wav2(i, o, format): - inp = avopen(i, 'rb') - out = avopen(o, 'wb', format=format) - if format == "ogg": format = "libvorbis" - - ostream = out.add_stream(format) - - for frame in inp.decode(audio=0): - for p in ostream.encode(frame): out.mux(p) - - for p in ostream.encode(None): out.mux(p) - - out.close() - inp.close() - -# Load Generator -hps = utils.get_hparams_from_file("./configs/config.json") - -dev='cuda' -net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model).to(dev) -_ = net_g.eval() - -_ = utils.load_checkpoint("logs/G_649000.pth", net_g, None,skip_optimizer=True) - -@app.route("/",methods=['GET','POST']) -def main(): - if request.method == 'GET': - try: - speaker = request.args.get('speaker') - text = request.args.get('text').replace("/n","") - sdp_ratio = float(request.args.get("sdp_ratio", 0.2)) - noise = float(request.args.get("noise", 0.5)) - noisew = float(request.args.get("noisew", 0.6)) - length = float(request.args.get("length", 1.2)) - if length >= 2: - return "Too big length" - if len(text) >=200: - return "Too long text" - fmt = request.args.get("format", "wav") - if None in (speaker, text): - return "Missing Parameter" - if fmt not in ("mp3", "wav", "ogg"): - return "Invalid Format" - except: - return "Invalid Parameter" - - with torch.no_grad(): - audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise, noise_scale_w=noisew, length_scale=length, sid=speaker) - - with BytesIO() as wav: - wavfile.write(wav, hps.data.sampling_rate, audio) - torch.cuda.empty_cache() - if fmt == "wav": - return Response(wav.getvalue(), mimetype="audio/wav") - wav.seek(0, 0) - with BytesIO() as ofp: - wav2(wav, ofp, fmt) - return Response( - ofp.getvalue(), - mimetype="audio/mpeg" if fmt == "mp3" else "audio/ogg" - ) diff --git a/spaces/YlcldKlns/bing/src/components/settings.tsx b/spaces/YlcldKlns/bing/src/components/settings.tsx deleted file mode 100644 index 80b8a2d3b252b875f5b6f7dfc2f6e3ad9cdfb22a..0000000000000000000000000000000000000000 --- a/spaces/YlcldKlns/bing/src/components/settings.tsx +++ /dev/null @@ -1,157 +0,0 @@ -import { useEffect, useState } from 'react' -import { useAtom } from 'jotai' -import { Switch } from '@headlessui/react' -import { toast } from 'react-hot-toast' -import { hashAtom, voiceAtom } from '@/state' -import { - Dialog, - DialogContent, - DialogDescription, - DialogFooter, - DialogHeader, - DialogTitle -} from '@/components/ui/dialog' -import { Button } from './ui/button' -import { Input } from './ui/input' -import { ChunkKeys, parseCookies, extraCurlFromCookie, encodeHeadersToCookie, getCookie, setCookie } from '@/lib/utils' -import { ExternalLink } from './external-link' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - - -export function Settings() { - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - const [loc, setLoc] = useAtom(hashAtom) - const [curlValue, setCurlValue] = useState(extraCurlFromCookie(parseCookies(document.cookie, ChunkKeys))) - const [imageOnly, setImageOnly] = useState(getCookie('IMAGE_ONLY') !== '0') - const [enableTTS, setEnableTTS] = useAtom(voiceAtom) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - - if (loc === 'settings') { - return ( - setLoc('')} modal> - - - 设置你的用户信息 - - 请使用 Edge 浏览器 - - 打开并登录 Bing - - ,然后再打开 - Challenge 接口 - 右键 
》检查。打开开发者工具,在网络里面找到 Create 接口 》右键复制》复制为 cURL(bash),粘贴到此处,然后保存。 -
- 图文示例: - 如何获取 BING_HEADER - - -
- -
- setCurlValue(e.target.value)} - /> -
- 身份信息仅用于画图(推荐) - setImageOnly(checked)} - > - - -
- - - - - - - -
- ) - } else if (loc === 'voice') { - return ( - setLoc('')} modal> - - - 语音设置 - - 目前仅支持 PC 端 Edge 及 Chrome 浏览器 - - - -
- 启用语音回答 - setEnableTTS(checked)} - > - - -
- - - - -
-
- ) - } - return null -} diff --git a/spaces/Yudha515/Rvc-Models/audiocraft/models/lm.py b/spaces/Yudha515/Rvc-Models/audiocraft/models/lm.py deleted file mode 100644 index c8aad8f06797eef3293605056e1de14d07c56c2a..0000000000000000000000000000000000000000 --- a/spaces/Yudha515/Rvc-Models/audiocraft/models/lm.py +++ /dev/null @@ -1,527 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass -from functools import partial -import logging -import math -import typing as tp - -import torch -from torch import nn - -from ..utils import utils -from ..modules.streaming import StreamingModule, State -from ..modules.transformer import StreamingTransformer, create_norm_fn -from ..modules.conditioners import ( - ConditionFuser, - ClassifierFreeGuidanceDropout, - AttributeDropout, - ConditioningProvider, - ConditioningAttributes, - ConditionType, -) -from ..modules.codebooks_patterns import CodebooksPatternProvider -from ..modules.activations import get_activation_fn - - -logger = logging.getLogger(__name__) -ConditionTensors = tp.Dict[str, ConditionType] -CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]] - - -def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None): - """LM layer initialization. - Inspired from xlformers: https://github.com/fairinternal/xlformers - - Args: - method (str): Method name for init function. Valid options are: - 'gaussian', 'uniform'. - input_dim (int): Input dimension of the initialized module. - init_depth (Optional[int]): Optional init depth value used to rescale - the standard deviation if defined. - """ - # Compute std - std = 1 / math.sqrt(input_dim) - # Rescale with depth - if init_depth is not None: - std = std / math.sqrt(2 * init_depth) - - if method == 'gaussian': - return partial( - torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std - ) - elif method == 'uniform': - bound = math.sqrt(3) * std # ensure the standard deviation is `std` - return partial(torch.nn.init.uniform_, a=-bound, b=bound) - else: - raise ValueError("Unsupported layer initialization method") - - -def init_layer(m: nn.Module, - method: str, - init_depth: tp.Optional[int] = None, - zero_bias_init: bool = False): - """Wrapper around ``get_init_fn`` for proper initialization of LM modules. - - Args: - m (nn.Module): Module to initialize. - method (str): Method name for the init function. - init_depth (Optional[int]): Optional init depth value used to rescale - the standard deviation if defined. - zero_bias_init (bool): Whether to initialize the bias to 0 or not. - """ - if isinstance(m, nn.Linear): - init_fn = get_init_fn(method, m.in_features, init_depth=init_depth) - if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: - weight = m.weight.float() - init_fn(weight) - m.weight.data[:] = weight.half() - else: - init_fn(m.weight) - if zero_bias_init and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Embedding): - init_fn = get_init_fn(method, m.embedding_dim, init_depth=None) - if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: - weight = m.weight.float() - init_fn(weight) - m.weight.data[:] = weight.half() - else: - init_fn(m.weight) - - -class ScaledEmbedding(nn.Embedding): - """Boost learning rate for embeddings (with `scale`). 
- """ - def __init__(self, *args, lr=None, **kwargs): - super().__init__(*args, **kwargs) - self.lr = lr - - def make_optim_group(self): - group = {"params": list(self.parameters())} - if self.lr is not None: - group["lr"] = self.lr - return group - - -@dataclass -class LMOutput: - # The logits are already re-aligned with the input codes - # hence no extra shift is required, e.g. when computing CE - logits: torch.Tensor # [B, K, T, card] - mask: torch.Tensor # [B, K, T] - - -class LMModel(StreamingModule): - """Transformer-based language model on multiple streams of codes. - - Args: - pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving. - condition_provider (MusicConditioningProvider): Conditioning provider from metadata. - fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input. - n_q (int): Number of parallel streams to model. - card (int): Cardinality, vocabulary size. - dim (int): Dimension of the transformer encoder. - num_heads (int): Number of heads for the transformer encoder. - hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder. - norm (str): Normalization method. - norm_first (bool): Use pre-norm instead of post-norm. - emb_lr (Optional[float]): Embedding-specific learning rate. - bias_proj (bool): Use bias for output projections. - weight_init (Optional[str]): Method for weight initialization. - depthwise_init (Optional[str]): Method for depthwise weight initialization. - zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros. - cfg_dropout (float): Classifier-free guidance dropout. - cfg_coef (float): Classifier-free guidance coefficient. - attribute_dropout (dict): Attribute dropout probabilities. - two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps. - **kwargs: Additional parameters for the transformer encoder. 
- """ - def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider, - fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8, - hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False, - emb_lr: tp.Optional[float] = None, bias_proj: bool = True, - weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None, - zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0, - attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False, - **kwargs): - super().__init__() - self.cfg_coef = cfg_coef - self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout) - self.att_dropout = AttributeDropout(p=attribute_dropout) - self.condition_provider = condition_provider - self.fuser = fuser - self.card = card - embed_dim = self.card + 1 - self.n_q = n_q - self.dim = dim - self.pattern_provider = pattern_provider - self.two_step_cfg = two_step_cfg - self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)]) - if 'activation' in kwargs: - kwargs['activation'] = get_activation_fn(kwargs['activation']) - self.transformer = StreamingTransformer( - d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim), - norm=norm, norm_first=norm_first, **kwargs) - self.out_norm: tp.Optional[nn.Module] = None - if norm_first: - self.out_norm = create_norm_fn(norm, dim) - self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)]) - self._init_weights(weight_init, depthwise_init, zero_bias_init) - self._fsdp: tp.Optional[nn.Module] - self.__dict__['_fsdp'] = None - - def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool): - """Initialization of the transformer module weights. - - Args: - weight_init (Optional[str]): Weight initialization strategy. See ``get_init_fn`` for valid options. - depthwise_init (Optional[str]): Depwthwise initialization strategy. The following options are valid: - 'current' where the depth corresponds to the current layer index or 'global' where the total number - of layer is used as depth. If not set, no depthwise initialization strategy is used. - zero_bias_init (bool): Whether to initalize bias to zero or not. - """ - assert depthwise_init is None or depthwise_init in ['current', 'global'] - assert depthwise_init is None or weight_init is not None, \ - "If 'depthwise_init' is defined, a 'weight_init' method should be provided." 
- assert not zero_bias_init or weight_init is not None, \ - "If 'zero_bias_init', a 'weight_init' method should be provided" - - if weight_init is None: - return - - for emb_layer in self.emb: - init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init) - - for layer_idx, tr_layer in enumerate(self.transformer.layers): - depth = None - if depthwise_init == 'current': - depth = layer_idx + 1 - elif depthwise_init == 'global': - depth = len(self.transformer.layers) - init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init) - tr_layer.apply(init_fn) - - for linear in self.linears: - init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init) - - @property - def special_token_id(self) -> int: - return self.card - - @property - def num_codebooks(self) -> int: - return self.n_q - - def forward(self, sequence: torch.Tensor, - conditions: tp.List[ConditioningAttributes], - condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor: - """Apply language model on sequence and conditions. - Given a tensor of sequence of shape [B, K, S] with K the number of codebooks and - S the sequence steps, return the logits with shape [B, card, K, S]. - - Args: - indices (torch.Tensor): indices of the codes to model. - conditions (list[ConditioningAttributes]): conditionings to use when modeling - the given codes. Note that when evaluating multiple time with the same conditioning - you should pre-compute those and pass them as `condition_tensors`. - condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning - tensors, see `conditions`. - Returns: - torch.Tensor: Logits. - """ - B, K, S = sequence.shape - assert K == self.num_codebooks, 'Sequence shape must match the specified number of codebooks' - input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)]) - if condition_tensors is None: - assert not self._is_streaming, "Conditions tensors should be precomputed when streaming." - # apply dropout modules - conditions = self.cfg_dropout(conditions) - conditions = self.att_dropout(conditions) - tokenized = self.condition_provider.tokenize(conditions) - # encode conditions and fuse, both have a streaming cache to not recompute when generating. - condition_tensors = self.condition_provider(tokenized) - else: - assert not conditions, "Shouldn't pass both conditions and condition_tensors." - - input_, cross_attention_input = self.fuser(input_, condition_tensors) - - out = self.transformer(input_, cross_attention_src=cross_attention_input) - if self.out_norm: - out = self.out_norm(out) - logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card] - - # remove the prefix from the model outputs - if len(self.fuser.fuse2cond['prepend']) > 0: - logits = logits[:, :, -S:] - - return logits # [B, K, S, card] - - def compute_predictions( - self, codes: torch.Tensor, - conditions: tp.List[ConditioningAttributes], - condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput: - """Given an input tensor of codes [B, K, T] and list of conditions, runs the model - forward using the specified codes interleaving pattern. - - Args: - codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size, - K the number of codebooks and T the number of timesteps. - conditions (list[ConditioningAttributes]): conditionings to use when modeling - the given codes. 
Note that when evaluating multiple time with the same conditioning - you should pre-compute those and pass them as `condition_tensors`. - condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning - tensors, see `conditions`. - Returns: - LMOutput: Language model outputs - logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes, - i.e. the first item corresponds to logits to predict the first code, meaning that - no additional shifting of codes and logits is required. - mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions. - Given the specified interleaving strategies, parts of the logits and codes should - not be considered as valid predictions because of invalid context. - """ - B, K, T = codes.shape - codes = codes.contiguous() - # map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens - pattern = self.pattern_provider.get_pattern(T) - sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence( - codes, self.special_token_id, keep_only_valid_steps=True - ) - # apply model on pattern sequence - model = self if self._fsdp is None else self._fsdp - logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card] - # map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card] - # and provide the corresponding mask over invalid positions of tokens - logits = logits.permute(0, 3, 1, 2) # [B, card, K, S] - # note: we use nans as special token to make it obvious if we feed unexpected logits - logits, logits_indexes, logits_mask = pattern.revert_pattern_logits( - logits, float('nan'), keep_only_valid_steps=True - ) - logits = logits.permute(0, 2, 3, 1) # [B, K, T, card] - logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T] - return LMOutput(logits, logits_mask) - - def _sample_next_token(self, - sequence: torch.Tensor, - cfg_conditions: CFGConditions, - unconditional_state: State, - use_sampling: bool = False, - temp: float = 1.0, - top_k: int = 0, - top_p: float = 0.0, - cfg_coef: tp.Optional[float] = None) -> torch.Tensor: - """Sample next token from the model given a sequence and a set of conditions. The model supports - multiple sampling strategies (greedy sampling, softmax, top-k, top-p...). - - Args: - sequence (torch.Tensor): Current sequence of shape [B, K, S] - with K corresponding to the number of codebooks and S the number of sequence steps. - S = 1 in streaming mode, except for the first step that contains a bigger prompt. - condition_tensors (Dict[str, ConditionType): Set of conditions. If CFG is used, - should be twice the batch size, being the concatenation of the conditions + null conditions. - use_sampling (bool): Whether to use a sampling strategy or not. - temp (float): Sampling temperature. - top_k (int): K for "top-k" sampling. - top_p (float): P for "top-p" sampling. - cfg_coef (float): classifier free guidance coefficient - Returns: - next_token (torch.Tensor): Next token tensor of shape [B, K, 1]. 
- """ - B = sequence.shape[0] - cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef - model = self if self._fsdp is None else self._fsdp - if self.two_step_cfg and cfg_conditions != {}: - assert isinstance(cfg_conditions, tuple) - condition_tensors, null_condition_tensors = cfg_conditions - cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors) - state = self.get_streaming_state() - self.set_streaming_state(unconditional_state) - uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors) - unconditional_state.update(self.get_streaming_state()) - self.set_streaming_state(state) - logits = uncond_logits + (cond_logits - uncond_logits) * self.cfg_coef - else: - assert isinstance(cfg_conditions, dict) - condition_tensors = cfg_conditions - if condition_tensors: - # Preparing for CFG, predicting both conditional and unconditional logits. - sequence = torch.cat([sequence, sequence], dim=0) - all_logits = model( - sequence, - conditions=[], condition_tensors=condition_tensors) - if condition_tensors: - cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card] - logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef - else: - logits = all_logits - - logits = logits.permute(0, 1, 3, 2) # [B, K, card, T] - logits = logits[..., -1] # [B x K x card] - - # Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error. - if use_sampling and temp > 0.0: - probs = torch.softmax(logits / temp, dim=-1) - if top_p > 0.0: - next_token = utils.sample_top_p(probs, p=top_p) - elif top_k > 0: - next_token = utils.sample_top_k(probs, k=top_k) - else: - next_token = utils.multinomial(probs, num_samples=1) - else: - next_token = torch.argmax(logits, dim=-1, keepdim=True) - - return next_token - - @torch.no_grad() - def generate(self, - prompt: tp.Optional[torch.Tensor] = None, - conditions: tp.List[ConditioningAttributes] = [], - num_samples: tp.Optional[int] = None, - max_gen_len: int = 256, - use_sampling: bool = True, - temp: float = 1.0, - top_k: int = 250, - top_p: float = 0.0, - cfg_coef: tp.Optional[float] = None, - two_step_cfg: bool = False, - remove_prompts: bool = False, - check: bool = False, - callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor: - """Generate tokens sampling from the model given a prompt or unconditionally. Generation can - be perform in a greedy fashion or using sampling with top K and top P strategies. - - Args: - prompt (Optional[torch.Tensor]): Prompt tokens of shape [B, K, T]. - conditions_tensors (Dict[str, torch.Tensor]): Set of conditions or None. - num_samples (int or None): Number of samples to generate when no prompt and no conditions are given. - max_gen_len (int): Maximum generation length. - use_sampling (bool): Whether to use a sampling strategy or not. - temp (float): Sampling temperature. - top_k (int): K for "top-k" sampling. - top_p (float): P for "top-p" sampling. - remove_prompts (bool): Whether to remove prompts from generation or not. - Returns: - torch.Tensor: Generated tokens. - """ - assert not self.training, "generation shouldn't be used in training mode." - first_param = next(iter(self.parameters())) - device = first_param.device - - # Checking all input shapes are consistents. 
- possible_num_samples = [] - if num_samples is not None: - possible_num_samples.append(num_samples) - elif prompt is not None: - possible_num_samples.append(prompt.shape[0]) - elif conditions: - possible_num_samples.append(len(conditions)) - else: - possible_num_samples.append(1) - assert [x == possible_num_samples[0] for x in possible_num_samples], "Inconsitent inputs shapes" - num_samples = possible_num_samples[0] - - # below we create set of conditions: one conditional and one unconditional - # to do that we merge the regular condition together with the null condition - # we then do 1 forward pass instead of 2. - # the reason for that is two-fold: - # 1. it is about x2 faster than doing 2 forward passes - # 2. avoid the streaming API treating the 2 passes as part of different time steps - # We also support doing two different passes, in particular to ensure that - # the padding structure is exactly the same between train anf test. - # With a batch size of 1, this can be slower though. - cfg_conditions: CFGConditions - two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg - if conditions: - null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions) - if two_step_cfg: - cfg_conditions = ( - self.condition_provider(self.condition_provider.tokenize(conditions)), - self.condition_provider(self.condition_provider.tokenize(null_conditions)), - ) - else: - conditions = conditions + null_conditions - tokenized = self.condition_provider.tokenize(conditions) - cfg_conditions = self.condition_provider(tokenized) - else: - cfg_conditions = {} - - if prompt is None: - assert num_samples > 0 - prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device) - - B, K, T = prompt.shape - start_offset = T - assert start_offset < max_gen_len - - pattern = self.pattern_provider.get_pattern(max_gen_len) - # this token is used as default value for codes that are not generated yet - unknown_token = -1 - - # we generate codes up to the max_gen_len that will be mapped to the pattern sequence - gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device) - # filling the gen_codes with the prompt if needed - gen_codes[..., :start_offset] = prompt - # create the gen_sequence with proper interleaving from the pattern: [B, K, S] - gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id) - # retrieve the start_offset in the sequence: - # it is the first sequence step that contains the `start_offset` timestep - start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset) - assert start_offset_sequence is not None - - with self.streaming(): - unconditional_state = self.get_streaming_state() - prev_offset = 0 - gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S] - for offset in range(start_offset_sequence, gen_sequence_len): - # get current sequence (note that the streaming API is providing the caching over previous offsets) - curr_sequence = gen_sequence[..., prev_offset:offset] - curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1) - if check: - # check coherence between mask and sequence - assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all() - # should never happen as gen_sequence is filled progressively - assert not (curr_sequence == unknown_token).any() - # sample next token from the model, next token shape is [B, K, 1] - next_token = self._sample_next_token( - curr_sequence, cfg_conditions, 
unconditional_state, use_sampling, temp, top_k, top_p, - cfg_coef=cfg_coef) - # ensure the tokens that should be masked are properly set to special_token_id - # as the model never output special_token_id - valid_mask = mask[..., offset:offset+1].expand(B, -1, -1) - next_token[~valid_mask] = self.special_token_id - # ensure we don't overwrite prompt tokens, we only write over unknown tokens - # (then mask tokens should be left as is as well, which is correct) - gen_sequence[..., offset:offset+1] = torch.where( - gen_sequence[..., offset:offset+1] == unknown_token, - next_token, gen_sequence[..., offset:offset+1] - ) - prev_offset = offset - if callback is not None: - callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence) - unconditional_state.clear() - - # ensure sequence has been entirely filled - assert not (gen_sequence == unknown_token).any() - # ensure gen_sequence pattern and mask are matching - # which means the gen_sequence is valid according to the pattern - assert ( - gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id) - ).all() - # get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps - out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token) - - # sanity checks over the returned codes and corresponding masks - assert (out_codes[..., :max_gen_len] != unknown_token).all() - assert (out_mask[..., :max_gen_len] == 1).all() - - out_start_offset = start_offset if remove_prompts else 0 - out_codes = out_codes[..., out_start_offset:max_gen_len] - - # ensure the returned codes are all valid - assert (out_codes >= 0).all() and (out_codes <= self.card).all() - return out_codes diff --git a/spaces/YueMafighting/FollowYourPose/Dockerfile b/spaces/YueMafighting/FollowYourPose/Dockerfile deleted file mode 100644 index 845d631472e9eca8e3bc2da734bb1b17eb72cc02..0000000000000000000000000000000000000000 --- a/spaces/YueMafighting/FollowYourPose/Dockerfile +++ /dev/null @@ -1,64 +0,0 @@ -FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04 -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y --no-install-recommends \ - git \ - zip \ - unzip \ - git-lfs \ - wget \ - curl \ - # ffmpeg \ - ffmpeg \ - x264 \ - # python build dependencies \ - build-essential \ - libssl-dev \ - zlib1g-dev \ - libbz2-dev \ - libreadline-dev \ - libsqlite3-dev \ - libncursesw5-dev \ - xz-utils \ - tk-dev \ - libxml2-dev \ - libxmlsec1-dev \ - libffi-dev \ - liblzma-dev && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* -# RUN apt-get update && \ -# apt-get install zip -# RUN wget https://github.com/ChenyangQiQi/FateZero/releases/download/v0.0.1/style.zip && unzip style.zip -RUN useradd -m -u 1000 user -RUN sudo apt-get install unzip -USER user -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:${PATH} -WORKDIR ${HOME}/app - -RUN curl https://pyenv.run | bash -ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH} -ENV PYTHON_VERSION=3.10.9 -RUN pyenv install ${PYTHON_VERSION} && \ - pyenv global ${PYTHON_VERSION} && \ - pyenv rehash && \ - pip install --no-cache-dir -U pip setuptools wheel - -RUN pip install --no-cache-dir -U torch==1.13.1 torchvision==0.14.1 -COPY --chown=1000 requirements.txt /tmp/requirements.txt -RUN pip install --no-cache-dir -U -r /tmp/requirements.txt - -COPY --chown=1000 . 
${HOME}/app -RUN ls -a -# RUN cd ./FateZero/ckpt && bash download.sh -RUN cd {WORKDIR}/data && bash download.sh -ENV PYTHONPATH=${HOME}/app \ - PYTHONUNBUFFERED=1 \ - GRADIO_ALLOW_FLAGGING=never \ - GRADIO_NUM_PORTS=1 \ - GRADIO_SERVER_NAME=0.0.0.0 \ - GRADIO_THEME=huggingface \ - SYSTEM=spaces -CMD ["python", "app.py"] diff --git a/spaces/Yuliang/ECON/lib/common/render_utils.py b/spaces/Yuliang/ECON/lib/common/render_utils.py deleted file mode 100644 index 389eab740fc341b9395e1e008b4fa91e9ef3cf83..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ECON/lib/common/render_utils.py +++ /dev/null @@ -1,226 +0,0 @@ -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. -# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2019 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. -# -# Contact: ps-license@tuebingen.mpg.de - -import math -from typing import NewType - -import numpy as np -import torch -import trimesh -from pytorch3d.renderer.mesh import rasterize_meshes -from pytorch3d.structures import Meshes -from torch import nn - -Tensor = NewType("Tensor", torch.Tensor) - - -def solid_angles(points: Tensor, triangles: Tensor, thresh: float = 1e-8) -> Tensor: - """Compute solid angle between the input points and triangles - Follows the method described in: - The Solid Angle of a Plane Triangle - A. VAN OOSTEROM AND J. STRACKEE - IEEE TRANSACTIONS ON BIOMEDICAL ENGINEERING, - VOL. BME-30, NO. 2, FEBRUARY 1983 - Parameters - ----------- - points: BxQx3 - Tensor of input query points - triangles: BxFx3x3 - Target triangles - thresh: float - float threshold - Returns - ------- - solid_angles: BxQxF - A tensor containing the solid angle between all query points - and input triangles - """ - # Center the triangles on the query points. Size should be BxQxFx3x3 - centered_tris = triangles[:, None] - points[:, :, None, None] - - # BxQxFx3 - norms = torch.norm(centered_tris, dim=-1) - - # Should be BxQxFx3 - cross_prod = torch.cross(centered_tris[:, :, :, 1], centered_tris[:, :, :, 2], dim=-1) - # Should be BxQxF - numerator = (centered_tris[:, :, :, 0] * cross_prod).sum(dim=-1) - del cross_prod - - dot01 = (centered_tris[:, :, :, 0] * centered_tris[:, :, :, 1]).sum(dim=-1) - dot12 = (centered_tris[:, :, :, 1] * centered_tris[:, :, :, 2]).sum(dim=-1) - dot02 = (centered_tris[:, :, :, 0] * centered_tris[:, :, :, 2]).sum(dim=-1) - del centered_tris - - denominator = ( - norms.prod(dim=-1) + dot01 * norms[:, :, :, 2] + dot02 * norms[:, :, :, 1] + - dot12 * norms[:, :, :, 0] - ) - del dot01, dot12, dot02, norms - - # Should be BxQ - solid_angle = torch.atan2(numerator, denominator) - del numerator, denominator - - torch.cuda.empty_cache() - - return 2 * solid_angle - - -def winding_numbers(points: Tensor, triangles: Tensor, thresh: float = 1e-8) -> Tensor: - """Uses winding_numbers to compute inside/outside - Robust inside-outside segmentation using generalized winding numbers - Alec Jacobson, - Ladislav Kavan, - Olga Sorkine-Hornung - Fast Winding Numbers for Soups and Clouds SIGGRAPH 2018 - Gavin Barill - NEIL G. 
Dickson - Ryan Schmidt - David I.W. Levin - and Alec Jacobson - Parameters - ----------- - points: BxQx3 - Tensor of input query points - triangles: BxFx3x3 - Target triangles - thresh: float - float threshold - Returns - ------- - winding_numbers: BxQ - A tensor containing the Generalized winding numbers - """ - # The generalized winding number is the sum of solid angles of the point - # with respect to all triangles. - return (1 / (4 * math.pi) * solid_angles(points, triangles, thresh=thresh).sum(dim=-1)) - - -def batch_contains(verts, faces, points): - - B = verts.shape[0] - N = points.shape[1] - - verts = verts.detach().cpu() - faces = faces.detach().cpu() - points = points.detach().cpu() - contains = torch.zeros(B, N) - - for i in range(B): - contains[i] = torch.as_tensor(trimesh.Trimesh(verts[i], faces[i]).contains(points[i])) - - return 2.0 * (contains - 0.5) - - -def dict2obj(d): - if not isinstance(d, dict): - return d - - class C(object): - pass - - o = C() - for k in d: - o.__dict__[k] = dict2obj(d[k]) - return o - - -def face_vertices(vertices, faces): - """ - :param vertices: [batch size, number of vertices, 3] - :param faces: [batch size, number of faces, 3] - :return: [batch size, number of faces, 3, 3] - """ - - bs, nv = vertices.shape[:2] - bs, nf = faces.shape[:2] - device = vertices.device - faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None] - vertices = vertices.reshape((bs * nv, vertices.shape[-1])) - - return vertices[faces.long()] - - -class Pytorch3dRasterizer(nn.Module): - """Borrowed from https://github.com/facebookresearch/pytorch3d - Notice: - x,y,z are in image space, normalized - can only render squared image now - """ - def __init__( - self, image_size=224, blur_radius=0.0, faces_per_pixel=1, device=torch.device("cuda:0") - ): - """ - use fixed raster_settings for rendering faces - """ - super().__init__() - raster_settings = { - "image_size": image_size, - "blur_radius": blur_radius, - "faces_per_pixel": faces_per_pixel, - "bin_size": -1, - "max_faces_per_bin": None, - "perspective_correct": False, - "cull_backfaces": True, - } - raster_settings = dict2obj(raster_settings) - self.raster_settings = raster_settings - self.device = device - - def forward(self, vertices, faces, attributes=None): - fixed_vertices = vertices.clone() - fixed_vertices[..., :2] = -fixed_vertices[..., :2] - meshes_screen = Meshes(verts=fixed_vertices.float(), faces=faces.long()) - raster_settings = self.raster_settings - pix_to_face, zbuf, bary_coords, dists = rasterize_meshes( - meshes_screen, - image_size=raster_settings.image_size, - blur_radius=raster_settings.blur_radius, - faces_per_pixel=raster_settings.faces_per_pixel, - bin_size=raster_settings.bin_size, - max_faces_per_bin=raster_settings.max_faces_per_bin, - perspective_correct=raster_settings.perspective_correct, - ) - vismask = (pix_to_face > -1).float() - D = attributes.shape[-1] - attributes = attributes.clone() - attributes = attributes.view( - attributes.shape[0] * attributes.shape[1], 3, attributes.shape[-1] - ) - N, H, W, K, _ = bary_coords.shape - mask = pix_to_face == -1 - pix_to_face = pix_to_face.clone() - pix_to_face[mask] = 0 - idx = pix_to_face.view(N * H * W * K, 1, 1).expand(N * H * W * K, 3, D) - pixel_face_vals = attributes.gather(0, idx).view(N, H, W, K, 3, D) - pixel_vals = (bary_coords[..., None] * pixel_face_vals).sum(dim=-2) - pixel_vals[mask] = 0 # Replace masked values in output. 
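Editor's aside on the geometry helpers defined earlier in this file: `face_vertices` and `winding_numbers` combine into an inside/outside test for query points. The sketch below is only a shape/API illustration with made-up sizes and random data, not part of the original code; a meaningful occupancy result would require a watertight mesh.

```python
import torch

# Hypothetical sizes: B meshes, each with V vertices, F faces, and Q query points.
B, V, F, Q = 2, 500, 1000, 256
verts = torch.rand(B, V, 3)
faces = torch.randint(0, V, (B, F, 3))
points = torch.rand(B, Q, 3)

tris = face_vertices(verts, faces)   # [B, F, 3, 3], helper defined earlier in this file
wn = winding_numbers(points, tris)   # [B, Q]; ~1 inside, ~0 outside for a watertight mesh
inside = wn > 0.5                    # boolean occupancy per query point
```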
- pixel_vals = pixel_vals[:, :, :, 0].permute(0, 3, 1, 2) - pixel_vals = torch.cat([pixel_vals, vismask[:, :, :, 0][:, None, :, :]], dim=1) - return pixel_vals - - def get_texture(self, uvcoords, uvfaces, verts, faces, verts_color): - - batch_size = verts.shape[0] - uv_verts_color = face_vertices(verts_color, faces.expand(batch_size, -1, - -1)).to(self.device) - uv_map = self.forward( - uvcoords.expand(batch_size, -1, -1), uvfaces.expand(batch_size, -1, -1), uv_verts_color - )[:, :3] - uv_map_npy = np.flip(uv_map.squeeze(0).permute(1, 2, 0).cpu().numpy(), 0) - - return uv_map_npy diff --git a/spaces/Yunshansongbai/SVC-Nahida/app.py b/spaces/Yunshansongbai/SVC-Nahida/app.py deleted file mode 100644 index 7321a5fc776b31c97fe5729b6c4fb88a0e389ee1..0000000000000000000000000000000000000000 --- a/spaces/Yunshansongbai/SVC-Nahida/app.py +++ /dev/null @@ -1,194 +0,0 @@ -import io -import os - -import gradio as gr -import librosa -import numpy as np -import soundfile -from inference.infer_tool import Svc -import logging -import os -import paddle -import requests -import utils -from spleeter import Separator -import time -from datetime import datetime, timedelta - -build_dir=os.getcwd() -if build_dir == "/home/aistudio": - build_dir += "/build" - -model_dir=build_dir+'/trained_models' - -model_list_path = model_dir + "/model_list.txt" - -# 筛选出文件夹 -models = [] -for filename in os.listdir(model_dir): - # 判断文件名是否以 '.pdparams' 结尾,并且不包含后缀部分 - if filename.endswith('.pdparams') and os.path.splitext(filename)[0].isalpha(): - models.append(os.path.splitext(filename)[0]) -cache_model = {} - -def callback(text): - if text == "reboot": - os._exit(0) - one_hour_later = datetime.now() + timedelta(hours=1) - else: - global start_time - if time.time() - start_time >= 3600: - os._exit(0) - one_hour_later = datetime.now() + timedelta(hours=1) - else: - return text - -def separate_fn(song_input): - try: - if song_input is None: - return "请上传歌曲",None,None,None,None - params_2stems = { - 'sample_rate': 44100, - 'frame_length': 4096, - 'frame_step': 1024, - 'T': 512, - 'F': 1024, - 'num_instruments': ['vocals', 'instrumental'], - 'output_dir': build_dir+'/output_2stems', - 'checkpoint_path': build_dir+'/spleeter', - 'use_elu': False} - sampling_rate, song = song_input - soundfile.write("temp.wav", song, sampling_rate, format="wav") - # 初始化分离器 - sep = Separator(params_2stems) - sep.separate('temp.wav') - vocal_path = params_2stems["output_dir"]+"/temp-vocals.wav" - instrumental_path = params_2stems["output_dir"]+"/temp-instrumental.wav" - return "分离成功,请继续前往体验【转换】和【混音】",vocal_path,instrumental_path,vocal_path,instrumental_path - except Exception as e: - import traceback - return traceback.format_exc() , None,None,None,None - - -def convert_fn(model_name, input_audio,input_audio_micro, vc_transform, auto_f0,cluster_ratio, slice_db, noise_scale): - try: - if model_name in cache_model: - model = cache_model[model_name] - else: - if paddle.device.is_compiled_with_cuda()==False and len(cache_model)!=0: - return f"目前运行环境为CPU,受制于平台算力,每次启动本项目只允许加载1个模型,当前已加载{next(iter(cache_model))}",None,None - config_path = f"{build_dir}/trained_models/config.json" - model = Svc(f"{build_dir}/trained_models/{model_name}.pdparams", config_path,mode="test") - cache_model[model_name] = model - if input_audio is None and input_audio_micro is None: - return "请上传音频", None,None - if input_audio_micro is not None: - input_audio = input_audio_micro - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - audio = (audio / 
np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - print(audio.shape) - out_wav_path = "temp.wav" - soundfile.write(out_wav_path, audio, 16000, format="wav") - print(cluster_ratio, auto_f0, noise_scale) - _audio = model.slice_inference(out_wav_path, model_name, vc_transform, slice_db, cluster_ratio, auto_f0, noise_scale) - del model - return "转换成功,请继续前往体验【混音】", (44100, _audio),(44100, _audio) - except Exception as e: - import traceback - return traceback.format_exc() , None,None - -def compose_fn(input_vocal,input_instrumental,mixing_ratio=0.5): - try: - outlog = "混音成功" - if input_vocal is None: - return "请上传人声", None - if input_instrumental is None: - return "请上传伴奏", None - vocal_sampling_rate, vocal = input_vocal - vocal_duration = vocal.shape[0] / vocal_sampling_rate - vocal = (vocal / np.iinfo(vocal.dtype).max).astype(np.float32) - if len(vocal.shape) > 1: - vocal = librosa.to_mono(vocal.transpose(1, 0)) - if vocal_sampling_rate != 44100: - vocal = librosa.resample(vocal, orig_sr=vocal_sampling_rate, target_sr=44100) - - instrumental_sampling_rate, instrumental = input_instrumental - instrumental_duration = instrumental.shape[0] / instrumental_sampling_rate - instrumental = (instrumental / np.iinfo(instrumental.dtype).max).astype(np.float32) - if len(instrumental.shape) > 1: - instrumental = librosa.to_mono(instrumental.transpose(1, 0)) - if instrumental_sampling_rate != 44100: - instrumental = librosa.resample(instrumental, orig_sr=instrumental_sampling_rate, target_sr=44100) - if len(vocal)!=len(instrumental): - min_length = min(len(vocal),len(instrumental)) - instrumental = instrumental[:min_length] - vocal = vocal[:min_length] - outlog = "人声伴奏长度不一致,已自动截断较长的音频" - - mixed_audio = (1 - mixing_ratio) * vocal + mixing_ratio * instrumental - mixed_audio_data = mixed_audio.astype(np.float32) - return outlog,(44100,mixed_audio_data) - except Exception as e: - import traceback - return traceback.format_exc() , None - - -app = gr.Blocks() - -with app: - start_time = time.time() - one_hour_later = datetime.now() + timedelta(hours=1) - gr.Markdown('

SVC歌声转换全流程体验(伴奏分离,转换,混音)
') - with gr.Tabs() as tabs: - with gr.TabItem("人声伴奏分离"): - gr.Markdown('

该项目人声分离的效果弱于UVR5,如自备分离好的伴奏和人声可跳过该步骤
') - song_input = gr.Audio(label="上传歌曲(tips:上传后点击右上角✏可以进行歌曲剪辑)",interactive=True) - gr.Examples(examples=[build_dir+"/examples/song/blue.wav",build_dir+"/examples/song/Counter_clockwise_Clock.wav",build_dir+"/examples/song/one_last_kiss.wav"],inputs=song_input,label="歌曲样例") - - btn_separate = gr.Button("人声伴奏分离", variant="primary") - text_output1 = gr.Textbox(label="输出信息") - vocal_output1 = gr.Audio(label="输出人声",interactive=False) - instrumental_output1 = gr.Audio(label="输出伴奏",interactive=False) - with gr.TabItem("转换"): - model_name = gr.Dropdown(label="模型", choices=models, value="纳西妲") - vocal_input1 = gr.Audio(label="上传人声",interactive=True) - gr.Examples(examples=[build_dir+"/examples/vocals/blue_vocal.wav",build_dir+"/examples/vocals/Counter_clockwise_Clock_vocal.wav",build_dir+"/examples/vocals/one_last_kiss_vocal.wav"],inputs=vocal_input1,label="人声样例") - btn_use_separate = gr.Button("使用【人声伴奏分离】分离的人声") - micro_input = gr.Audio(label="麦克风输入(优先于上传的人声)",source="microphone",interactive=True) - vc_transform = gr.Number(label="变调(半音数量,升八度12降八度-12)", value=0) - cluster_ratio = gr.Number(label="聚类模型混合比例", value=0,visible=False) - auto_f0 = gr.Checkbox(label="自动预测音高(转换歌声时不要打开,会严重跑调)", value=False) - slice_db = gr.Number(label="静音分贝阈值(嘈杂的音频可以-30,干声保留呼吸可以-50)", value=-50) - noise_scale = gr.Number(label="noise_scale", value=0.2) - btn_convert = gr.Button("转换", variant="primary") - text_output2 = gr.Textbox(label="输出信息") - vc_output2 = gr.Audio(label="输出音频",interactive=False) - - with gr.TabItem("混音"): - vocal_input2 = gr.Audio(label="上传人声",interactive=True) - btn_use_convert = gr.Button("使用【转换】输出的人声") - instrumental_input1 = gr.Audio(label="上传伴奏") - gr.Examples(examples=[build_dir+"/examples/instrumental/blue_instrumental.wav",build_dir+"/examples/instrumental/Counter_clockwise_Clock_instrumental.wav",build_dir+"/examples/instrumental/one_last_kiss_instrumental.wav"],inputs=instrumental_input1,label="伴奏样例") - btn_use_separate2 = gr.Button("使用【人声伴奏分离】分离的伴奏") - mixing_ratio = gr.Slider(0, 1, value=0.75,step=0.01,label="混音比例(人声:伴奏)", info="人声:伴奏") - btn_compose = gr.Button("混音", variant="primary") - text_output3 = gr.Textbox(label="输出信息") - song_output = gr.Audio(label="输出歌曲",interactive=False) - - with gr.TabItem("设置"): - output = gr.Textbox(label="输出",placeholder=f"距离下一次允许重启时间为{one_hour_later}") - btn_reboot = gr.Button("重启",variant="primary") - btn_separate.click(separate_fn, song_input, [text_output1, vocal_output1,instrumental_output1,vocal_input1,instrumental_input1]) - btn_convert.click(convert_fn, [model_name, vocal_input1,micro_input,vc_transform,auto_f0,cluster_ratio, slice_db, noise_scale], [text_output2, vc_output2,vocal_input2]) - btn_compose.click(compose_fn,[vocal_input2,instrumental_input1,mixing_ratio],[text_output3,song_output]) - btn_reboot.click(callback,output) - btn_use_convert.click(lambda x:x,vc_output2,vocal_input2) - btn_use_separate.click(lambda x:x,vocal_output1,vocal_input1) - btn_use_separate2.click(lambda x:x,instrumental_output1,instrumental_input1) - -app.launch() \ No newline at end of file diff --git a/spaces/Ziqi/ReVersion/README.md b/spaces/Ziqi/ReVersion/README.md deleted file mode 100644 index 1823df2c77ed395faf47aed9a4e04093a151848e..0000000000000000000000000000000000000000 --- a/spaces/Ziqi/ReVersion/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ReVersion -emoji: 🐠 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at 
https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/abcde1234www/tts/app.py b/spaces/abcde1234www/tts/app.py deleted file mode 100644 index a97450c289843433835ebc4da6d5d885ea42167c..0000000000000000000000000000000000000000 --- a/spaces/abcde1234www/tts/app.py +++ /dev/null @@ -1,103 +0,0 @@ -""" -Copyright 2022 Balacoon - -TTS interactive demo -""" - -import logging -from typing import cast - -import gradio as gr -from balacoon_tts import TTS -from huggingface_hub import hf_hub_download, list_repo_files - -# global tts module, initialized from a model selected -tts = None - - -def main(): - logging.basicConfig(level=logging.INFO) - - with gr.Blocks() as demo: - gr.Markdown( - """ -

Text-to-Speech
- - 1. Write an utterance to generate, - 2. Select the model to synthesize with - 3. Select speaker (only for multi-speaker models) - 4. Hit "Generate" and listen to the result! - - When you select model for the first time, - it will take a little time to download it. - You can learn more about models available - [here](https://huggingface.co/balacoon/tts), - visit [Balacoon website](https://balacoon.com/) for more info. - """ - ) - with gr.Row(variant="panel"): - text = gr.Textbox(label="Text", placeholder="Type something here...") - - with gr.Row(): - with gr.Column(variant="panel"): - repo_files = list_repo_files(repo_id="balacoon/tts") - model_files = [x for x in repo_files if x.endswith(".addon")] - model_name = gr.Dropdown( - label="Model", - choices=model_files, - ) - with gr.Column(variant="panel"): - speaker = gr.Dropdown(label="Speaker", choices=[]) - - def set_model(model_name_str: str): - """ - gets value from `model_name`, loads model, - re-initializes tts object, gets list of - speakers that model supports and set them to `speaker` - """ - model_path = hf_hub_download( - repo_id="balacoon/tts", filename=model_name_str - ) - global tts - tts = TTS(model_path) - speakers = tts.get_speakers() - if speakers: - visible = True - value = speakers[-1] - else: - visible = False - value = "" - return gr.Dropdown.update( - choices=speakers, value=value, visible=visible - ) - - model_name.change(set_model, inputs=model_name, outputs=speaker) - - with gr.Row(variant="panel"): - generate = gr.Button("Generate") - with gr.Row(variant="panel"): - audio = gr.Audio() - - def synthesize_audio(text_str: str, speaker_str: str = ""): - """ - gets utterance to synthesize from `text` Textbox - and speaker name from `speaker` dropdown list. - speaker name might be empty for single-speaker models. - Synthesizes the waveform and updates `audio` with it. - """ - if not text_str: - logging.info("text or speaker are not provided") - return None - global tts - if len(text_str) > 1024: - text_str = text_str[:1024] - samples = cast(TTS, tts).synthesize(text_str, speaker_str) - return gr.Audio.update(value=(24000, samples)) - - generate.click(synthesize_audio, inputs=[text, speaker], outputs=audio) - - demo.launch() - - -if __name__ == "__main__": - main() diff --git a/spaces/abdvl/datahub_qa_bot/docs/quick-ingestion-guides/tableau/setup.md b/spaces/abdvl/datahub_qa_bot/docs/quick-ingestion-guides/tableau/setup.md deleted file mode 100644 index 73dac977c474116928b47b28dbab6cc0705660c0..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/quick-ingestion-guides/tableau/setup.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Setup ---- -# Tableau Ingestion Guide: Setup & Prerequisites - -In order to configure ingestion from Tableau, you'll first have to enable Tableau Metadata API and you should have a user with Site Administrator Explorer permissions. - -## Tableau Prerequisites - -1. Grant `Site Administrator Explorer permissions` to a user - - A. Log in to Tableau Cloud https://sso.online.tableau.com/public/idp/SSO. - - B. Navigate to `Users`. - -

- [Image: Navigate to the Users tab] -
- - - C. **For New User**: Follow below steps to grant permission for new user. - - - Click `Add Users` -> `Add Users by Email` - -

- [Image: Navigate to the Users tab] -
- - - Fill `Enter email addresses`, set `Site role` to `Site Administrator Explorer` and Click `Add Users` - -

- [Image: Navigate to the Users tab] -
- - - D. **For Existing User:** Follow below steps to grant permission for existing user. - - - Select a user and click `Actions` -> `Site Role` - -

- [Image: Actions Site Role] -
- - - Change user role to `Site Administrator Explorer` - -

- [Image: tableau site role] -
- -2. **Enable Tableau Metadata API:** This step is required only for Tableau Server. The Metadata API is installed with Tableau Server but disabled by default. - - - Open a command prompt as an admin on the initial node (*where TSM is installed*) in the cluster - - Run the command: `tsm maintenance metadata-services enable` - - -## Next Steps - -Once you've done all of the above in Tableau, it's time to [move on](configuration.md) to configuring the actual ingestion source within DataHub. - -*Need more help? Join the conversation in [Slack](http://slack.datahubproject.io)!* \ No newline at end of file diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/multi_scale_deform_attn.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/multi_scale_deform_attn.py deleted file mode 100644 index c52dda18b41705705b47dd0e995b124048c16fba..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/multi_scale_deform_attn.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -import warnings - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd.function import Function, once_differentiable - -from annotator.uniformer.mmcv import deprecated_api_warning -from annotator.uniformer.mmcv.cnn import constant_init, xavier_init -from annotator.uniformer.mmcv.cnn.bricks.registry import ATTENTION -from annotator.uniformer.mmcv.runner import BaseModule -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) - - -class MultiScaleDeformableAttnFunction(Function): - - @staticmethod - def forward(ctx, value, value_spatial_shapes, value_level_start_index, - sampling_locations, attention_weights, im2col_step): - """GPU version of multi-scale deformable attention. - - Args: - value (Tensor): The value has shape - (bs, num_keys, mum_heads, embed_dims//num_heads) - value_spatial_shapes (Tensor): Spatial shape of - each feature map, has shape (num_levels, 2), - last dimension 2 represent (h, w) - sampling_locations (Tensor): The location of sampling points, - has shape - (bs ,num_queries, num_heads, num_levels, num_points, 2), - the last dimension 2 represent (x, y). - attention_weights (Tensor): The weight of sampling points used - when calculate the attention, has shape - (bs ,num_queries, num_heads, num_levels, num_points), - im2col_step (Tensor): The step used in image to column. - - Returns: - Tensor: has shape (bs, num_queries, embed_dims) - """ - - ctx.im2col_step = im2col_step - output = ext_module.ms_deform_attn_forward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - im2col_step=ctx.im2col_step) - ctx.save_for_backward(value, value_spatial_shapes, - value_level_start_index, sampling_locations, - attention_weights) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - """GPU version of backward function. - - Args: - grad_output (Tensor): Gradient - of output tensor of forward. - - Returns: - Tuple[Tensor]: Gradient - of input tensors in forward. 
- """ - value, value_spatial_shapes, value_level_start_index,\ - sampling_locations, attention_weights = ctx.saved_tensors - grad_value = torch.zeros_like(value) - grad_sampling_loc = torch.zeros_like(sampling_locations) - grad_attn_weight = torch.zeros_like(attention_weights) - - ext_module.ms_deform_attn_backward( - value, - value_spatial_shapes, - value_level_start_index, - sampling_locations, - attention_weights, - grad_output.contiguous(), - grad_value, - grad_sampling_loc, - grad_attn_weight, - im2col_step=ctx.im2col_step) - - return grad_value, None, None, \ - grad_sampling_loc, grad_attn_weight, None - - -def multi_scale_deformable_attn_pytorch(value, value_spatial_shapes, - sampling_locations, attention_weights): - """CPU version of multi-scale deformable attention. - - Args: - value (Tensor): The value has shape - (bs, num_keys, mum_heads, embed_dims//num_heads) - value_spatial_shapes (Tensor): Spatial shape of - each feature map, has shape (num_levels, 2), - last dimension 2 represent (h, w) - sampling_locations (Tensor): The location of sampling points, - has shape - (bs ,num_queries, num_heads, num_levels, num_points, 2), - the last dimension 2 represent (x, y). - attention_weights (Tensor): The weight of sampling points used - when calculate the attention, has shape - (bs ,num_queries, num_heads, num_levels, num_points), - - Returns: - Tensor: has shape (bs, num_queries, embed_dims) - """ - - bs, _, num_heads, embed_dims = value.shape - _, num_queries, num_heads, num_levels, num_points, _ =\ - sampling_locations.shape - value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], - dim=1) - sampling_grids = 2 * sampling_locations - 1 - sampling_value_list = [] - for level, (H_, W_) in enumerate(value_spatial_shapes): - # bs, H_*W_, num_heads, embed_dims -> - # bs, H_*W_, num_heads*embed_dims -> - # bs, num_heads*embed_dims, H_*W_ -> - # bs*num_heads, embed_dims, H_, W_ - value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape( - bs * num_heads, embed_dims, H_, W_) - # bs, num_queries, num_heads, num_points, 2 -> - # bs, num_heads, num_queries, num_points, 2 -> - # bs*num_heads, num_queries, num_points, 2 - sampling_grid_l_ = sampling_grids[:, :, :, - level].transpose(1, 2).flatten(0, 1) - # bs*num_heads, embed_dims, num_queries, num_points - sampling_value_l_ = F.grid_sample( - value_l_, - sampling_grid_l_, - mode='bilinear', - padding_mode='zeros', - align_corners=False) - sampling_value_list.append(sampling_value_l_) - # (bs, num_queries, num_heads, num_levels, num_points) -> - # (bs, num_heads, num_queries, num_levels, num_points) -> - # (bs, num_heads, 1, num_queries, num_levels*num_points) - attention_weights = attention_weights.transpose(1, 2).reshape( - bs * num_heads, 1, num_queries, num_levels * num_points) - output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * - attention_weights).sum(-1).view(bs, num_heads * embed_dims, - num_queries) - return output.transpose(1, 2).contiguous() - - -@ATTENTION.register_module() -class MultiScaleDeformableAttention(BaseModule): - """An attention module used in Deformable-Detr. - - `Deformable DETR: Deformable Transformers for End-to-End Object Detection. - `_. - - Args: - embed_dims (int): The embedding dimension of Attention. - Default: 256. - num_heads (int): Parallel attention heads. Default: 64. - num_levels (int): The number of feature map used in - Attention. Default: 4. - num_points (int): The number of sampling points for - each query in each head. Default: 4. 
- im2col_step (int): The step used in image_to_column. - Default: 64. - dropout (float): A Dropout layer on `inp_identity`. - Default: 0.1. - batch_first (bool): Key, Query and Value are shape of - (batch, n, embed_dim) - or (n, batch, embed_dim). Default to False. - norm_cfg (dict): Config dict for normalization layer. - Default: None. - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - """ - - def __init__(self, - embed_dims=256, - num_heads=8, - num_levels=4, - num_points=4, - im2col_step=64, - dropout=0.1, - batch_first=False, - norm_cfg=None, - init_cfg=None): - super().__init__(init_cfg) - if embed_dims % num_heads != 0: - raise ValueError(f'embed_dims must be divisible by num_heads, ' - f'but got {embed_dims} and {num_heads}') - dim_per_head = embed_dims // num_heads - self.norm_cfg = norm_cfg - self.dropout = nn.Dropout(dropout) - self.batch_first = batch_first - - # you'd better set dim_per_head to a power of 2 - # which is more efficient in the CUDA implementation - def _is_power_of_2(n): - if (not isinstance(n, int)) or (n < 0): - raise ValueError( - 'invalid input for _is_power_of_2: {} (type: {})'.format( - n, type(n))) - return (n & (n - 1) == 0) and n != 0 - - if not _is_power_of_2(dim_per_head): - warnings.warn( - "You'd better set embed_dims in " - 'MultiScaleDeformAttention to make ' - 'the dimension of each attention head a power of 2 ' - 'which is more efficient in our CUDA implementation.') - - self.im2col_step = im2col_step - self.embed_dims = embed_dims - self.num_levels = num_levels - self.num_heads = num_heads - self.num_points = num_points - self.sampling_offsets = nn.Linear( - embed_dims, num_heads * num_levels * num_points * 2) - self.attention_weights = nn.Linear(embed_dims, - num_heads * num_levels * num_points) - self.value_proj = nn.Linear(embed_dims, embed_dims) - self.output_proj = nn.Linear(embed_dims, embed_dims) - self.init_weights() - - def init_weights(self): - """Default initialization for Parameters of Module.""" - constant_init(self.sampling_offsets, 0.) - thetas = torch.arange( - self.num_heads, - dtype=torch.float32) * (2.0 * math.pi / self.num_heads) - grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) - grid_init = (grid_init / - grid_init.abs().max(-1, keepdim=True)[0]).view( - self.num_heads, 1, 1, - 2).repeat(1, self.num_levels, self.num_points, 1) - for i in range(self.num_points): - grid_init[:, :, i, :] *= i + 1 - - self.sampling_offsets.bias.data = grid_init.view(-1) - constant_init(self.attention_weights, val=0., bias=0.) - xavier_init(self.value_proj, distribution='uniform', bias=0.) - xavier_init(self.output_proj, distribution='uniform', bias=0.) - self._is_init = True - - @deprecated_api_warning({'residual': 'identity'}, - cls_name='MultiScaleDeformableAttention') - def forward(self, - query, - key=None, - value=None, - identity=None, - query_pos=None, - key_padding_mask=None, - reference_points=None, - spatial_shapes=None, - level_start_index=None, - **kwargs): - """Forward Function of MultiScaleDeformAttention. - - Args: - query (Tensor): Query of Transformer with shape - (num_query, bs, embed_dims). - key (Tensor): The key tensor with shape - `(num_key, bs, embed_dims)`. - value (Tensor): The value tensor with shape - `(num_key, bs, embed_dims)`. - identity (Tensor): The tensor used for addition, with the - same shape as `query`. Default None. If None, - `query` will be used. - query_pos (Tensor): The positional encoding for `query`. - Default: None. 
- key_pos (Tensor): The positional encoding for `key`. Default - None. - reference_points (Tensor): The normalized reference - points with shape (bs, num_query, num_levels, 2), - all elements is range in [0, 1], top-left (0,0), - bottom-right (1, 1), including padding area. - or (N, Length_{query}, num_levels, 4), add - additional two dimensions is (w, h) to - form reference boxes. - key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_key]. - spatial_shapes (Tensor): Spatial shape of features in - different levels. With shape (num_levels, 2), - last dimension represents (h, w). - level_start_index (Tensor): The start index of each level. - A tensor has shape ``(num_levels, )`` and can be represented - as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. - - Returns: - Tensor: forwarded results with shape [num_query, bs, embed_dims]. - """ - - if value is None: - value = query - - if identity is None: - identity = query - if query_pos is not None: - query = query + query_pos - if not self.batch_first: - # change to (bs, num_query ,embed_dims) - query = query.permute(1, 0, 2) - value = value.permute(1, 0, 2) - - bs, num_query, _ = query.shape - bs, num_value, _ = value.shape - assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value - - value = self.value_proj(value) - if key_padding_mask is not None: - value = value.masked_fill(key_padding_mask[..., None], 0.0) - value = value.view(bs, num_value, self.num_heads, -1) - sampling_offsets = self.sampling_offsets(query).view( - bs, num_query, self.num_heads, self.num_levels, self.num_points, 2) - attention_weights = self.attention_weights(query).view( - bs, num_query, self.num_heads, self.num_levels * self.num_points) - attention_weights = attention_weights.softmax(-1) - - attention_weights = attention_weights.view(bs, num_query, - self.num_heads, - self.num_levels, - self.num_points) - if reference_points.shape[-1] == 2: - offset_normalizer = torch.stack( - [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) - sampling_locations = reference_points[:, :, None, :, None, :] \ - + sampling_offsets \ - / offset_normalizer[None, None, None, :, None, :] - elif reference_points.shape[-1] == 4: - sampling_locations = reference_points[:, :, None, :, None, :2] \ - + sampling_offsets / self.num_points \ - * reference_points[:, :, None, :, None, 2:] \ - * 0.5 - else: - raise ValueError( - f'Last dim of reference_points must be' - f' 2 or 4, but get {reference_points.shape[-1]} instead.') - if torch.cuda.is_available() and value.is_cuda: - output = MultiScaleDeformableAttnFunction.apply( - value, spatial_shapes, level_start_index, sampling_locations, - attention_weights, self.im2col_step) - else: - output = multi_scale_deformable_attn_pytorch( - value, spatial_shapes, sampling_locations, attention_weights) - - output = self.output_proj(output) - - if not self.batch_first: - # (num_query, bs ,embed_dims) - output = output.permute(1, 0, 2) - - return self.dropout(output) + identity diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/misc.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/misc.py deleted file mode 100644 index 2c58d0d7fee9fe3d4519270ad8c1e998d0d8a18c..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/misc.py +++ /dev/null @@ -1,377 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
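Editor's aside on the deformable-attention file above, before the misc.py utilities begin: a shape sketch for the pure-PyTorch fallback `multi_scale_deformable_attn_pytorch`. All sizes below are made up; the snippet only illustrates the tensor layout the function expects.

```python
import torch

# Hypothetical sizes.
bs, num_heads, head_dim = 2, 8, 32
num_queries, num_levels, num_points = 100, 2, 4
spatial_shapes = torch.tensor([[32, 32], [16, 16]], dtype=torch.long)   # (h, w) per level
num_keys = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())

value = torch.rand(bs, num_keys, num_heads, head_dim)
sampling_locations = torch.rand(bs, num_queries, num_heads, num_levels, num_points, 2)
attention_weights = torch.softmax(
    torch.rand(bs, num_queries, num_heads, num_levels * num_points), dim=-1
).view(bs, num_queries, num_heads, num_levels, num_points)

out = multi_scale_deformable_attn_pytorch(
    value, spatial_shapes, sampling_locations, attention_weights)
print(out.shape)  # should print torch.Size([2, 100, 256]), i.e. [bs, num_queries, num_heads * head_dim]
```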
-import collections.abc -import functools -import itertools -import subprocess -import warnings -from collections import abc -from importlib import import_module -from inspect import getfullargspec -from itertools import repeat - - -# From PyTorch internals -def _ntuple(n): - - def parse(x): - if isinstance(x, collections.abc.Iterable): - return x - return tuple(repeat(x, n)) - - return parse - - -to_1tuple = _ntuple(1) -to_2tuple = _ntuple(2) -to_3tuple = _ntuple(3) -to_4tuple = _ntuple(4) -to_ntuple = _ntuple - - -def is_str(x): - """Whether the input is an string instance. - - Note: This method is deprecated since python 2 is no longer supported. - """ - return isinstance(x, str) - - -def import_modules_from_strings(imports, allow_failed_imports=False): - """Import modules from the given list of strings. - - Args: - imports (list | str | None): The given module names to be imported. - allow_failed_imports (bool): If True, the failed imports will return - None. Otherwise, an ImportError is raise. Default: False. - - Returns: - list[module] | module | None: The imported modules. - - Examples: - >>> osp, sys = import_modules_from_strings( - ... ['os.path', 'sys']) - >>> import os.path as osp_ - >>> import sys as sys_ - >>> assert osp == osp_ - >>> assert sys == sys_ - """ - if not imports: - return - single_import = False - if isinstance(imports, str): - single_import = True - imports = [imports] - if not isinstance(imports, list): - raise TypeError( - f'custom_imports must be a list but got type {type(imports)}') - imported = [] - for imp in imports: - if not isinstance(imp, str): - raise TypeError( - f'{imp} is of type {type(imp)} and cannot be imported.') - try: - imported_tmp = import_module(imp) - except ImportError: - if allow_failed_imports: - warnings.warn(f'{imp} failed to import and is ignored.', - UserWarning) - imported_tmp = None - else: - raise ImportError - imported.append(imported_tmp) - if single_import: - imported = imported[0] - return imported - - -def iter_cast(inputs, dst_type, return_type=None): - """Cast elements of an iterable object into some type. - - Args: - inputs (Iterable): The input object. - dst_type (type): Destination type. - return_type (type, optional): If specified, the output object will be - converted to this type, otherwise an iterator. - - Returns: - iterator or specified type: The converted object. - """ - if not isinstance(inputs, abc.Iterable): - raise TypeError('inputs must be an iterable object') - if not isinstance(dst_type, type): - raise TypeError('"dst_type" must be a valid type') - - out_iterable = map(dst_type, inputs) - - if return_type is None: - return out_iterable - else: - return return_type(out_iterable) - - -def list_cast(inputs, dst_type): - """Cast elements of an iterable object into a list of some type. - - A partial method of :func:`iter_cast`. - """ - return iter_cast(inputs, dst_type, return_type=list) - - -def tuple_cast(inputs, dst_type): - """Cast elements of an iterable object into a tuple of some type. - - A partial method of :func:`iter_cast`. - """ - return iter_cast(inputs, dst_type, return_type=tuple) - - -def is_seq_of(seq, expected_type, seq_type=None): - """Check whether it is a sequence of some type. - - Args: - seq (Sequence): The sequence to be checked. - expected_type (type): Expected type of sequence items. - seq_type (type, optional): Expected sequence type. - - Returns: - bool: Whether the sequence is valid. 
- """ - if seq_type is None: - exp_seq_type = abc.Sequence - else: - assert isinstance(seq_type, type) - exp_seq_type = seq_type - if not isinstance(seq, exp_seq_type): - return False - for item in seq: - if not isinstance(item, expected_type): - return False - return True - - -def is_list_of(seq, expected_type): - """Check whether it is a list of some type. - - A partial method of :func:`is_seq_of`. - """ - return is_seq_of(seq, expected_type, seq_type=list) - - -def is_tuple_of(seq, expected_type): - """Check whether it is a tuple of some type. - - A partial method of :func:`is_seq_of`. - """ - return is_seq_of(seq, expected_type, seq_type=tuple) - - -def slice_list(in_list, lens): - """Slice a list into several sub lists by a list of given length. - - Args: - in_list (list): The list to be sliced. - lens(int or list): The expected length of each out list. - - Returns: - list: A list of sliced list. - """ - if isinstance(lens, int): - assert len(in_list) % lens == 0 - lens = [lens] * int(len(in_list) / lens) - if not isinstance(lens, list): - raise TypeError('"indices" must be an integer or a list of integers') - elif sum(lens) != len(in_list): - raise ValueError('sum of lens and list length does not ' - f'match: {sum(lens)} != {len(in_list)}') - out_list = [] - idx = 0 - for i in range(len(lens)): - out_list.append(in_list[idx:idx + lens[i]]) - idx += lens[i] - return out_list - - -def concat_list(in_list): - """Concatenate a list of list into a single list. - - Args: - in_list (list): The list of list to be merged. - - Returns: - list: The concatenated flat list. - """ - return list(itertools.chain(*in_list)) - - -def check_prerequisites( - prerequisites, - checker, - msg_tmpl='Prerequisites "{}" are required in method "{}" but not ' - 'found, please install them first.'): # yapf: disable - """A decorator factory to check if prerequisites are satisfied. - - Args: - prerequisites (str of list[str]): Prerequisites to be checked. - checker (callable): The checker method that returns True if a - prerequisite is meet, False otherwise. - msg_tmpl (str): The message template with two variables. - - Returns: - decorator: A specific decorator. - """ - - def wrap(func): - - @functools.wraps(func) - def wrapped_func(*args, **kwargs): - requirements = [prerequisites] if isinstance( - prerequisites, str) else prerequisites - missing = [] - for item in requirements: - if not checker(item): - missing.append(item) - if missing: - print(msg_tmpl.format(', '.join(missing), func.__name__)) - raise RuntimeError('Prerequisites not meet.') - else: - return func(*args, **kwargs) - - return wrapped_func - - return wrap - - -def _check_py_package(package): - try: - import_module(package) - except ImportError: - return False - else: - return True - - -def _check_executable(cmd): - if subprocess.call(f'which {cmd}', shell=True) != 0: - return False - else: - return True - - -def requires_package(prerequisites): - """A decorator to check if some python packages are installed. - - Example: - >>> @requires_package('numpy') - >>> func(arg1, args): - >>> return numpy.zeros(1) - array([0.]) - >>> @requires_package(['numpy', 'non_package']) - >>> func(arg1, args): - >>> return numpy.zeros(1) - ImportError - """ - return check_prerequisites(prerequisites, checker=_check_py_package) - - -def requires_executable(prerequisites): - """A decorator to check if some executable files are installed. 
- - Example: - >>> @requires_executable('ffmpeg') - >>> func(arg1, args): - >>> print(1) - 1 - """ - return check_prerequisites(prerequisites, checker=_check_executable) - - -def deprecated_api_warning(name_dict, cls_name=None): - """A decorator to check if some arguments are deprecate and try to replace - deprecate src_arg_name to dst_arg_name. - - Args: - name_dict(dict): - key (str): Deprecate argument names. - val (str): Expected argument names. - - Returns: - func: New function. - """ - - def api_warning_wrapper(old_func): - - @functools.wraps(old_func) - def new_func(*args, **kwargs): - # get the arg spec of the decorated method - args_info = getfullargspec(old_func) - # get name of the function - func_name = old_func.__name__ - if cls_name is not None: - func_name = f'{cls_name}.{func_name}' - if args: - arg_names = args_info.args[:len(args)] - for src_arg_name, dst_arg_name in name_dict.items(): - if src_arg_name in arg_names: - warnings.warn( - f'"{src_arg_name}" is deprecated in ' - f'`{func_name}`, please use "{dst_arg_name}" ' - 'instead') - arg_names[arg_names.index(src_arg_name)] = dst_arg_name - if kwargs: - for src_arg_name, dst_arg_name in name_dict.items(): - if src_arg_name in kwargs: - - assert dst_arg_name not in kwargs, ( - f'The expected behavior is to replace ' - f'the deprecated key `{src_arg_name}` to ' - f'new key `{dst_arg_name}`, but got them ' - f'in the arguments at the same time, which ' - f'is confusing. `{src_arg_name} will be ' - f'deprecated in the future, please ' - f'use `{dst_arg_name}` instead.') - - warnings.warn( - f'"{src_arg_name}" is deprecated in ' - f'`{func_name}`, please use "{dst_arg_name}" ' - 'instead') - kwargs[dst_arg_name] = kwargs.pop(src_arg_name) - - # apply converted arguments to the decorated method - output = old_func(*args, **kwargs) - return output - - return new_func - - return api_warning_wrapper - - -def is_method_overridden(method, base_class, derived_class): - """Check if a method of base class is overridden in derived class. - - Args: - method (str): the method name to check. - base_class (type): the class of the base class. - derived_class (type | Any): the class or instance of the derived class. - """ - assert isinstance(base_class, type), \ - "base_class doesn't accept instance, Please pass class instead." - - if not isinstance(derived_class, type): - derived_class = derived_class.__class__ - - base_method = getattr(base_class, method) - derived_method = getattr(derived_class, method) - return derived_method != base_method - - -def has_method(obj: object, method: str) -> bool: - """Check whether the object has a method. - - Args: - method (str): The method name to check. - obj (object): The object to check. - - Returns: - bool: True if the object has the method else False. - """ - return hasattr(obj, method) and callable(getattr(obj, method)) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/momentum_updater.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/momentum_updater.py deleted file mode 100644 index 60437756ceedf06055ec349df69a25465738d3f0..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/momentum_updater.py +++ /dev/null @@ -1,493 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
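Before moving on to the momentum hooks, one helper from the misc module deleted above is worth a concrete example: `is_method_overridden` is how runner code decides whether a user hook actually customizes a base behavior. The toy `Hook` classes below are made up for illustration; only the check itself mirrors the deleted function.

```python
def is_method_overridden(method, base_class, derived_class):
    # Accept either a derived class or an instance of it.
    if not isinstance(derived_class, type):
        derived_class = derived_class.__class__
    return getattr(derived_class, method) != getattr(base_class, method)

class Hook:
    def before_run(self, runner):
        pass

class MyHook(Hook):
    def before_run(self, runner):
        print('custom setup')

print(is_method_overridden('before_run', Hook, MyHook))   # True
print(is_method_overridden('before_run', Hook, Hook()))   # False
```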
-import annotator.uniformer.mmcv as mmcv -from .hook import HOOKS, Hook -from .lr_updater import annealing_cos, annealing_linear, format_param - - -class MomentumUpdaterHook(Hook): - - def __init__(self, - by_epoch=True, - warmup=None, - warmup_iters=0, - warmup_ratio=0.9): - # validate the "warmup" argument - if warmup is not None: - if warmup not in ['constant', 'linear', 'exp']: - raise ValueError( - f'"{warmup}" is not a supported type for warming up, valid' - ' types are "constant" and "linear"') - if warmup is not None: - assert warmup_iters > 0, \ - '"warmup_iters" must be a positive integer' - assert 0 < warmup_ratio <= 1.0, \ - '"warmup_momentum" must be in range (0,1]' - - self.by_epoch = by_epoch - self.warmup = warmup - self.warmup_iters = warmup_iters - self.warmup_ratio = warmup_ratio - - self.base_momentum = [] # initial momentum for all param groups - self.regular_momentum = [ - ] # expected momentum if no warming up is performed - - def _set_momentum(self, runner, momentum_groups): - if isinstance(runner.optimizer, dict): - for k, optim in runner.optimizer.items(): - for param_group, mom in zip(optim.param_groups, - momentum_groups[k]): - if 'momentum' in param_group.keys(): - param_group['momentum'] = mom - elif 'betas' in param_group.keys(): - param_group['betas'] = (mom, param_group['betas'][1]) - else: - for param_group, mom in zip(runner.optimizer.param_groups, - momentum_groups): - if 'momentum' in param_group.keys(): - param_group['momentum'] = mom - elif 'betas' in param_group.keys(): - param_group['betas'] = (mom, param_group['betas'][1]) - - def get_momentum(self, runner, base_momentum): - raise NotImplementedError - - def get_regular_momentum(self, runner): - if isinstance(runner.optimizer, dict): - momentum_groups = {} - for k in runner.optimizer.keys(): - _momentum_group = [ - self.get_momentum(runner, _base_momentum) - for _base_momentum in self.base_momentum[k] - ] - momentum_groups.update({k: _momentum_group}) - return momentum_groups - else: - return [ - self.get_momentum(runner, _base_momentum) - for _base_momentum in self.base_momentum - ] - - def get_warmup_momentum(self, cur_iters): - - def _get_warmup_momentum(cur_iters, regular_momentum): - if self.warmup == 'constant': - warmup_momentum = [ - _momentum / self.warmup_ratio - for _momentum in self.regular_momentum - ] - elif self.warmup == 'linear': - k = (1 - cur_iters / self.warmup_iters) * (1 - - self.warmup_ratio) - warmup_momentum = [ - _momentum / (1 - k) for _momentum in self.regular_mom - ] - elif self.warmup == 'exp': - k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters) - warmup_momentum = [ - _momentum / k for _momentum in self.regular_mom - ] - return warmup_momentum - - if isinstance(self.regular_momentum, dict): - momentum_groups = {} - for key, regular_momentum in self.regular_momentum.items(): - momentum_groups[key] = _get_warmup_momentum( - cur_iters, regular_momentum) - return momentum_groups - else: - return _get_warmup_momentum(cur_iters, self.regular_momentum) - - def before_run(self, runner): - # NOTE: when resuming from a checkpoint, - # if 'initial_momentum' is not saved, - # it will be set according to the optimizer params - if isinstance(runner.optimizer, dict): - self.base_momentum = {} - for k, optim in runner.optimizer.items(): - for group in optim.param_groups: - if 'momentum' in group.keys(): - group.setdefault('initial_momentum', group['momentum']) - else: - group.setdefault('initial_momentum', group['betas'][0]) - _base_momentum = [ - group['initial_momentum'] 
for group in optim.param_groups - ] - self.base_momentum.update({k: _base_momentum}) - else: - for group in runner.optimizer.param_groups: - if 'momentum' in group.keys(): - group.setdefault('initial_momentum', group['momentum']) - else: - group.setdefault('initial_momentum', group['betas'][0]) - self.base_momentum = [ - group['initial_momentum'] - for group in runner.optimizer.param_groups - ] - - def before_train_epoch(self, runner): - if not self.by_epoch: - return - self.regular_mom = self.get_regular_momentum(runner) - self._set_momentum(runner, self.regular_mom) - - def before_train_iter(self, runner): - cur_iter = runner.iter - if not self.by_epoch: - self.regular_mom = self.get_regular_momentum(runner) - if self.warmup is None or cur_iter >= self.warmup_iters: - self._set_momentum(runner, self.regular_mom) - else: - warmup_momentum = self.get_warmup_momentum(cur_iter) - self._set_momentum(runner, warmup_momentum) - elif self.by_epoch: - if self.warmup is None or cur_iter > self.warmup_iters: - return - elif cur_iter == self.warmup_iters: - self._set_momentum(runner, self.regular_mom) - else: - warmup_momentum = self.get_warmup_momentum(cur_iter) - self._set_momentum(runner, warmup_momentum) - - -@HOOKS.register_module() -class StepMomentumUpdaterHook(MomentumUpdaterHook): - """Step momentum scheduler with min value clipping. - - Args: - step (int | list[int]): Step to decay the momentum. If an int value is - given, regard it as the decay interval. If a list is given, decay - momentum at these steps. - gamma (float, optional): Decay momentum ratio. Default: 0.5. - min_momentum (float, optional): Minimum momentum value to keep. If - momentum after decay is lower than this value, it will be clipped - accordingly. If None is given, we don't perform lr clipping. - Default: None. 
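The warm-up branch above divides the regular momentum rather than multiplying it, because momentum is warmed up inversely to the learning rate (it starts high and decays toward the base value). A standalone sketch of the intended behavior of the three modes, with illustrative numbers:

```python
def warmup_momentum(regular_momentum, cur_iters, warmup_iters, warmup_ratio, mode):
    # Momentum is scaled *up* during warm-up (inverse of the LR warm-up).
    if mode == 'constant':
        return [m / warmup_ratio for m in regular_momentum]
    if mode == 'linear':
        k = (1 - cur_iters / warmup_iters) * (1 - warmup_ratio)
        return [m / (1 - k) for m in regular_momentum]
    if mode == 'exp':
        k = warmup_ratio ** (1 - cur_iters / warmup_iters)
        return [m / k for m in regular_momentum]
    raise ValueError(mode)

print(warmup_momentum([0.9], cur_iters=0,   warmup_iters=100, warmup_ratio=0.9, mode='linear'))  # ~[1.0]
print(warmup_momentum([0.9], cur_iters=100, warmup_iters=100, warmup_ratio=0.9, mode='linear'))  # ~[0.9]
```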
- """ - - def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs): - if isinstance(step, list): - assert mmcv.is_list_of(step, int) - assert all([s > 0 for s in step]) - elif isinstance(step, int): - assert step > 0 - else: - raise TypeError('"step" must be a list or integer') - self.step = step - self.gamma = gamma - self.min_momentum = min_momentum - super(StepMomentumUpdaterHook, self).__init__(**kwargs) - - def get_momentum(self, runner, base_momentum): - progress = runner.epoch if self.by_epoch else runner.iter - - # calculate exponential term - if isinstance(self.step, int): - exp = progress // self.step - else: - exp = len(self.step) - for i, s in enumerate(self.step): - if progress < s: - exp = i - break - - momentum = base_momentum * (self.gamma**exp) - if self.min_momentum is not None: - # clip to a minimum value - momentum = max(momentum, self.min_momentum) - return momentum - - -@HOOKS.register_module() -class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook): - - def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs): - assert (min_momentum is None) ^ (min_momentum_ratio is None) - self.min_momentum = min_momentum - self.min_momentum_ratio = min_momentum_ratio - super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs) - - def get_momentum(self, runner, base_momentum): - if self.by_epoch: - progress = runner.epoch - max_progress = runner.max_epochs - else: - progress = runner.iter - max_progress = runner.max_iters - if self.min_momentum_ratio is not None: - target_momentum = base_momentum * self.min_momentum_ratio - else: - target_momentum = self.min_momentum - return annealing_cos(base_momentum, target_momentum, - progress / max_progress) - - -@HOOKS.register_module() -class CyclicMomentumUpdaterHook(MomentumUpdaterHook): - """Cyclic momentum Scheduler. - - Implement the cyclical momentum scheduler policy described in - https://arxiv.org/pdf/1708.07120.pdf - - This momentum scheduler usually used together with the CyclicLRUpdater - to improve the performance in the 3D detection area. - - Attributes: - target_ratio (tuple[float]): Relative ratio of the lowest momentum and - the highest momentum to the initial momentum. - cyclic_times (int): Number of cycles during training - step_ratio_up (float): The ratio of the increasing process of momentum - in the total cycle. - by_epoch (bool): Whether to update momentum by epoch. 
- """ - - def __init__(self, - by_epoch=False, - target_ratio=(0.85 / 0.95, 1), - cyclic_times=1, - step_ratio_up=0.4, - **kwargs): - if isinstance(target_ratio, float): - target_ratio = (target_ratio, target_ratio / 1e5) - elif isinstance(target_ratio, tuple): - target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \ - if len(target_ratio) == 1 else target_ratio - else: - raise ValueError('target_ratio should be either float ' - f'or tuple, got {type(target_ratio)}') - - assert len(target_ratio) == 2, \ - '"target_ratio" must be list or tuple of two floats' - assert 0 <= step_ratio_up < 1.0, \ - '"step_ratio_up" must be in range [0,1)' - - self.target_ratio = target_ratio - self.cyclic_times = cyclic_times - self.step_ratio_up = step_ratio_up - self.momentum_phases = [] # init momentum_phases - # currently only support by_epoch=False - assert not by_epoch, \ - 'currently only support "by_epoch" = False' - super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs) - - def before_run(self, runner): - super(CyclicMomentumUpdaterHook, self).before_run(runner) - # initiate momentum_phases - # total momentum_phases are separated as up and down - max_iter_per_phase = runner.max_iters // self.cyclic_times - iter_up_phase = int(self.step_ratio_up * max_iter_per_phase) - self.momentum_phases.append( - [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]]) - self.momentum_phases.append([ - iter_up_phase, max_iter_per_phase, max_iter_per_phase, - self.target_ratio[0], self.target_ratio[1] - ]) - - def get_momentum(self, runner, base_momentum): - curr_iter = runner.iter - for (start_iter, end_iter, max_iter_per_phase, start_ratio, - end_ratio) in self.momentum_phases: - curr_iter %= max_iter_per_phase - if start_iter <= curr_iter < end_iter: - progress = curr_iter - start_iter - return annealing_cos(base_momentum * start_ratio, - base_momentum * end_ratio, - progress / (end_iter - start_iter)) - - -@HOOKS.register_module() -class OneCycleMomentumUpdaterHook(MomentumUpdaterHook): - """OneCycle momentum Scheduler. - - This momentum scheduler usually used together with the OneCycleLrUpdater - to improve the performance. - - Args: - base_momentum (float or list): Lower momentum boundaries in the cycle - for each parameter group. Note that momentum is cycled inversely - to learning rate; at the peak of a cycle, momentum is - 'base_momentum' and learning rate is 'max_lr'. - Default: 0.85 - max_momentum (float or list): Upper momentum boundaries in the cycle - for each parameter group. Functionally, - it defines the cycle amplitude (max_momentum - base_momentum). - Note that momentum is cycled inversely - to learning rate; at the start of a cycle, momentum is - 'max_momentum' and learning rate is 'base_lr' - Default: 0.95 - pct_start (float): The percentage of the cycle (in number of steps) - spent increasing the learning rate. - Default: 0.3 - anneal_strategy (str): {'cos', 'linear'} - Specifies the annealing strategy: 'cos' for cosine annealing, - 'linear' for linear annealing. - Default: 'cos' - three_phase (bool): If three_phase is True, use a third phase of the - schedule to annihilate the learning rate according to - final_div_factor instead of modifying the second phase (the first - two phases will be symmetrical about the step indicated by - pct_start). 
- Default: False - """ - - def __init__(self, - base_momentum=0.85, - max_momentum=0.95, - pct_start=0.3, - anneal_strategy='cos', - three_phase=False, - **kwargs): - # validate by_epoch, currently only support by_epoch=False - if 'by_epoch' not in kwargs: - kwargs['by_epoch'] = False - else: - assert not kwargs['by_epoch'], \ - 'currently only support "by_epoch" = False' - if not isinstance(base_momentum, (float, list, dict)): - raise ValueError('base_momentum must be the type among of float,' - 'list or dict.') - self._base_momentum = base_momentum - if not isinstance(max_momentum, (float, list, dict)): - raise ValueError('max_momentum must be the type among of float,' - 'list or dict.') - self._max_momentum = max_momentum - # validate pct_start - if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float): - raise ValueError('Expected float between 0 and 1 pct_start, but ' - f'got {pct_start}') - self.pct_start = pct_start - # validate anneal_strategy - if anneal_strategy not in ['cos', 'linear']: - raise ValueError('anneal_strategy must by one of "cos" or ' - f'"linear", instead got {anneal_strategy}') - elif anneal_strategy == 'cos': - self.anneal_func = annealing_cos - elif anneal_strategy == 'linear': - self.anneal_func = annealing_linear - self.three_phase = three_phase - self.momentum_phases = [] # init momentum_phases - super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs) - - def before_run(self, runner): - if isinstance(runner.optimizer, dict): - for k, optim in runner.optimizer.items(): - if ('momentum' not in optim.defaults - and 'betas' not in optim.defaults): - raise ValueError('optimizer must support momentum with' - 'option enabled') - self.use_beta1 = 'betas' in optim.defaults - _base_momentum = format_param(k, optim, self._base_momentum) - _max_momentum = format_param(k, optim, self._max_momentum) - for group, b_momentum, m_momentum in zip( - optim.param_groups, _base_momentum, _max_momentum): - if self.use_beta1: - _, beta2 = group['betas'] - group['betas'] = (m_momentum, beta2) - else: - group['momentum'] = m_momentum - group['base_momentum'] = b_momentum - group['max_momentum'] = m_momentum - else: - optim = runner.optimizer - if ('momentum' not in optim.defaults - and 'betas' not in optim.defaults): - raise ValueError('optimizer must support momentum with' - 'option enabled') - self.use_beta1 = 'betas' in optim.defaults - k = type(optim).__name__ - _base_momentum = format_param(k, optim, self._base_momentum) - _max_momentum = format_param(k, optim, self._max_momentum) - for group, b_momentum, m_momentum in zip(optim.param_groups, - _base_momentum, - _max_momentum): - if self.use_beta1: - _, beta2 = group['betas'] - group['betas'] = (m_momentum, beta2) - else: - group['momentum'] = m_momentum - group['base_momentum'] = b_momentum - group['max_momentum'] = m_momentum - - if self.three_phase: - self.momentum_phases.append({ - 'end_iter': - float(self.pct_start * runner.max_iters) - 1, - 'start_momentum': - 'max_momentum', - 'end_momentum': - 'base_momentum' - }) - self.momentum_phases.append({ - 'end_iter': - float(2 * self.pct_start * runner.max_iters) - 2, - 'start_momentum': - 'base_momentum', - 'end_momentum': - 'max_momentum' - }) - self.momentum_phases.append({ - 'end_iter': runner.max_iters - 1, - 'start_momentum': 'max_momentum', - 'end_momentum': 'max_momentum' - }) - else: - self.momentum_phases.append({ - 'end_iter': - float(self.pct_start * runner.max_iters) - 1, - 'start_momentum': - 'max_momentum', - 'end_momentum': - 'base_momentum' - }) 
- self.momentum_phases.append({ - 'end_iter': runner.max_iters - 1, - 'start_momentum': 'base_momentum', - 'end_momentum': 'max_momentum' - }) - - def _set_momentum(self, runner, momentum_groups): - if isinstance(runner.optimizer, dict): - for k, optim in runner.optimizer.items(): - for param_group, mom in zip(optim.param_groups, - momentum_groups[k]): - if 'momentum' in param_group.keys(): - param_group['momentum'] = mom - elif 'betas' in param_group.keys(): - param_group['betas'] = (mom, param_group['betas'][1]) - else: - for param_group, mom in zip(runner.optimizer.param_groups, - momentum_groups): - if 'momentum' in param_group.keys(): - param_group['momentum'] = mom - elif 'betas' in param_group.keys(): - param_group['betas'] = (mom, param_group['betas'][1]) - - def get_momentum(self, runner, param_group): - curr_iter = runner.iter - start_iter = 0 - for i, phase in enumerate(self.momentum_phases): - end_iter = phase['end_iter'] - if curr_iter <= end_iter or i == len(self.momentum_phases) - 1: - pct = (curr_iter - start_iter) / (end_iter - start_iter) - momentum = self.anneal_func( - param_group[phase['start_momentum']], - param_group[phase['end_momentum']], pct) - break - start_iter = end_iter - return momentum - - def get_regular_momentum(self, runner): - if isinstance(runner.optimizer, dict): - momentum_groups = {} - for k, optim in runner.optimizer.items(): - _momentum_group = [ - self.get_momentum(runner, param_group) - for param_group in optim.param_groups - ] - momentum_groups.update({k: _momentum_group}) - return momentum_groups - else: - momentum_groups = [] - for param_group in runner.optimizer.param_groups: - momentum_groups.append(self.get_momentum(runner, param_group)) - return momentum_groups diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/core/evaluation/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/core/evaluation/__init__.py deleted file mode 100644 index f7cc4b23413a0639e9de00eeb0bf600632d2c6cd..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/core/evaluation/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .class_names import get_classes, get_palette -from .eval_hooks import DistEvalHook, EvalHook -from .metrics import eval_metrics, mean_dice, mean_fscore, mean_iou - -__all__ = [ - 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore', - 'eval_metrics', 'get_classes', 'get_palette' -] diff --git a/spaces/abnerh/video-to-subs/README.md b/spaces/abnerh/video-to-subs/README.md deleted file mode 100644 index ed32bb5f36f156684d9ebd5458b8a93e085d7e12..0000000000000000000000000000000000000000 --- a/spaces/abnerh/video-to-subs/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Video To Subs -emoji: 🎙️ -colorFrom: white -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. 
- -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/ai-guru/composer/static/_app/pages/index.svelte-9646dd5a.js b/spaces/ai-guru/composer/static/_app/pages/index.svelte-9646dd5a.js deleted file mode 100644 index 510a133267536503ed780d792110a1ff8173e49a..0000000000000000000000000000000000000000 --- a/spaces/ai-guru/composer/static/_app/pages/index.svelte-9646dd5a.js +++ /dev/null @@ -1,5 +0,0 @@ -import{S as Z,i as x,s as ee,e as b,k as I,c as w,a as E,d as g,m as C,K as se,b as d,g as P,J as f,L as U,t as N,h as H,j as Ee,E as re,M as Re,N as R,O as fe,P as Ge,w as Y,x as J,y as K,Q as Ve,q as L,o as V,B as Q,R as ue,T as Fe,f as X,U as _e,V as ie,W as qe,n as $e,p as Te,X as We,v as Xe,u as Ye,Y as Je,l as Me}from"../chunks/index-7c452e28.js";import{w as le}from"../chunks/index-d282aaf8.js";const he=le("synth"),be=le("medium"),we=le("medium"),ve=le(!1),pe=le(""),ye=le(""),ke=le(""),ne={piano:"Piano",chamber:"Chamber Music",rock_and_metal:"Rock and Metal",synth:"Synthesizer",church:"Church",timpani_strings_harp:"Timpani, Contrabass, Harp",country:"Country",reggae:"Reggae-esque"},Ke={low:"Low",medium:"Medium",high:"High"},Qe={low:"Low",medium:"Medium",high:"High",very_high:"Very High"};function Ne(l,e,s){const t=l.slice();return t[4]=e[s],t[6]=s,t}function He(l){let e,s,t,o,n,a,i,c,u,p,r,_;return{c(){e=b("label"),s=b("div"),t=b("img"),a=I(),i=b("input"),u=I(),this.h()},l(m){e=w(m,"LABEL",{"data-selected":!0,class:!0});var T=E(e);s=w(T,"DIV",{class:!0});var v=E(s);t=w(v,"IMG",{src:!0,alt:!0,class:!0}),v.forEach(g),a=C(T),i=w(T,"INPUT",{type:!0,class:!0}),u=C(T),T.forEach(g),this.h()},h(){se(t.src,o=`static/${l[4]}.svg`)||d(t,"src",o),d(t,"alt",n=ne[l[4]]),d(t,"class","svelte-1r9pswz"),d(s,"class","svelte-1r9pswz"),d(i,"type","radio"),i.__value=c=l[4],i.value=i.__value,d(i,"class","svelte-1r9pswz"),l[3][0].push(i),d(e,"data-selected",p=l[0]===l[4]),d(e,"class","svelte-1r9pswz")},m(m,T){P(m,e,T),f(e,s),f(s,t),f(e,a),f(e,i),i.checked=i.__value===l[0],f(e,u),r||(_=U(i,"change",l[2]),r=!0)},p(m,T){T&1&&(i.checked=i.__value===m[0]),T&1&&p!==(p=m[0]===m[4])&&d(e,"data-selected",p)},d(m){m&&g(e),l[3][0].splice(l[3][0].indexOf(i),1),r=!1,_()}}}function Ze(l){let e,s,t=(ne[l[0]]||"Synthesizer")+"",o,n,a,i=l[1],c=[];for(let u=0;us(0,t=i));const o=Object.keys(ne),n=[[]];function a(){t=this.__value,he.set(t)}return[t,o,a,n]}class et extends Z{constructor(e){super(),x(this,e,xe,Ze,ee,{})}}function je(l,e,s){const t=l.slice();return t[5]=e[s],t}function Be(l){let e,s=l[1][l[5]]+"",t,o,n,a,i,c,u,p;return{c(){e=b("label"),t=N(s),o=I(),n=b("input"),i=I(),this.h()},l(r){e=w(r,"LABEL",{"data-selected":!0,class:!0});var _=E(e);t=H(_,s),o=C(_),n=w(_,"INPUT",{type:!0,class:!0}),i=C(_),_.forEach(g),this.h()},h(){d(n,"type","radio"),n.__value=a=l[5],n.value=n.__value,d(n,"class","svelte-1m848u0"),l[4][0].push(n),d(e,"data-selected",c=l[5]===l[0]),d(e,"class","svelte-1m848u0")},m(r,_){P(r,e,_),f(e,t),f(e,o),f(e,n),n.checked=n.__value===l[0],f(e,i),u||(p=U(n,"change",l[3]),u=!0)},p(r,_){_&2&&s!==(s=r[1][r[5]]+"")&&Ee(t,s),_&1&&(n.checked=n.__value===r[0]),_&1&&c!==(c=r[5]===r[0])&&d(e,"data-selected",c)},d(r){r&&g(e),l[4][0].splice(l[4][0].indexOf(n),1),u=!1,p()}}}function tt(l){let e,s=l[2],t=[];for(let o=0;o{"options"in c&&s(1,t=c.options),"selection"in c&&s(0,n=c.selection)},[n,t,o,i,a]}class Ue extends 
Z{constructor(e){super(),x(this,e,st,tt,ee,{options:1,selection:0})}}function nt(l){let e,s,t,o,n,a,i,c;function u(r){l[1](r)}let p={options:Ke};return l[0]!==void 0&&(p.selection=l[0]),a=new Ue({props:p}),fe.push(()=>Ge(a,"selection",u)),{c(){e=b("div"),s=b("fieldset"),t=b("legend"),o=N("Note density"),n=I(),Y(a.$$.fragment),this.h()},l(r){e=w(r,"DIV",{});var _=E(e);s=w(_,"FIELDSET",{class:!0});var m=E(s);t=w(m,"LEGEND",{class:!0});var T=E(t);o=H(T,"Note density"),T.forEach(g),n=C(m),J(a.$$.fragment,m),m.forEach(g),_.forEach(g),this.h()},h(){d(t,"class","svelte-1ikh8be"),d(s,"class","svelte-1ikh8be")},m(r,_){P(r,e,_),f(e,s),f(s,t),f(t,o),f(s,n),K(a,s,null),c=!0},p(r,[_]){const m={};!i&&_&1&&(i=!0,m.selection=r[0],Ve(()=>i=!1)),a.$set(m)},i(r){c||(L(a.$$.fragment,r),c=!0)},o(r){V(a.$$.fragment,r),c=!1},d(r){r&&g(e),Q(a)}}}function lt(l,e,s){let t;R(l,be,n=>s(0,t=n));function o(n){t=n,be.set(t)}return[t,o]}class at extends Z{constructor(e){super(),x(this,e,lt,nt,ee,{})}}function ot(l){let e,s,t,o,n,a,i,c;function u(r){l[1](r)}let p={options:Qe};return l[0]!==void 0&&(p.selection=l[0]),a=new Ue({props:p}),fe.push(()=>Ge(a,"selection",u)),{c(){e=b("div"),s=b("fieldset"),t=b("legend"),o=N("Temperature"),n=I(),Y(a.$$.fragment),this.h()},l(r){e=w(r,"DIV",{});var _=E(e);s=w(_,"FIELDSET",{class:!0});var m=E(s);t=w(m,"LEGEND",{class:!0});var T=E(t);o=H(T,"Temperature"),T.forEach(g),n=C(m),J(a.$$.fragment,m),m.forEach(g),_.forEach(g),this.h()},h(){d(t,"class","svelte-1ikh8be"),d(s,"class","svelte-1ikh8be")},m(r,_){P(r,e,_),f(e,s),f(s,t),f(t,o),f(s,n),K(a,s,null),c=!0},p(r,[_]){const m={};!i&&_&1&&(i=!0,m.selection=r[0],Ve(()=>i=!1)),a.$set(m)},i(r){c||(L(a.$$.fragment,r),c=!0)},o(r){V(a.$$.fragment,r),c=!1},d(r){r&&g(e),Q(a)}}}function rt(l,e,s){let t;R(l,we,n=>s(0,t=n));function o(n){t=n,we.set(t)}return[t,o]}class it extends Z{constructor(e){super(),x(this,e,rt,ot,ee,{})}}function ct(l){let e,s,t;return{c(){e=N("Compose "),s=b("img"),this.h()},l(o){e=H(o,"Compose "),s=w(o,"IMG",{src:!0,alt:!0,class:!0}),this.h()},h(){se(s.src,t="static/wand.svg")||d(s,"src",t),d(s,"alt","Magic wand"),d(s,"class","svelte-18w38ow")},m(o,n){P(o,e,n),P(o,s,n)},d(o){o&&g(e),o&&g(s)}}}function ut(l){let e;return{c(){e=N("Composing...")},l(s){e=H(s,"Composing...")},m(s,t){P(s,e,t)},d(s){s&&g(e)}}}function ft(l){let e,s,t;function o(i,c){return i[0]?ut:ct}let n=o(l),a=n(l);return{c(){e=b("button"),a.c(),this.h()},l(i){e=w(i,"BUTTON",{class:!0});var c=E(e);a.l(c),c.forEach(g),this.h()},h(){e.disabled=l[0],d(e,"class","svelte-18w38ow")},m(i,c){P(i,e,c),a.m(e,null),s||(t=U(e,"click",l[1]),s=!0)},p(i,[c]){n!==(n=o(i))&&(a.d(1),a=n(i),a&&(a.c(),a.m(e,null))),c&1&&(e.disabled=i[0])},i:re,o:re,d(i){i&&g(e),a.d(),s=!1,t()}}}function dt(l,e,s){let t,o,n,a,i,c,u;R(l,ve,v=>s(0,t=v)),R(l,ke,v=>s(2,o=v)),R(l,ye,v=>s(3,n=v)),R(l,pe,v=>s(4,a=v)),R(l,we,v=>s(5,i=v)),R(l,be,v=>s(6,c=v)),R(l,he,v=>s(7,u=v));const p=()=>{"mediaSession"in navigator&&(navigator.mediaSession.metadata=new MediaMetadata({title:`${ne[u]} Composition`,artist:"AI Guru Composer",album:"Hugging Face",artwork:[{src:"static/hugging-face-headphones.png",sizes:"512x512",type:"image/png"}]}))},r=async({music_style:v,density:y,temperature:D})=>{var A;const S=await fetch("task/create",{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({music_style:v,density:y,temperature:D})});if(!S.ok||!((A=S.headers.get("content-type"))!=null&&A.includes("application/json")))throw new Error(`Unable to create composition: [${S.status}] ${await S.text()}`);return 
await S.json()},_=async v=>{var D;const y=await fetch(`task/poll?task_id=${v.task_id}`);if(!y.ok||!((D=y.headers.get("content-type"))!=null&&D.includes("application/json")))throw new Error(`Unable to create composition: [${y.status}] ${await y.text()}`);return await y.json()},m=async(v,y=1e3,D=100)=>(v=await _(v),v.status==="completed"||v.status==="failed"||D&&v.poll_count>D?v:(await new Promise(S=>setTimeout(S,y)),await m(v,y,D)));return[t,async()=>{ue(ve,t=!0,t);try{const v=await r({music_style:u,density:c,temperature:i}),y=await m(v),{audio:D,image:S,tokens:j}=y.output;ue(pe,a=D,a),ue(ye,n=S,n),ue(ke,o=j,o),p()}catch(v){console.error(v)}finally{ue(ve,t=!1,t)}}]}class _t extends Z{constructor(e){super(),x(this,e,dt,ft,ee,{})}}function ce(l,{delay:e=0,duration:s=400,easing:t=Fe}={}){const o=+getComputedStyle(l).opacity;return{delay:e,duration:s,easing:t,css:n=>`opacity: ${n*o}`}}function Le(l){let e,s,t,o;return{c(){e=b("img"),this.h()},l(n){e=w(n,"IMG",{class:!0,src:!0,alt:!0,draggable:!0}),this.h()},h(){d(e,"class","play-button svelte-1536b0c"),se(e.src,s="static/play.svg")||d(e,"src",s),d(e,"alt","Play button"),d(e,"draggable","false"),X(e,"width",l[7]>100?"20%":"7.5%",!1)},m(n,a){P(n,e,a),o=!0},p(n,a){a&128&&X(e,"width",n[7]>100?"20%":"7.5%",!1)},i(n){o||(_e(()=>{t||(t=ie(e,ce,{},!0)),t.run(1)}),o=!0)},o(n){t||(t=ie(e,ce,{},!1)),t.run(0),o=!1},d(n){n&&g(e),n&&t&&t.end()}}}function ht(l){let e,s,t,o,n,a,i,c=!1,u,p=!0,r,_,m=`translate(${Math.min(l[6]*(l[1]/(l[2]-.9)),l[6])}px, -2%)`,T,v,y,D,S,j,A,q,W;function z(){cancelAnimationFrame(u),a.paused||(u=Je(z),c=!0),l[15].call(a)}let $=l[3]&&Le(l);return{c(){e=b("section"),s=b("img"),o=I(),n=b("div"),a=b("audio"),r=I(),_=b("div"),T=I(),$&&$.c(),v=I(),y=b("a"),D=N("Download"),this.h()},l(h){e=w(h,"SECTION",{class:!0});var k=E(e);s=w(k,"IMG",{class:!0,src:!0,alt:!0}),o=C(k),n=w(k,"DIV",{class:!0,tabindex:!0});var G=E(n);a=w(G,"AUDIO",{src:!0,class:!0}),E(a).forEach(g),r=C(G),_=w(G,"DIV",{class:!0}),E(_).forEach(g),T=C(G),$&&$.l(G),G.forEach(g),v=C(k),y=w(k,"A",{href:!0,download:!0,class:!0});var F=E(y);D=H(F,"Download"),F.forEach(g),k.forEach(g),this.h()},h(){d(s,"class","notes svelte-1536b0c"),se(s.src,t=l[8])||d(s,"src",t),d(s,"alt",""),se(a.src,i=l[9])||d(a,"src",i),d(a,"class","svelte-1536b0c"),l[2]===void 0&&_e(()=>l[16].call(a)),d(_,"class","handle svelte-1536b0c"),X(_,"transform",m,!1),d(n,"class","player svelte-1536b0c"),d(n,"tabindex","0"),X(n,"width",l[6]+"px",!1),X(n,"height",l[7]+"px",!1),d(y,"href",l[9]),d(y,"download",S=`${ne[l[10]]} Composition - AI Guru ft. Hugging Face.wav`),d(y,"class","download svelte-1536b0c"),d(e,"class","svelte-1536b0c")},m(h,k){P(h,e,k),f(e,s),l[14](s),f(e,o),f(e,n),f(n,a),f(n,r),f(n,_),f(n,T),$&&$.m(n,null),l[18](n),f(e,v),f(e,y),f(y,D),l[20](e),A=!0,q||(W=[U(a,"timeupdate",z),U(a,"durationchange",l[16]),U(a,"play",l[17]),U(a,"pause",l[17]),U(n,"mousemove",l[11]),U(n,"touchmove",qe(l[12])),U(n,"keydown",l[13]),U(n,"click",l[19])],q=!0)},p(h,[k]){(!A||k&256&&!se(s.src,t=h[8]))&&d(s,"src",t),(!A||k&512&&!se(a.src,i=h[9]))&&d(a,"src",i),!c&&k&2&&!isNaN(h[1])&&(a.currentTime=h[1]),c=!1,k&8&&p!==(p=h[3])&&a[p?"pause":"play"](),k&70&&m!==(m=`translate(${Math.min(h[6]*(h[1]/(h[2]-.9)),h[6])}px, -2%)`)&&X(_,"transform",m,!1),h[3]?$?($.p(h,k),k&8&&L($,1)):($=Le(h),$.c(),L($,1),$.m(n,null)):$&&($e(),V($,1,1,()=>{$=null}),Te()),k&64&&X(n,"width",h[6]+"px",!1),k&128&&X(n,"height",h[7]+"px",!1),(!A||k&512)&&d(y,"href",h[9]),(!A||k&1024&&S!==(S=`${ne[h[10]]} Composition - AI Guru ft. 
Hugging Face.wav`))&&d(y,"download",S)},i(h){A||(L($),_e(()=>{j||(j=ie(e,ce,{},!0)),j.run(1)}),A=!0)},o(h){V($),j||(j=ie(e,ce,{},!1)),j.run(0),A=!1},d(h){h&&g(e),l[14](null),$&&$.d(),l[18](null),l[20](null),h&&j&&j.end(),q=!1,We(W)}}}function pt(l,e,s){let t,o,n;R(l,ye,h=>s(8,t=h)),R(l,pe,h=>s(9,o=h)),R(l,he,h=>s(10,n=h));let a,i,c,u=!0,p,r,_,m;const T=()=>{s(6,_=r&&r.clientWidth),s(7,m=r&&r.clientHeight)};Xe(()=>{T(),"mediaSession"in navigator&&(navigator.mediaSession.setActionHandler("play",()=>s(3,u=!1)),navigator.mediaSession.setActionHandler("pause",()=>s(3,u=!0)),navigator.mediaSession.setActionHandler("stop",()=>{s(3,u=!0),s(1,i=0)})),window.scrollTo({top:a.offsetTop,behavior:"smooth"})}),Ye(()=>{T()});const v=h=>{if(!c||!h.buttons)return;const{left:k,right:G}=p.getBoundingClientRect();s(1,i=c*(h.clientX-k)/(G-k))},y=h=>{if(!c)return;const{left:k,right:G}=p.getBoundingClientRect();s(1,i=c*(h.touches[0].clientX-k)/(G-k))},D=h=>{h.preventDefault(),h.code==="Space"&&s(3,u=!u),h.code==="ArrowLeft"&&s(1,i=i>=1?i-1:0),h.code==="ArrowRight"&&s(1,i=i<=c-1?i+1:c)};function S(h){fe[h?"unshift":"push"](()=>{r=h,s(5,r)})}function j(){i=this.currentTime,s(1,i)}function A(){c=this.duration,s(2,c)}function q(){u=this.paused,s(3,u)}function W(h){fe[h?"unshift":"push"](()=>{p=h,s(4,p)})}const z=()=>s(3,u=!u);function $(h){fe[h?"unshift":"push"](()=>{a=h,s(0,a)})}return[a,i,c,u,p,r,_,m,t,o,n,v,y,D,S,j,A,q,W,z,$]}class mt extends Z{constructor(e){super(),x(this,e,pt,ht,ee,{})}}function ze(l){let e,s,t,o,n,a,i,c;return{c(){e=b("section"),s=b("h2"),t=N("Tokenized notes"),o=I(),n=b("p"),a=N(l[0]),this.h()},l(u){e=w(u,"SECTION",{class:!0});var p=E(e);s=w(p,"H2",{});var r=E(s);t=H(r,"Tokenized notes"),r.forEach(g),o=C(p),n=w(p,"P",{class:!0});var _=E(n);a=H(_,l[0]),_.forEach(g),p.forEach(g),this.h()},h(){d(n,"class","svelte-4un5mw"),d(e,"class","svelte-4un5mw")},m(u,p){P(u,e,p),f(e,s),f(s,t),f(e,o),f(e,n),f(n,a),c=!0},p(u,p){(!c||p&1)&&Ee(a,u[0])},i(u){c||(_e(()=>{i||(i=ie(e,ce,{},!0)),i.run(1)}),c=!0)},o(u){i||(i=ie(e,ce,{},!1)),i.run(0),c=!1},d(u){u&&g(e),u&&i&&i.end()}}}function gt(l){let e,s,t=l[0]&&ze(l);return{c(){t&&t.c(),e=Me()},l(o){t&&t.l(o),e=Me()},m(o,n){t&&t.m(o,n),P(o,e,n),s=!0},p(o,[n]){o[0]?t?(t.p(o,n),n&1&&L(t,1)):(t=ze(o),t.c(),L(t,1),t.m(e.parentNode,e)):t&&($e(),V(t,1,1,()=>{t=null}),Te())},i(o){s||(L(t),s=!0)},o(o){V(t),s=!1},d(o){t&&t.d(o),o&&g(e)}}}function vt(l,e,s){let t;return R(l,ke,o=>s(0,t=o)),[t]}class bt extends Z{constructor(e){super(),x(this,e,vt,gt,ee,{})}}function Pe(l){let e,s,t,o;return e=new mt({}),t=new bt({}),{c(){Y(e.$$.fragment),s=I(),Y(t.$$.fragment)},l(n){J(e.$$.fragment,n),s=C(n),J(t.$$.fragment,n)},m(n,a){K(e,n,a),P(n,s,a),K(t,n,a),o=!0},i(n){o||(L(e.$$.fragment,n),L(t.$$.fragment,n),o=!0)},o(n){V(e.$$.fragment,n),V(t.$$.fragment,n),o=!1},d(n){Q(e,n),n&&g(s),Q(t,n)}}}function wt(l){let e,s,t,o,n,a,i,c,u,p,r,_,m,T,v,y,D,S,j,A,q,W,z,$,h,k,G,F,me,te,ge,de;$=new et({}),k=new at({}),F=new it({}),te=new _t({});let M=l[0]&&Pe();return{c(){e=b("main"),s=b("h1"),t=N("Composer"),o=I(),n=b("p"),a=N("Trained on fifteen thousand songs. One AI model. Infinite compositions."),i=I(),c=b("p"),u=N(`This space contains a deep neural network model that can compose music. You can use it to generate audio in - different styles, 4 bars at a time.`),p=I(),r=b("p"),_=N("Developed by "),m=b("a"),T=N("Ron Au"),v=N(` and - `),y=b("a"),D=N("Tristan Behrens"),S=N("."),j=I(),A=b("p"),q=N("Have fun! 
And always feel free to send us some feedback and share your compositions!"),W=I(),z=b("section"),Y($.$$.fragment),h=I(),Y(k.$$.fragment),G=I(),Y(F.$$.fragment),me=I(),Y(te.$$.fragment),ge=I(),M&&M.c(),this.h()},l(B){e=w(B,"MAIN",{class:!0});var O=E(e);s=w(O,"H1",{class:!0});var Ie=E(s);t=H(Ie,"Composer"),Ie.forEach(g),o=C(O),n=w(O,"P",{class:!0});var Ce=E(n);a=H(Ce,"Trained on fifteen thousand songs. One AI model. Infinite compositions."),Ce.forEach(g),i=C(O),c=w(O,"P",{class:!0});var De=E(c);u=H(De,`This space contains a deep neural network model that can compose music. You can use it to generate audio in - different styles, 4 bars at a time.`),De.forEach(g),p=C(O),r=w(O,"P",{class:!0});var ae=E(r);_=H(ae,"Developed by "),m=w(ae,"A",{href:!0,rel:!0,target:!0});var Se=E(m);T=H(Se,"Ron Au"),Se.forEach(g),v=H(ae,` and - `),y=w(ae,"A",{href:!0,rel:!0,target:!0});var Ae=E(y);D=H(Ae,"Tristan Behrens"),Ae.forEach(g),S=H(ae,"."),ae.forEach(g),j=C(O),A=w(O,"P",{class:!0});var Oe=E(A);q=H(Oe,"Have fun! And always feel free to send us some feedback and share your compositions!"),Oe.forEach(g),W=C(O),z=w(O,"SECTION",{id:!0,class:!0});var oe=E(z);J($.$$.fragment,oe),h=C(oe),J(k.$$.fragment,oe),G=C(oe),J(F.$$.fragment,oe),oe.forEach(g),me=C(O),J(te.$$.fragment,O),ge=C(O),M&&M.l(O),O.forEach(g),this.h()},h(){d(s,"class","svelte-1rfjlkw"),d(n,"class","heading svelte-1rfjlkw"),d(c,"class","svelte-1rfjlkw"),d(m,"href","https://twitter.com/ronvoluted"),d(m,"rel","noopener"),d(m,"target","_blank"),d(y,"href","https://twitter.com/DrTBehrens"),d(y,"rel","noopener"),d(y,"target","_blank"),d(r,"class","svelte-1rfjlkw"),d(A,"class","svelte-1rfjlkw"),d(z,"id","options"),d(z,"class","svelte-1rfjlkw"),d(e,"class","svelte-1rfjlkw")},m(B,O){P(B,e,O),f(e,s),f(s,t),f(e,o),f(e,n),f(n,a),f(e,i),f(e,c),f(c,u),f(e,p),f(e,r),f(r,_),f(r,m),f(m,T),f(r,v),f(r,y),f(y,D),f(r,S),f(e,j),f(e,A),f(A,q),f(e,W),f(e,z),K($,z,null),f(z,h),K(k,z,null),f(z,G),K(F,z,null),f(e,me),K(te,e,null),f(e,ge),M&&M.m(e,null),de=!0},p(B,[O]){B[0]?M?O&1&&L(M,1):(M=Pe(),M.c(),L(M,1),M.m(e,null)):M&&($e(),V(M,1,1,()=>{M=null}),Te())},i(B){de||(L($.$$.fragment,B),L(k.$$.fragment,B),L(F.$$.fragment,B),L(te.$$.fragment,B),L(M),de=!0)},o(B){V($.$$.fragment,B),V(k.$$.fragment,B),V(F.$$.fragment,B),V(te.$$.fragment,B),V(M),de=!1},d(B){B&&g(e),Q($),Q(k),Q(F),Q(te),M&&M.d()}}}function yt(l,e,s){let t;return R(l,pe,o=>s(0,t=o)),[t]}class $t extends Z{constructor(e){super(),x(this,e,yt,wt,ee,{})}}export{$t as default}; diff --git a/spaces/aimstack/bloom/Dockerfile b/spaces/aimstack/bloom/Dockerfile deleted file mode 100644 index 1b5313643bce150166d4135b441cd315634a68b9..0000000000000000000000000000000000000000 --- a/spaces/aimstack/bloom/Dockerfile +++ /dev/null @@ -1,30 +0,0 @@ -FROM python:3.9 - - -RUN useradd -m -u 1000 aim_user - -# Switch to the "aim_user" user -USER aim_user - -# Set home to the user's home directory -ENV HOME=/home/aim_user \ - PATH=/home/aim_user/.local/bin:$PATH - -# Set the working directory to the user's home directory -WORKDIR $HOME - -# install the `aim` package on the latest version -RUN pip install aim==4.0.0.dev6 - -RUN aim telemetry off - -ENTRYPOINT ["/bin/sh", "-c"] - -COPY aim_repo.tar.gz . -RUN tar xvzf aim_repo.tar.gz -# have to run `aim init` in the directory that stores aim data for -# otherwise `aim up` will prompt for confirmation to create the directory itself. -# We run aim listening on 0.0.0.0 to expose all ports. Also, we run -# using `--dev` to print verbose logs. 
Port 43800 is the default port of -# `aim up` but explicit is better than implicit. -CMD ["aim up --host 0.0.0.0 --port 7860 --workers 2"] \ No newline at end of file diff --git a/spaces/airus/img-to-music/constants.py b/spaces/airus/img-to-music/constants.py deleted file mode 100644 index 86863d1b778d4c66f0d8e1e0b699f1bb937c1d50..0000000000000000000000000000000000000000 --- a/spaces/airus/img-to-music/constants.py +++ /dev/null @@ -1,9 +0,0 @@ -import numpy as np -import os - -MUBERT_LICENSE = os.environ.get('MUBERT_LICENSE') -MUBERT_TOKEN = os.environ.get('MUBERT_TOKEN') - -MUBERT_MODE = "loop" -MUBERT_TAGS_STRING = 'tribal,action,kids,neo-classic,run 130,pumped,jazz / funk,ethnic,dubtechno,reggae,acid jazz,liquidfunk,funk,witch house,tech house,underground,artists,mystical,disco,sensorium,r&b,agender,psychedelic trance / psytrance,peaceful,run 140,piano,run 160,setting,meditation,christmas,ambient,horror,cinematic,electro house,idm,bass,minimal,underscore,drums,glitchy,beautiful,technology,tribal house,country pop,jazz & funk,documentary,space,classical,valentines,chillstep,experimental,trap,new jack swing,drama,post-rock,tense,corporate,neutral,happy,analog,funky,spiritual,sberzvuk special,chill hop,dramatic,catchy,holidays,fitness 90,optimistic,orchestra,acid techno,energizing,romantic,minimal house,breaks,hyper pop,warm up,dreamy,dark,urban,microfunk,dub,nu disco,vogue,keys,hardcore,aggressive,indie,electro funk,beauty,relaxing,trance,pop,hiphop,soft,acoustic,chillrave / ethno-house,deep techno,angry,dance,fun,dubstep,tropical,latin pop,heroic,world music,inspirational,uplifting,atmosphere,art,epic,advertising,chillout,scary,spooky,slow ballad,saxophone,summer,erotic,jazzy,energy 100,kara mar,xmas,atmospheric,indie pop,hip-hop,yoga,reggaeton,lounge,travel,running,folk,chillrave & ethno-house,detective,darkambient,chill,fantasy,minimal techno,special,night,tropical house,downtempo,lullaby,meditative,upbeat,glitch hop,fitness,neurofunk,sexual,indie rock,future pop,jazz,cyberpunk,melancholic,happy hardcore,family / kids,synths,electric guitar,comedy,psychedelic trance & psytrance,edm,psychedelic rock,calm,zen,bells,podcast,melodic house,ethnic percussion,nature,heavy,bassline,indie dance,techno,drumnbass,synth pop,vaporwave,sad,8-bit,chillgressive,deep,orchestral,futuristic,hardtechno,nostalgic,big room,sci-fi,tutorial,joyful,pads,minimal 170,drill,ethnic 108,amusing,sleepy ambient,psychill,italo disco,lofi,house,acoustic guitar,bassline house,rock,k-pop,synthwave,deep house,electronica,gabber,nightlife,sport & fitness,road trip,celebration,electro,disco house,electronic' -MUBERT_TAGS = np.array(MUBERT_TAGS_STRING.split(',')) \ No newline at end of file diff --git a/spaces/akhaliq/Real-ESRGAN/realesrgan/data/realesrgan_paired_dataset.py b/spaces/akhaliq/Real-ESRGAN/realesrgan/data/realesrgan_paired_dataset.py deleted file mode 100644 index 386c8d72496245dae8df033c2ebbd76b41ff45f1..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Real-ESRGAN/realesrgan/data/realesrgan_paired_dataset.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb -from basicsr.data.transforms import augment, paired_random_crop -from basicsr.utils import FileClient, imfrombytes, img2tensor -from basicsr.utils.registry import DATASET_REGISTRY -from torch.utils import data as data -from torchvision.transforms.functional import normalize - - -@DATASET_REGISTRY.register() -class RealESRGANPairedDataset(data.Dataset): - """Paired 
image dataset for image restoration. - - Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and GT image pairs. - - There are three modes: - 1. 'lmdb': Use lmdb files. - If opt['io_backend'] == lmdb. - 2. 'meta_info': Use meta information file to generate paths. - If opt['io_backend'] != lmdb and opt['meta_info'] is not None. - 3. 'folder': Scan folders to generate paths. - The rest. - - Args: - opt (dict): Config for train datasets. It contains the following keys: - dataroot_gt (str): Data root path for gt. - dataroot_lq (str): Data root path for lq. - meta_info (str): Path for meta information file. - io_backend (dict): IO backend type and other kwarg. - filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. - Default: '{}'. - gt_size (int): Cropped patched size for gt patches. - use_hflip (bool): Use horizontal flips. - use_rot (bool): Use rotation (use vertical flip and transposing h - and w for implementation). - - scale (bool): Scale, which will be added automatically. - phase (str): 'train' or 'val'. - """ - - def __init__(self, opt): - super(RealESRGANPairedDataset, self).__init__() - self.opt = opt - self.file_client = None - self.io_backend_opt = opt['io_backend'] - # mean and std for normalizing the input images - self.mean = opt['mean'] if 'mean' in opt else None - self.std = opt['std'] if 'std' in opt else None - - self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq'] - self.filename_tmpl = opt['filename_tmpl'] if 'filename_tmpl' in opt else '{}' - - # file client (lmdb io backend) - if self.io_backend_opt['type'] == 'lmdb': - self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder] - self.io_backend_opt['client_keys'] = ['lq', 'gt'] - self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt']) - elif 'meta_info' in self.opt and self.opt['meta_info'] is not None: - # disk backend with meta_info - # Each line in the meta_info describes the relative path to an image - with open(self.opt['meta_info']) as fin: - paths = [line.strip() for line in fin] - self.paths = [] - for path in paths: - gt_path, lq_path = path.split(', ') - gt_path = os.path.join(self.gt_folder, gt_path) - lq_path = os.path.join(self.lq_folder, lq_path) - self.paths.append(dict([('gt_path', gt_path), ('lq_path', lq_path)])) - else: - # disk backend - # it will scan the whole folder to get meta info - # it will be time-consuming for folders with too many files. It is recommended using an extra meta txt file - self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl) - - def __getitem__(self, index): - if self.file_client is None: - self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) - - scale = self.opt['scale'] - - # Load gt and lq images. Dimension order: HWC; channel order: BGR; - # image range: [0, 1], float32. 
- gt_path = self.paths[index]['gt_path'] - img_bytes = self.file_client.get(gt_path, 'gt') - img_gt = imfrombytes(img_bytes, float32=True) - lq_path = self.paths[index]['lq_path'] - img_bytes = self.file_client.get(lq_path, 'lq') - img_lq = imfrombytes(img_bytes, float32=True) - - # augmentation for training - if self.opt['phase'] == 'train': - gt_size = self.opt['gt_size'] - # random crop - img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path) - # flip, rotation - img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot']) - - # BGR to RGB, HWC to CHW, numpy to tensor - img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True) - # normalize - if self.mean is not None or self.std is not None: - normalize(img_lq, self.mean, self.std, inplace=True) - normalize(img_gt, self.mean, self.std, inplace=True) - - return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path} - - def __len__(self): - return len(self.paths) diff --git a/spaces/akhaliq/SummerTime/evaluation/rougewe_metric.py b/spaces/akhaliq/SummerTime/evaluation/rougewe_metric.py deleted file mode 100644 index b27aa0ce2266903a3aa898e6e1e4ea095ecbf1cf..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/SummerTime/evaluation/rougewe_metric.py +++ /dev/null @@ -1,24 +0,0 @@ -from evaluation.summeval_metric import SummEvalMetric -from typing import List, Dict - -import nltk - - -class RougeWe(SummEvalMetric): - metric_name = "rougeWE" - range = (0, 1) - higher_is_better = True - requires_heavy_compute = True - - def __init__(self): - from summ_eval.rouge_we_metric import RougeWeMetric - - nltk.download("stopwords") - se_metric = RougeWeMetric() - super(RougeWe, self).__init__(se_metric) - - def evaluate( - self, inputs: List[str], targets: List[str], keys: List[str] = ["rouge_we_3_f"] - ) -> Dict[str, float]: - # TODO zhangir: update when dataset api is merged. 
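To make the dataset options documented above concrete, here is a hypothetical `opt` dict for the plain-folder branch (no lmdb backend, no `meta_info` file). Every path and value is an assumption for illustration, not taken from the repository; the commented lines show the intended usage once `basicsr` is installed.

```python
# Hypothetical training options for RealESRGANPairedDataset (folder backend).
opt = dict(
    phase='train',
    scale=4,
    dataroot_gt='datasets/example/HR',           # assumed path
    dataroot_lq='datasets/example/LR_bicubic',   # assumed path
    io_backend=dict(type='disk'),
    filename_tmpl='{}',
    gt_size=256,
    use_hflip=True,
    use_rot=True,
    mean=[0.5, 0.5, 0.5],
    std=[0.5, 0.5, 0.5],
)
# dataset = RealESRGANPairedDataset(opt)
# sample = dataset[0]   # {'lq': ..., 'gt': ..., 'lq_path': ..., 'gt_path': ...}
```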
- return super(RougeWe, self).evaluate(inputs, targets, keys) diff --git a/spaces/akhaliq/runwayml-stable-diffusion-v1-5/README.md b/spaces/akhaliq/runwayml-stable-diffusion-v1-5/README.md deleted file mode 100644 index 26a6a20988103807b5f70ec8d8d68cb530640a10..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/runwayml-stable-diffusion-v1-5/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Runwayml Stable Diffusion V1 5 -emoji: 📉 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/alexiserodriguez/whisper-transcription-app/README.md b/spaces/alexiserodriguez/whisper-transcription-app/README.md deleted file mode 100644 index e35a01a8bf10a8e6c8e19acf836343dc6a32e40e..0000000000000000000000000000000000000000 --- a/spaces/alexiserodriguez/whisper-transcription-app/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Whisper Transcription App -emoji: 🐢 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/utils/glibc.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/utils/glibc.py deleted file mode 100644 index 7bd3c20681d865cb4fa42617cf939b5512c7663f..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/utils/glibc.py +++ /dev/null @@ -1,88 +0,0 @@ -# The following comment should be removed at some point in the future. -# mypy: strict-optional=False - -import os -import sys -from typing import Optional, Tuple - - -def glibc_version_string() -> Optional[str]: - "Returns glibc version string, or None if not using glibc." - return glibc_version_string_confstr() or glibc_version_string_ctypes() - - -def glibc_version_string_confstr() -> Optional[str]: - "Primary implementation of glibc_version_string using os.confstr." - # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely - # to be broken or missing. This strategy is used in the standard library - # platform module: - # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183 - if sys.platform == "win32": - return None - try: - # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17": - _, version = os.confstr("CS_GNU_LIBC_VERSION").split() - except (AttributeError, OSError, ValueError): - # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... - return None - return version - - -def glibc_version_string_ctypes() -> Optional[str]: - "Fallback implementation of glibc_version_string using ctypes." - - try: - import ctypes - except ImportError: - return None - - # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen - # manpage says, "If filename is NULL, then the returned handle is for the - # main program". This way we can let the linker do the work to figure out - # which libc our process is actually using. - process_namespace = ctypes.CDLL(None) - try: - gnu_get_libc_version = process_namespace.gnu_get_libc_version - except AttributeError: - # Symbol doesn't exist -> therefore, we are not linked to - # glibc. 
- return None - - # Call gnu_get_libc_version, which returns a string like "2.5" - gnu_get_libc_version.restype = ctypes.c_char_p - version_str = gnu_get_libc_version() - # py2 / py3 compatibility: - if not isinstance(version_str, str): - version_str = version_str.decode("ascii") - - return version_str - - -# platform.libc_ver regularly returns completely nonsensical glibc -# versions. E.g. on my computer, platform says: -# -# ~$ python2.7 -c 'import platform; print(platform.libc_ver())' -# ('glibc', '2.7') -# ~$ python3.5 -c 'import platform; print(platform.libc_ver())' -# ('glibc', '2.9') -# -# But the truth is: -# -# ~$ ldd --version -# ldd (Debian GLIBC 2.22-11) 2.22 -# -# This is unfortunate, because it means that the linehaul data on libc -# versions that was generated by pip 8.1.2 and earlier is useless and -# misleading. Solution: instead of using platform, use our code that actually -# works. -def libc_ver() -> Tuple[str, str]: - """Try to determine the glibc version - - Returns a tuple of strings (lib, version) which default to empty strings - in case the lookup fails. - """ - glibc_version = glibc_version_string() - if glibc_version is None: - return ("", "") - else: - return ("glibc", glibc_version) diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/utils/temp_dir.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/utils/temp_dir.py deleted file mode 100644 index 442679a758b7b4b965ce3e615a243a7a6d318b4e..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/utils/temp_dir.py +++ /dev/null @@ -1,246 +0,0 @@ -import errno -import itertools -import logging -import os.path -import tempfile -from contextlib import ExitStack, contextmanager -from typing import Any, Dict, Iterator, Optional, TypeVar, Union - -from pip._internal.utils.misc import enum, rmtree - -logger = logging.getLogger(__name__) - -_T = TypeVar("_T", bound="TempDirectory") - - -# Kinds of temporary directories. Only needed for ones that are -# globally-managed. -tempdir_kinds = enum( - BUILD_ENV="build-env", - EPHEM_WHEEL_CACHE="ephem-wheel-cache", - REQ_BUILD="req-build", -) - - -_tempdir_manager: Optional[ExitStack] = None - - -@contextmanager -def global_tempdir_manager() -> Iterator[None]: - global _tempdir_manager - with ExitStack() as stack: - old_tempdir_manager, _tempdir_manager = _tempdir_manager, stack - try: - yield - finally: - _tempdir_manager = old_tempdir_manager - - -class TempDirectoryTypeRegistry: - """Manages temp directory behavior""" - - def __init__(self) -> None: - self._should_delete: Dict[str, bool] = {} - - def set_delete(self, kind: str, value: bool) -> None: - """Indicate whether a TempDirectory of the given kind should be - auto-deleted. - """ - self._should_delete[kind] = value - - def get_delete(self, kind: str) -> bool: - """Get configured auto-delete flag for a given TempDirectory type, - default True. - """ - return self._should_delete.get(kind, True) - - -_tempdir_registry: Optional[TempDirectoryTypeRegistry] = None - - -@contextmanager -def tempdir_registry() -> Iterator[TempDirectoryTypeRegistry]: - """Provides a scoped global tempdir registry that can be used to dictate - whether directories should be deleted. 
- """ - global _tempdir_registry - old_tempdir_registry = _tempdir_registry - _tempdir_registry = TempDirectoryTypeRegistry() - try: - yield _tempdir_registry - finally: - _tempdir_registry = old_tempdir_registry - - -class _Default: - pass - - -_default = _Default() - - -class TempDirectory: - """Helper class that owns and cleans up a temporary directory. - - This class can be used as a context manager or as an OO representation of a - temporary directory. - - Attributes: - path - Location to the created temporary directory - delete - Whether the directory should be deleted when exiting - (when used as a contextmanager) - - Methods: - cleanup() - Deletes the temporary directory - - When used as a context manager, if the delete attribute is True, on - exiting the context the temporary directory is deleted. - """ - - def __init__( - self, - path: Optional[str] = None, - delete: Union[bool, None, _Default] = _default, - kind: str = "temp", - globally_managed: bool = False, - ): - super().__init__() - - if delete is _default: - if path is not None: - # If we were given an explicit directory, resolve delete option - # now. - delete = False - else: - # Otherwise, we wait until cleanup and see what - # tempdir_registry says. - delete = None - - # The only time we specify path is in for editables where it - # is the value of the --src option. - if path is None: - path = self._create(kind) - - self._path = path - self._deleted = False - self.delete = delete - self.kind = kind - - if globally_managed: - assert _tempdir_manager is not None - _tempdir_manager.enter_context(self) - - @property - def path(self) -> str: - assert not self._deleted, f"Attempted to access deleted path: {self._path}" - return self._path - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} {self.path!r}>" - - def __enter__(self: _T) -> _T: - return self - - def __exit__(self, exc: Any, value: Any, tb: Any) -> None: - if self.delete is not None: - delete = self.delete - elif _tempdir_registry: - delete = _tempdir_registry.get_delete(self.kind) - else: - delete = True - - if delete: - self.cleanup() - - def _create(self, kind: str) -> str: - """Create a temporary directory and store its path in self.path""" - # We realpath here because some systems have their default tmpdir - # symlinked to another directory. This tends to confuse build - # scripts, so we canonicalize the path by traversing potential - # symlinks here. - path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-")) - logger.debug("Created temporary directory: %s", path) - return path - - def cleanup(self) -> None: - """Remove the temporary directory created and reset state""" - self._deleted = True - if not os.path.exists(self._path): - return - rmtree(self._path) - - -class AdjacentTempDirectory(TempDirectory): - """Helper class that creates a temporary directory adjacent to a real one. - - Attributes: - original - The original directory to create a temp directory for. - path - After calling create() or entering, contains the full - path to the temporary directory. - delete - Whether the directory should be deleted when exiting - (when used as a contextmanager) - - """ - - # The characters that may be used to name the temp directory - # We always prepend a ~ and then rotate through these until - # a usable name is found. 
- # pkg_resources raises a different error for .dist-info folder - # with leading '-' and invalid metadata - LEADING_CHARS = "-~.=%0123456789" - - def __init__(self, original: str, delete: Optional[bool] = None) -> None: - self.original = original.rstrip("/\\") - super().__init__(delete=delete) - - @classmethod - def _generate_names(cls, name: str) -> Iterator[str]: - """Generates a series of temporary names. - - The algorithm replaces the leading characters in the name - with ones that are valid filesystem characters, but are not - valid package names (for both Python and pip definitions of - package). - """ - for i in range(1, len(name)): - for candidate in itertools.combinations_with_replacement( - cls.LEADING_CHARS, i - 1 - ): - new_name = "~" + "".join(candidate) + name[i:] - if new_name != name: - yield new_name - - # If we make it this far, we will have to make a longer name - for i in range(len(cls.LEADING_CHARS)): - for candidate in itertools.combinations_with_replacement( - cls.LEADING_CHARS, i - ): - new_name = "~" + "".join(candidate) + name - if new_name != name: - yield new_name - - def _create(self, kind: str) -> str: - root, name = os.path.split(self.original) - for candidate in self._generate_names(name): - path = os.path.join(root, candidate) - try: - os.mkdir(path) - except OSError as ex: - # Continue if the name exists already - if ex.errno != errno.EEXIST: - raise - else: - path = os.path.realpath(path) - break - else: - # Final fallback on the default behavior. - path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-")) - - logger.debug("Created temporary directory: %s", path) - return path diff --git a/spaces/anaclaudia13ct/insect_detection/utils/segment/augmentations.py b/spaces/anaclaudia13ct/insect_detection/utils/segment/augmentations.py deleted file mode 100644 index 169addedf0f58cf37e774e6a85eddff6eebc30be..0000000000000000000000000000000000000000 --- a/spaces/anaclaudia13ct/insect_detection/utils/segment/augmentations.py +++ /dev/null @@ -1,104 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Image augmentation functions -""" - -import math -import random - -import cv2 -import numpy as np - -from ..augmentations import box_candidates -from ..general import resample_segments, segment2box - - -def mixup(im, labels, segments, im2, labels2, segments2): - # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - im = (im * r + im2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - segments = np.concatenate((segments, segments2), 0) - return im, labels, segments - - -def random_perspective(im, - targets=(), - segments=(), - degrees=10, - translate=.1, - scale=.1, - shear=10, - perspective=0.0, - border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = im.shape[0] + border[0] * 2 # shape(h,w,c) - width = im.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -im.shape[1] / 2 # x translation (pixels) - C[1, 2] = -im.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - 
scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) - T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) - else: # affine - im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(im[:, :, ::-1]) # base - # ax[1].imshow(im2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - new_segments = [] - if n: - new = np.zeros((n, 4)) - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine - - # clip - new[i] = segment2box(xy, width, height) - new_segments.append(xy) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) - targets = targets[i] - targets[:, 1:5] = new[i] - new_segments = np.array(new_segments)[i] - - return im, targets, new_segments diff --git a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/clipseg/models/vitseg.py b/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/clipseg/models/vitseg.py deleted file mode 100644 index ed621431ddf930fcfa27b5929999776b96fede63..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/clipseg/models/vitseg.py +++ /dev/null @@ -1,286 +0,0 @@ -import math -from posixpath import basename, dirname, join -# import clip -from clip.model import convert_weights -import torch -import json -from torch import nn -from torch.nn import functional as nnf -from torch.nn.modules import activation -from torch.nn.modules.activation import ReLU -from torchvision import transforms - -normalize = transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711)) - -from torchvision.models import ResNet - - -def process_prompts(conditional, prompt_list, conditional_map): - # DEPRECATED - - # randomly sample a synonym - words = [conditional_map[int(i)] for i in conditional] - words = [syns[torch.multinomial(torch.ones(len(syns)), 1, replacement=True).item()] for syns in words] - words = [w.replace('_', ' ') for w in words] - - if prompt_list is not None: - prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True) - prompts = [prompt_list[i] for i in prompt_indices] - else: - prompts = ['a photo of {}'] * (len(words)) - - return [promt.format(w) for promt, w in zip(prompts, words)] - - -class VITDenseBase(nn.Module): - - def rescaled_pos_emb(self, new_size): - assert len(new_size) == 2 - - a = 
self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape) - b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T - return torch.cat([self.model.positional_embedding[:1], b]) - - def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None): - - with torch.no_grad(): - - x_inp = nnf.interpolate(x_inp, (384, 384)) - - x = self.model.patch_embed(x_inp) - cls_token = self.model.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks - if self.model.dist_token is None: - x = torch.cat((cls_token, x), dim=1) - else: - x = torch.cat((cls_token, self.model.dist_token.expand(x.shape[0], -1, -1), x), dim=1) - x = self.model.pos_drop(x + self.model.pos_embed) - - activations = [] - for i, block in enumerate(self.model.blocks): - x = block(x) - - if i in extract_layers: - # permute to be compatible with CLIP - activations += [x.permute(1,0,2)] - - x = self.model.norm(x) - x = self.model.head(self.model.pre_logits(x[:, 0])) - - # again for CLIP compatibility - # x = x.permute(1, 0, 2) - - return x, activations, None - - def sample_prompts(self, words, prompt_list=None): - - prompt_list = prompt_list if prompt_list is not None else self.prompt_list - - prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True) - prompts = [prompt_list[i] for i in prompt_indices] - return [promt.format(w) for promt, w in zip(prompts, words)] - - def get_cond_vec(self, conditional, batch_size): - # compute conditional from a single string - if conditional is not None and type(conditional) == str: - cond = self.compute_conditional(conditional) - cond = cond.repeat(batch_size, 1) - - # compute conditional from string list/tuple - elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str: - assert len(conditional) == batch_size - cond = self.compute_conditional(conditional) - - # use conditional directly - elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2: - cond = conditional - - # compute conditional from image - elif conditional is not None and type(conditional) == torch.Tensor: - with torch.no_grad(): - cond, _, _ = self.visual_forward(conditional) - else: - raise ValueError('invalid conditional') - return cond - - def compute_conditional(self, conditional): - import clip - - dev = next(self.parameters()).device - - if type(conditional) in {list, tuple}: - text_tokens = clip.tokenize(conditional).to(dev) - cond = self.clip_model.encode_text(text_tokens) - else: - if conditional in self.precomputed_prompts: - cond = self.precomputed_prompts[conditional].float().to(dev) - else: - text_tokens = clip.tokenize([conditional]).to(dev) - cond = self.clip_model.encode_text(text_tokens)[0] - - return cond - - -class VITDensePredT(VITDenseBase): - - def __init__(self, extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed', - depth=3, extra_blocks=0, reduce_cond=None, fix_shift=False, - learn_trans_conv_only=False, refine=None, limit_to_clip_only=False, upsample=False, - add_calibration=False, process_cond=None, not_pretrained=False): - super().__init__() - # device = 'cpu' - - self.extract_layers = extract_layers - self.cond_layer = cond_layer - self.limit_to_clip_only = limit_to_clip_only - self.process_cond = None - - if add_calibration: - self.calibration_conds = 1 - - self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None - - 
self.add_activation1 = True - - import timm - self.model = timm.create_model('vit_base_patch16_384', pretrained=True) - self.model.head = nn.Linear(768, 512 if reduce_cond is None else reduce_cond) - - for p in self.model.parameters(): - p.requires_grad_(False) - - import clip - self.clip_model, _ = clip.load('ViT-B/16', device='cpu', jit=False) - # del self.clip_model.visual - - - self.token_shape = (14, 14) - - # conditional - if reduce_cond is not None: - self.reduce_cond = nn.Linear(512, reduce_cond) - for p in self.reduce_cond.parameters(): - p.requires_grad_(False) - else: - self.reduce_cond = None - - # self.film = AVAILABLE_BLOCKS['film'](512, 128) - self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim) - self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim) - - # DEPRECATED - # self.conditional_map = {c['id']: c['synonyms'] for c in json.load(open(cond_map))} - - assert len(self.extract_layers) == depth - - self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)]) - self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))]) - self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)]) - - trans_conv_ks = (16, 16) - self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks) - - # refinement and trans conv - - if learn_trans_conv_only: - for p in self.parameters(): - p.requires_grad_(False) - - for p in self.trans_conv.parameters(): - p.requires_grad_(True) - - if prompt == 'fixed': - self.prompt_list = ['a photo of a {}.'] - elif prompt == 'shuffle': - self.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.'] - elif prompt == 'shuffle+': - self.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.', - 'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.', - 'a bad photo of a {}.', 'a photo of the {}.'] - elif prompt == 'shuffle_clip': - from models.clip_prompts import imagenet_templates - self.prompt_list = imagenet_templates - - if process_cond is not None: - if process_cond == 'clamp' or process_cond[0] == 'clamp': - - val = process_cond[1] if type(process_cond) in {list, tuple} else 0.2 - - def clamp_vec(x): - return torch.clamp(x, -val, val) - - self.process_cond = clamp_vec - - elif process_cond.endswith('.pth'): - - shift = torch.load(process_cond) - def add_shift(x): - return x + shift.to(x.device) - - self.process_cond = add_shift - - import pickle - precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb')) - self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()} - - - def forward(self, inp_image, conditional=None, return_features=False, mask=None): - - assert type(return_features) == bool - - # inp_image = inp_image.to(self.model.positional_embedding.device) - - if mask is not None: - raise ValueError('mask not supported') - - # x_inp = normalize(inp_image) - x_inp = inp_image - - bs, dev = inp_image.shape[0], x_inp.device - - inp_image_size = inp_image.shape[2:] - - cond = self.get_cond_vec(conditional, bs) - - visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers)) - - activation1 = activations[0] - activations = activations[1:] - - a = None - for i, (activation, block, reduce) in enumerate(zip(activations[::-1], self.blocks, self.reduces)): - - if a is 
not None: - a = reduce(activation) + a - else: - a = reduce(activation) - - if i == self.cond_layer: - if self.reduce_cond is not None: - cond = self.reduce_cond(cond) - - a = self.film_mul(cond) * a + self.film_add(cond) - - a = block(a) - - for block in self.extra_blocks: - a = a + block(a) - - a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens - - size = int(math.sqrt(a.shape[2])) - - a = a.view(bs, a.shape[1], size, size) - - if self.trans_conv is not None: - a = self.trans_conv(a) - - if self.upsample_proj is not None: - a = self.upsample_proj(a) - a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear') - - a = nnf.interpolate(a, inp_image_size) - - if return_features: - return a, visual_q, cond, [activation1] + activations - else: - return a, diff --git a/spaces/aodianyun/stable-diffusion-webui/modules/script_callbacks.py b/spaces/aodianyun/stable-diffusion-webui/modules/script_callbacks.py deleted file mode 100644 index c98c2395b6fe46ddec2a10cc6a54ee0f3ba248f5..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/modules/script_callbacks.py +++ /dev/null @@ -1,359 +0,0 @@ -import sys -import traceback -from collections import namedtuple -import inspect -from typing import Optional, Dict, Any - -from fastapi import FastAPI -from gradio import Blocks - - -def report_exception(c, job): - print(f"Error executing callback {job} for {c.script}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - -class ImageSaveParams: - def __init__(self, image, p, filename, pnginfo): - self.image = image - """the PIL image itself""" - - self.p = p - """p object with processing parameters; either StableDiffusionProcessing or an object with same fields""" - - self.filename = filename - """name of file that the image would be saved to""" - - self.pnginfo = pnginfo - """dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'""" - - -class CFGDenoiserParams: - def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps): - self.x = x - """Latent image representation in the process of being denoised""" - - self.image_cond = image_cond - """Conditioning image""" - - self.sigma = sigma - """Current sigma noise step value""" - - self.sampling_step = sampling_step - """Current Sampling step number""" - - self.total_sampling_steps = total_sampling_steps - """Total number of sampling steps planned""" - - -class CFGDenoisedParams: - def __init__(self, x, sampling_step, total_sampling_steps): - self.x = x - """Latent image representation in the process of being denoised""" - - self.sampling_step = sampling_step - """Current Sampling step number""" - - self.total_sampling_steps = total_sampling_steps - """Total number of sampling steps planned""" - - -class UiTrainTabParams: - def __init__(self, txt2img_preview_params): - self.txt2img_preview_params = txt2img_preview_params - - -class ImageGridLoopParams: - def __init__(self, imgs, cols, rows): - self.imgs = imgs - self.cols = cols - self.rows = rows - - -ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) -callback_map = dict( - callbacks_app_started=[], - callbacks_model_loaded=[], - callbacks_ui_tabs=[], - callbacks_ui_train_tabs=[], - callbacks_ui_settings=[], - callbacks_before_image_saved=[], - callbacks_image_saved=[], - callbacks_cfg_denoiser=[], - callbacks_cfg_denoised=[], - callbacks_before_component=[], - callbacks_after_component=[], - callbacks_image_grid=[], - callbacks_infotext_pasted=[], - 
callbacks_script_unloaded=[], - callbacks_before_ui=[], -) - - -def clear_callbacks(): - for callback_list in callback_map.values(): - callback_list.clear() - - -def app_started_callback(demo: Optional[Blocks], app: FastAPI): - for c in callback_map['callbacks_app_started']: - try: - c.callback(demo, app) - except Exception: - report_exception(c, 'app_started_callback') - - -def model_loaded_callback(sd_model): - for c in callback_map['callbacks_model_loaded']: - try: - c.callback(sd_model) - except Exception: - report_exception(c, 'model_loaded_callback') - - -def ui_tabs_callback(): - res = [] - - for c in callback_map['callbacks_ui_tabs']: - try: - res += c.callback() or [] - except Exception: - report_exception(c, 'ui_tabs_callback') - - return res - - -def ui_train_tabs_callback(params: UiTrainTabParams): - for c in callback_map['callbacks_ui_train_tabs']: - try: - c.callback(params) - except Exception: - report_exception(c, 'callbacks_ui_train_tabs') - - -def ui_settings_callback(): - for c in callback_map['callbacks_ui_settings']: - try: - c.callback() - except Exception: - report_exception(c, 'ui_settings_callback') - - -def before_image_saved_callback(params: ImageSaveParams): - for c in callback_map['callbacks_before_image_saved']: - try: - c.callback(params) - except Exception: - report_exception(c, 'before_image_saved_callback') - - -def image_saved_callback(params: ImageSaveParams): - for c in callback_map['callbacks_image_saved']: - try: - c.callback(params) - except Exception: - report_exception(c, 'image_saved_callback') - - -def cfg_denoiser_callback(params: CFGDenoiserParams): - for c in callback_map['callbacks_cfg_denoiser']: - try: - c.callback(params) - except Exception: - report_exception(c, 'cfg_denoiser_callback') - - -def cfg_denoised_callback(params: CFGDenoisedParams): - for c in callback_map['callbacks_cfg_denoised']: - try: - c.callback(params) - except Exception: - report_exception(c, 'cfg_denoised_callback') - - -def before_component_callback(component, **kwargs): - for c in callback_map['callbacks_before_component']: - try: - c.callback(component, **kwargs) - except Exception: - report_exception(c, 'before_component_callback') - - -def after_component_callback(component, **kwargs): - for c in callback_map['callbacks_after_component']: - try: - c.callback(component, **kwargs) - except Exception: - report_exception(c, 'after_component_callback') - - -def image_grid_callback(params: ImageGridLoopParams): - for c in callback_map['callbacks_image_grid']: - try: - c.callback(params) - except Exception: - report_exception(c, 'image_grid') - - -def infotext_pasted_callback(infotext: str, params: Dict[str, Any]): - for c in callback_map['callbacks_infotext_pasted']: - try: - c.callback(infotext, params) - except Exception: - report_exception(c, 'infotext_pasted') - - -def script_unloaded_callback(): - for c in reversed(callback_map['callbacks_script_unloaded']): - try: - c.callback() - except Exception: - report_exception(c, 'script_unloaded') - - -def before_ui_callback(): - for c in reversed(callback_map['callbacks_before_ui']): - try: - c.callback() - except Exception: - report_exception(c, 'before_ui') - - -def add_callback(callbacks, fun): - stack = [x for x in inspect.stack() if x.filename != __file__] - filename = stack[0].filename if len(stack) > 0 else 'unknown file' - - callbacks.append(ScriptCallback(filename, fun)) - - -def remove_current_script_callbacks(): - stack = [x for x in inspect.stack() if x.filename != __file__] - filename = stack[0].filename if 
len(stack) > 0 else 'unknown file' - if filename == 'unknown file': - return - for callback_list in callback_map.values(): - for callback_to_remove in [cb for cb in callback_list if cb.script == filename]: - callback_list.remove(callback_to_remove) - - -def remove_callbacks_for_function(callback_func): - for callback_list in callback_map.values(): - for callback_to_remove in [cb for cb in callback_list if cb.callback == callback_func]: - callback_list.remove(callback_to_remove) - - -def on_app_started(callback): - """register a function to be called when the webui started, the gradio `Block` component and - fastapi `FastAPI` object are passed as the arguments""" - add_callback(callback_map['callbacks_app_started'], callback) - - -def on_model_loaded(callback): - """register a function to be called when the stable diffusion model is created; the model is - passed as an argument; this function is also called when the script is reloaded. """ - add_callback(callback_map['callbacks_model_loaded'], callback) - - -def on_ui_tabs(callback): - """register a function to be called when the UI is creating new tabs. - The function must either return a None, which means no new tabs to be added, or a list, where - each element is a tuple: - (gradio_component, title, elem_id) - - gradio_component is a gradio component to be used for contents of the tab (usually gr.Blocks) - title is tab text displayed to user in the UI - elem_id is HTML id for the tab - """ - add_callback(callback_map['callbacks_ui_tabs'], callback) - - -def on_ui_train_tabs(callback): - """register a function to be called when the UI is creating new tabs for the train tab. - Create your new tabs with gr.Tab. - """ - add_callback(callback_map['callbacks_ui_train_tabs'], callback) - - -def on_ui_settings(callback): - """register a function to be called before UI settings are populated; add your settings - by using shared.opts.add_option(shared.OptionInfo(...)) """ - add_callback(callback_map['callbacks_ui_settings'], callback) - - -def on_before_image_saved(callback): - """register a function to be called before an image is saved to a file. - The callback is called with one argument: - - params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object. - """ - add_callback(callback_map['callbacks_before_image_saved'], callback) - - -def on_image_saved(callback): - """register a function to be called after an image is saved to a file. - The callback is called with one argument: - - params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing. - """ - add_callback(callback_map['callbacks_image_saved'], callback) - - -def on_cfg_denoiser(callback): - """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs. - The callback is called with one argument: - - params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details. - """ - add_callback(callback_map['callbacks_cfg_denoiser'], callback) - - -def on_cfg_denoised(callback): - """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs. - The callback is called with one argument: - - params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details. - """ - add_callback(callback_map['callbacks_cfg_denoised'], callback) - - -def on_before_component(callback): - """register a function to be called before a component is created. 
- The callback is called with arguments: - - component - gradio component that is about to be created. - - **kwargs - args to gradio.components.IOComponent.__init__ function - - Use elem_id/label fields of kwargs to figure out which component it is. - This can be useful to inject your own components somewhere in the middle of vanilla UI. - """ - add_callback(callback_map['callbacks_before_component'], callback) - - -def on_after_component(callback): - """register a function to be called after a component is created. See on_before_component for more.""" - add_callback(callback_map['callbacks_after_component'], callback) - - -def on_image_grid(callback): - """register a function to be called before making an image grid. - The callback is called with one argument: - - params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified. - """ - add_callback(callback_map['callbacks_image_grid'], callback) - - -def on_infotext_pasted(callback): - """register a function to be called before applying an infotext. - The callback is called with two arguments: - - infotext: str - raw infotext. - - result: Dict[str, any] - parsed infotext parameters. - """ - add_callback(callback_map['callbacks_infotext_pasted'], callback) - - -def on_script_unloaded(callback): - """register a function to be called before the script is unloaded. Any hooks/hijacks/monkeying about that - the script did should be reverted here""" - - add_callback(callback_map['callbacks_script_unloaded'], callback) - - -def on_before_ui(callback): - """register a function to be called before the UI is created.""" - - add_callback(callback_map['callbacks_before_ui'], callback) diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/configs/fast_pitch_config.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/configs/fast_pitch_config.py deleted file mode 100644 index d086d26564450c60fa04a7f3a068506f4147d3be..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/configs/fast_pitch_config.py +++ /dev/null @@ -1,183 +0,0 @@ -from dataclasses import dataclass, field -from typing import List - -from TTS.tts.configs.shared_configs import BaseTTSConfig -from TTS.tts.models.forward_tts import ForwardTTSArgs - - -@dataclass -class FastPitchConfig(BaseTTSConfig): - """Configure `ForwardTTS` as FastPitch model. - - Example: - - >>> from TTS.tts.configs.fast_pitch_config import FastPitchConfig - >>> config = FastPitchConfig() - - Args: - model (str): - Model name used for selecting the right model at initialization. Defaults to `fast_pitch`. - - base_model (str): - Name of the base model being configured as this model so that 🐸 TTS knows it needs to initiate - the base model rather than searching for the `model` implementation. Defaults to `forward_tts`. - - model_args (Coqpit): - Model class arguments. Check `FastPitchArgs` for more details. Defaults to `FastPitchArgs()`. - - data_dep_init_steps (int): - Number of steps used for computing normalization parameters at the beginning of the training. GlowTTS uses - Activation Normalization that pre-computes normalization stats at the beginning and use the same values - for the rest. Defaults to 10. - - speakers_file (str): - Path to the file containing the list of speakers. Needed at inference for loading matching speaker ids to - speaker names. Defaults to `None`. - - use_speaker_embedding (bool): - enable / disable using speaker embeddings for multi-speaker models. If set True, the model is - in the multi-speaker mode. 
Defaults to False. - - use_d_vector_file (bool): - enable /disable using external speaker embeddings in place of the learned embeddings. Defaults to False. - - d_vector_file (str): - Path to the file including pre-computed speaker embeddings. Defaults to None. - - d_vector_dim (int): - Dimension of the external speaker embeddings. Defaults to 0. - - optimizer (str): - Name of the model optimizer. Defaults to `Adam`. - - optimizer_params (dict): - Arguments of the model optimizer. Defaults to `{"betas": [0.9, 0.998], "weight_decay": 1e-6}`. - - lr_scheduler (str): - Name of the learning rate scheduler. Defaults to `Noam`. - - lr_scheduler_params (dict): - Arguments of the learning rate scheduler. Defaults to `{"warmup_steps": 4000}`. - - lr (float): - Initial learning rate. Defaults to `1e-3`. - - grad_clip (float): - Gradient norm clipping value. Defaults to `5.0`. - - spec_loss_type (str): - Type of the spectrogram loss. Check `ForwardTTSLoss` for possible values. Defaults to `mse`. - - duration_loss_type (str): - Type of the duration loss. Check `ForwardTTSLoss` for possible values. Defaults to `mse`. - - use_ssim_loss (bool): - Enable/disable the use of SSIM (Structural Similarity) loss. Defaults to True. - - wd (float): - Weight decay coefficient. Defaults to `1e-7`. - - ssim_loss_alpha (float): - Weight for the SSIM loss. If set 0, disables the SSIM loss. Defaults to 1.0. - - dur_loss_alpha (float): - Weight for the duration predictor's loss. If set 0, disables the huber loss. Defaults to 1.0. - - spec_loss_alpha (float): - Weight for the L1 spectrogram loss. If set 0, disables the L1 loss. Defaults to 1.0. - - pitch_loss_alpha (float): - Weight for the pitch predictor's loss. If set 0, disables the pitch predictor. Defaults to 1.0. - - binary_align_loss_alpha (float): - Weight for the binary loss. If set 0, disables the binary loss. Defaults to 1.0. - - binary_loss_warmup_epochs (float): - Number of epochs to gradually increase the binary loss impact. Defaults to 150. - - min_seq_len (int): - Minimum input sequence length to be used at training. - - max_seq_len (int): - Maximum input sequence length to be used at training. Larger values result in more VRAM usage. - - # dataset configs - compute_f0(bool): - Compute pitch. defaults to True - - f0_cache_path(str): - pith cache path. 
defaults to None - """ - - model: str = "fast_pitch" - base_model: str = "forward_tts" - - # model specific params - model_args: ForwardTTSArgs = field(default_factory=ForwardTTSArgs) - - # multi-speaker settings - num_speakers: int = 0 - speakers_file: str = None - use_speaker_embedding: bool = False - use_d_vector_file: bool = False - d_vector_file: str = False - d_vector_dim: int = 0 - - # optimizer parameters - optimizer: str = "Adam" - optimizer_params: dict = field(default_factory=lambda: {"betas": [0.9, 0.998], "weight_decay": 1e-6}) - lr_scheduler: str = "NoamLR" - lr_scheduler_params: dict = field(default_factory=lambda: {"warmup_steps": 4000}) - lr: float = 1e-4 - grad_clip: float = 5.0 - - # loss params - spec_loss_type: str = "mse" - duration_loss_type: str = "mse" - use_ssim_loss: bool = True - ssim_loss_alpha: float = 1.0 - spec_loss_alpha: float = 1.0 - aligner_loss_alpha: float = 1.0 - pitch_loss_alpha: float = 0.1 - dur_loss_alpha: float = 0.1 - binary_align_loss_alpha: float = 0.1 - binary_loss_warmup_epochs: int = 150 - - # overrides - min_seq_len: int = 13 - max_seq_len: int = 200 - r: int = 1 # DO NOT CHANGE - - # dataset configs - compute_f0: bool = True - f0_cache_path: str = None - - # testing - test_sentences: List[str] = field( - default_factory=lambda: [ - "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", - "Be a voice, not an echo.", - "I'm sorry Dave. I'm afraid I can't do that.", - "This cake is great. It's so delicious and moist.", - "Prior to November 22, 1963.", - ] - ) - - def __post_init__(self): - # Pass multi-speaker parameters to the model args as `model.init_multispeaker()` looks for it there. - if self.num_speakers > 0: - self.model_args.num_speakers = self.num_speakers - - # speaker embedding settings - if self.use_speaker_embedding: - self.model_args.use_speaker_embedding = True - if self.speakers_file: - self.model_args.speakers_file = self.speakers_file - - # d-vector settings - if self.use_d_vector_file: - self.model_args.use_d_vector_file = True - if self.d_vector_dim is not None and self.d_vector_dim > 0: - self.model_args.d_vector_dim = self.d_vector_dim - if self.d_vector_file: - self.model_args.d_vector_file = self.d_vector_file diff --git a/spaces/artificialguybr/video-dubbing/Wav2Lip/face_detection/__init__.py b/spaces/artificialguybr/video-dubbing/Wav2Lip/face_detection/__init__.py deleted file mode 100644 index 4bae29fd5f85b41e4669302bd2603bc6924eddc7..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/Wav2Lip/face_detection/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# -*- coding: utf-8 -*- - -__author__ = """Adrian Bulat""" -__email__ = 'adrian.bulat@nottingham.ac.uk' -__version__ = '1.0.1' - -from .api import FaceAlignment, LandmarksType, NetworkSize diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/scatter_with_minimap.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/scatter_with_minimap.py deleted file mode 100644 index 7de894c43ae56bf135411e9912ca3d396aa71dec..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/scatter_with_minimap.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Scatter Plot with Minimap -------------------------- -This example shows how to create a miniature version of a plot -such that creating a selection in the miniature version -adjusts the axis limits in another, more detailed view. 
-""" -# category: scatter plots - -import altair as alt -from vega_datasets import data - -source = data.seattle_weather() - -zoom = alt.selection_interval(encodings=["x", "y"]) - -minimap = ( - alt.Chart(source) - .mark_point() - .add_selection(zoom) - .encode( - x="date:T", - y="temp_max:Q", - color=alt.condition(zoom, "weather", alt.value("lightgray")), - ) - .properties( - width=200, - height=200, - title="Minimap -- click and drag to zoom in the detail view", - ) -) - -detail = ( - alt.Chart(source) - .mark_point() - .encode( - x=alt.X( - "date:T", scale=alt.Scale(domain={"selection": zoom.name, "encoding": "x"}) - ), - y=alt.Y( - "temp_max:Q", - scale=alt.Scale(domain={"selection": zoom.name, "encoding": "y"}), - ), - color="weather", - ) - .properties(width=600, height=400, title="Seattle weather -- detail view") -) - -detail | minimap diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/distributed/legacy_distributed_data_parallel.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/distributed/legacy_distributed_data_parallel.py deleted file mode 100644 index cd434c7372ba30ea0e6f87e084230448f53480e9..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/distributed/legacy_distributed_data_parallel.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -""" -A modified version of the legacy DistributedDataParallel module that uses c10d -communication primitives. This version is simpler than the latest PyTorch -version and is useful for debugging. Notably it does not overlap gradient -communication with the backward pass, which makes it slower but more robust -than the PyTorch version. - -This version also supports the *no_sync* context manager, which allows faster -training with `--update-freq`. -""" - -from collections import OrderedDict -from contextlib import contextmanager - -import torch -from torch import nn - -from fairseq.distributed import utils - - -class LegacyDistributedDataParallel(nn.Module): - """Implements distributed data parallelism at the module level. - - A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`. - This version uses a c10d process group for communication and does not - broadcast buffers. - - Args: - module (~torch.nn.Module): module to be parallelized - process_group: the c10d process group to be used for distributed data - parallel all-reduction. - buffer_size (int, optional): number of elements to buffer before - performing all-reduce (default: 256M). 
- """ - - def __init__(self, module, process_group, buffer_size=2**28): - super().__init__() - - self.module = module - self.process_group = process_group - self.world_size = utils.get_world_size(self.process_group) - - # Never use a bigger buffer than the number of model params - self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters())) - self.buffer = None - - # We can also forcibly accumulate grads locally and only do the - # all-reduce at some later time - self.accumulate_grads = False - - # make per-device lists of parameters - paramlists = OrderedDict() - for param in self.module.parameters(): - device = param.device - if paramlists.get(device) is None: - paramlists[device] = [] - paramlists[device] += [param] - self.per_device_params = list(paramlists.values()) - - @contextmanager - def no_sync(self): - """A context manager to disable gradient synchronization.""" - old_accumulate_grads = self.accumulate_grads - self.accumulate_grads = True - yield - self.accumulate_grads = old_accumulate_grads - - def forward(self, *inputs, **kwargs): - return self.module(*inputs, **kwargs) - - def all_reduce_grads(self): - """ - This function must be called explicitly after backward to reduce - gradients. There is no automatic hook like c10d. - """ - - def all_reduce_params(params): - buffer = self.buffer - nonzero_buffer = False - if len(params) > 1: - offset = 0 - for p in params: - sz = p.numel() - if p.grad is not None: - buffer[offset : offset + sz].copy_(p.grad.data.view(-1)) - nonzero_buffer = True - else: - buffer[offset : offset + sz].zero_() - offset += sz - else: - # we only have a single grad to all-reduce - p = params[0] - if p.grad is not None: - buffer = p.grad.data - nonzero_buffer = True - elif p.numel() <= self.buffer.numel(): - buffer = buffer[: p.numel()] - buffer.zero_() - else: - buffer = torch.zeros_like(p) - - if nonzero_buffer: - buffer.div_(self.world_size) - - utils.all_reduce(buffer, self.process_group) - - # copy all-reduced grads back into their original place - offset = 0 - for p in params: - sz = p.numel() - if p.grad is not None: - p.grad.data.copy_(buffer[offset : offset + sz].view_as(p)) - else: - p.grad = buffer[offset : offset + sz].view_as(p).clone() - offset += sz - - def reduction_fn(): - # This function only needs to be called once - if self.accumulate_grads: - return - - if self.buffer is None: - self.buffer = next(self.module.parameters()).new(self.buffer_size) - - for params in self.per_device_params: - # All-reduce the gradients in buckets - offset = 0 - buffered_params = [] - for param in params: - if not param.requires_grad: - continue - if param.grad is None: - param.grad = torch.zeros_like(param) - - if hasattr(param, "expert"): - # Skip gradient sync for unshared parameters - continue - - if param.grad.requires_grad: - raise RuntimeError( - "DistributedDataParallel only works " - "with gradients that don't require " - "grad" - ) - sz = param.numel() - if sz > self.buffer.numel(): - # all-reduce big params directly - all_reduce_params([param]) - else: - if offset + sz > self.buffer.numel(): - all_reduce_params(buffered_params) - offset = 0 - buffered_params.clear() - buffered_params.append(param) - offset += sz - - if len(buffered_params) > 0: - all_reduce_params(buffered_params) - - reduction_fn() diff --git a/spaces/ashutosh1919/quantum-perceptron/quantum_perceptron/__init__.py b/spaces/ashutosh1919/quantum-perceptron/quantum_perceptron/__init__.py deleted file mode 100644 index 
ed4e767fa8c5310969615aa941e5b41ce6ea5099..0000000000000000000000000000000000000000 --- a/spaces/ashutosh1919/quantum-perceptron/quantum_perceptron/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from quantum_perceptron.perceptron import Perceptron diff --git a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Saim Mehmood.html b/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Saim Mehmood.html deleted file mode 100644 index 74fbd5bb33874558aed6731c8c07572671215e80..0000000000000000000000000000000000000000 --- a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Saim Mehmood.html +++ /dev/null @@ -1,134 +0,0 @@ - - - - Saim Mehmood - - - - -
-

Saim Mehmood

- -
-

Interview


Saim was a mentee
Career
  • Moved to Canada back in Dec 2017
  • grad school Jan 2018
  • MSc in CS - research-oriented work
  • graduated June 2020
  • Sep 2019 got in touch with Omar
  • Omar: "You already have a good project with your thesis", finish that up
  • Covid hit, kept applying
  • kept applying
  • took me almost 2 years to find a job
  • worked in MLOps
  • learned a lot throughout my journey, particularly about academia and industry

And how can you add value as a mentor?
  • selling yourself as a previous academic
  • it's a long journey - expectation setting - you will hit a lot of rejections!
    • "I was getting rejection emails every day, and was happy to see them"
  • market the project that you've done
  • make sure they are technically sound
  • Build a project if needed, post on Medium
Mentorship exp
  • Been a TA during school
Ideal mentee
  • Recent CS grads would be easier, but open to other disciplines
-
-
Questions about SM?



-
- -
- - - \ No newline at end of file diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/ui/media/drawingboard.min.css b/spaces/awaawawawa/iurf7irfuyytruyyugb/ui/media/drawingboard.min.css deleted file mode 100644 index 7d80478705de8fdceacbad40e5a85c19307be665..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/ui/media/drawingboard.min.css +++ /dev/null @@ -1,5 +0,0 @@ -/* drawingboard.js v0.4.6 - https://github.com/Leimi/drawingboard.js -* Copyright (c) 2015 Emmanuel Pelletier -* Licensed MIT */ - -.drawing-board,.drawing-board *{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}.drawing-board-controls-hidden,.drawing-board-utils-hidden{display:none!important}.drawing-board{position:relative;display:block}.drawing-board-canvas-wrapper{position:relative;margin:0;border:1px solid #ddd}.drawing-board-canvas{position:absolute;top:0;left:0;width:auto;cursor:crosshair;z-index:20}.drawing-board-cursor{position:absolute;top:0;left:0;pointer-events:none;border-radius:50%;background:#ccc;background:rgba(0,0,0,.2);z-index:30}.drawing-board-control-colors-rainbows,.drawing-board-control-size .drawing-board-control-inner,.drawing-board-control-size-dropdown,.drawing-board-control>button{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;overflow:hidden;background-color:#eee;padding:2px 4px;border:1px solid #ccc;box-shadow:0 1px 3px -2px #121212,inset 0 2px 5px 0 rgba(255,255,255,.3);-webkit-box-shadow:0 1px 3px -2px #121212,inset 0 2px 5px 0 rgba(255,255,255,.3);height:28px}.drawing-board-control>button{cursor:pointer;min-width:28px;line-height:14px}.drawing-board-control>button:focus,.drawing-board-control>button:hover{background-color:#ddd}.drawing-board-control>button.active,.drawing-board-control>button:active{box-shadow:inset 0 1px 2px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 2px 0 rgba(0,0,0,.2);background-color:#ddd}.drawing-board-control>button[disabled]{color:gray}.drawing-board-control>button[disabled].active,.drawing-board-control>button[disabled]:active,.drawing-board-control>button[disabled]:focus,.drawing-board-control>button[disabled]:hover{background-color:#eee;box-shadow:0 1px 3px -2px #121212,inset 0 2px 5px 0 rgba(255,255,255,.3);-webkit-box-shadow:0 1px 3px -2px #121212,inset 0 2px 5px 0 rgba(255,255,255,.3);cursor:default}.drawing-board-controls{margin:0 auto;text-align:center;font-size:0;display:table;border-spacing:9.33px 0;position:relative;min-height:28px}.drawing-board-controls[data-align=left]{margin:0;left:-9.33px}.drawing-board-controls[data-align=right]{margin:0 0 0 auto;right:-9.33px}.drawing-board-canvas-wrapper+.drawing-board-controls,.drawing-board-controls+.drawing-board-canvas-wrapper{margin-top:5px}.drawing-board-controls-hidden{height:0;min-height:0;padding:0;margin:0;border:0}.drawing-board-control{display:table-cell;border-collapse:separate;vertical-align:middle;font-size:16px;height:100%}.drawing-board-control-inner{position:relative;height:100%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.drawing-board-control>button{margin:0;vertical-align:middle}.drawing-board-control-colors{font-size:0;line-height:0}.drawing-board-control-colors-current{border:1px solid 
#ccc;cursor:pointer;display:inline-block;width:26px;height:26px}.drawing-board-control-colors-rainbows{display:inline-block;position:absolute;left:0;top:33px;margin-left:0;z-index:100;width:250px;height:auto;padding:4px}.drawing-board-control-colors-rainbow{height:18px}.drawing-board-control-colors-picker:first-child{margin-right:5px}.drawing-board-control-colors-picker{display:inline-block;width:18px;height:18px;cursor:pointer}.drawing-board-control-colors-picker[data-color="rgba(255, 255, 255, 1)"]{width:16px;height:17px;border:1px solid #ccc;border-bottom:none}.drawing-board-control-colors-picker:hover{width:16px;height:16px;border:1px solid #555}.drawing-board-control-drawingmode>button{margin-right:2px}.drawing-board-control-drawingmode>button:last-child{margin-right:0}.drawing-board-control-drawingmode-pencil-button{overflow:hidden;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAe9JREFUeNpiZAACVlFRBhYREQZcQPnbNwa3N28YlL5+ZfgLFfvPwGD9m4FhIgsDHuAO0gTUDNKIBvyBmqt/MTDMY8Gl0f31azD7L6oUIxCnAzWmAPHBfwwM01AMUAV6JfPQIVwOYgVqqPnFyOjz6///O38YGKpAgmAD1OXlGdTk5PD5hgeouZudj8/uy9evP/78/dsFFPsJNiAoKIiBABAHap4oLi9v8fTNm48//v7NBwbgWZgkE7rqt8DY+A8JZRBW+cfIuEDT0NDlzadP3z98/doPFDuCrB7TAGFhBqCNIGwM9OcKUzs7+xdv3355+f79VqDYAiTDwZgJh7ONgYpnOvn4GL949erT7UePdgL5JVCD4fgBLBBxaX74+PG789evnwby0/8jKXgExIeB+CG6Af///1e9Ki9vFSAkZPzoyZPPJy9evA9MB77/sWiEARZkzV+/fvXYtGnTpG3btj28EBT0BqjZ5D8OjXCwPksUhA1Wpggf/PHjx/9169Y9EBERaUlgZmaIAcrLE4rk5sIqBqDmlefnRPzfWGX5EaSZm5ubgRloADGA5QZ3RgK7gESY4PMNn9ZtObPpzZvfU4DiYkiB/RcHG+S7fyxAMH/lFU2GOZd2bLx18/cEUMoD4j9I+DcS/RtJHGTYf4AAAwAxaOMYHjxKFwAAAABJRU5ErkJggg==);background-position:50% 50%;background-repeat:no-repeat}.drawing-board-control-drawingmode-pencil-button:before{content:"";display:block;width:0;height:100%}.drawing-board-control-drawingmode-eraser-button{overflow:hidden;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAkpJREFUeNp0kk+IElEcx39vFBc9+OfQRTAwzFt4CaYOKStj6MoeculStzoIQSB4kCVckmDx4iGCXWYJIqjoVOzO1l4qT1F7WfBWHvxzDPyTB3XUmXn93suRybUffHmP997n9/cRsFgwGARJkiAcDsPlwgEIeEZQAhCRAkgAlOD6SQP4rgMFDWVnYCAQgFgsBqFQCBwOByzZNQOotPHx1RNCCCipu6bfb+zSnslkeOQVILPrBkAirbws9btdTEWAzZPXpfepOzaeGMBXwe/3w3+MwTc3Dl+UeghTiskbBvR6Pbh18mZHB0jjmxvCKhIfR37s3r+Sevf8ca/T4TBF2HTSODuDxP7uNjrZFFbBk8lEzOVyspa4ykGYw2zfbTb/7ilvok1YhlVVFfP5vDydTkHXdXDdlhZOOnPY4/HA0YPtp3h6LFjh8XgsFgoFGTPgsKm1zDr8ajTQh8Fh5eGjZzjGI8yjKlgjF4tFGdd/YKYmRja24hw+zu3sYe2HiH3hYzQjl8tleTQanWtou93G6Qngdrth6+1+9h6hTULJZ/PeziJXKhV5OByeg1ut1gJOp9NZTdNOcQ419ot+ggp1qoLdBFmqVmNpm3A8Huewy+Wq1RH8QH9zmBlJJpMRdCIqiiIPBgN+2MCGsW/r8/kgGo1m0fmpzWarseayHlmNeL1eFiWC0cRqtSr3+/3FpSiKHMZtjU1glbFyfKgLTqfzEka9OJvNeDnzz1JnCaFmqOl8ZdJY1SiDOXCiXKg1NtG5DIt0y6ov3dE/AgwAENFWYYLj4mYAAAAASUVORK5CYII=);background-position:50% 
50%;background-repeat:no-repeat}.drawing-board-control-drawingmode-eraser-button:before{content:"";display:block;width:0;height:100%}.drawing-board-control-drawingmode-filler-button{overflow:hidden;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAnNJREFUeNp0k0trE1EUx89MJpNJooYYXBgDNtCKdRPwlbqoCKUtaNVNA0Uo7UbMxoVPEARTXEi+QWfnwn6DEAlEkrSLttTGRiULEQlJ8yChmbzI++E50yTUJA78uMy953/u/557LmOz2WDEZ2m1WrckSRJSqdR2tVrdHQyYebwHtVoNuFHqTqczhQnWKaBYLDoKhcIuzgHDMKBSqeD20qd+LNdsNocSoFhRr9ctpVLJigl4xIIJQizLAmG4cAPa7bYcy9Iug5TL5UYikbD6/X7Rbre/IUcYe3WUW5ZsnQQzW9LpNOPz+UQc5aBM5mgdh7vI9FCCAesW2tnr9YqZTAby+bw8f3AQRP6853n+Ph5hemSCntjj8YjZbFYWx2IxeS2RSEMwuA87O79eqdXquVolK+GxnP0EPbHb7RZJSGABIR6PA11zJHKIR2MhHA5DIPDj7eH3j95KpfK60Wg8Yntil8slkqgnpioLghacTidoNDpEC3q9HnheCc3s1jZeLcW943pirPw/4lKpBkqlDubnl/riycnLsLy88EKj0fhzuRyZv8RFo1E6wpBYkiqy7Z54YmIcVlYeyOKC4mYwJ0nHRaQuM5vNT6hB/iceG7sIq6sPnwmC4MerDkby40AOCCoiddie1Wp92W7zQ2KTyQSLizNP8T0EsPLBbxEDnCj0GkM2qIEwyZRCobizsfH5A1ZXFhuN52F29vpz3HkL574mk8lj24Y5wsHkvjjoX0BOIWc5jruHzbK2ufmzEwpFO3jnDhQv4JoROYdoERVyGjEgZ8iBDlF3FzXo4go6utZ9lftY4N/dXisjR0i1G0ublv8KMAA0ZoUlicxrhwAAAABJRU5ErkJggg==);background-position:50% 50%;background-repeat:no-repeat}.drawing-board-control-drawingmode-filler-button:before{content:"";display:block;width:0;height:100%}.drawing-board-control-navigation>button{font-family:Helvetica,Arial,sans-serif;font-size:14px;font-weight:700;margin-right:2px}.drawing-board-control-navigation>button:last-child{margin-right:0}.drawing-board-control-size[data-drawing-board-type=range] .drawing-board-control-inner{width:75px}.drawing-board-control-size[data-drawing-board-type=dropdown] .drawing-board-control-inner{overflow:visible}.drawing-board-control-size-range-input{position:relative;width:100%;z-index:100;margin:0;padding:0;border:0}.drawing-board-control-size-dropdown span,.drawing-board-control-size-dropdown-current span,.drawing-board-control-size-range-current{display:block;background:#333;opacity:.8}.drawing-board-control-size-range-current{display:inline-block;opacity:.15;position:absolute;pointer-events:none;left:50%;top:50%;z-index:50}.drawing-board-control-size-dropdown-current{display:block;height:100%;width:40px;overflow:hidden;position:relative}.drawing-board-control-size-dropdown-current span{position:absolute;left:50%;top:50%}.drawing-board-control-size-dropdown{position:absolute;left:-6px;top:33px;height:auto;list-style-type:none;margin:0;padding:0;z-index:100}.drawing-board-control-size-dropdown li{display:block;padding:4px;margin:3px 0;min-height:16px}.drawing-board-control-size-dropdown li:hover{background:#ccc}.drawing-board-control-size-dropdown span{margin:0 
auto}.drawing-board-control-download-button{overflow:hidden;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAoBJREFUeNqMkr9PU1EUx7/vR1tQ3yu10hAmTawtBSYSy2YccFOcnDQm/gewOLnj5GYMg4sdXFxkMca4OBAwdUBe/ZkIGFp+9tHXvh/3/fTcAm01aLzJybnn3nM+95tzrnDl6Tb+sibuTmWUWj3C6/Juk+LySUmyvt0FCKKA02ryOCy6LBiu15ngMbZ5DDCNBqIw6gKM+n4nECUJru3glKry8CpjQaHVYmC2rVH82DIMMMdGGARdwJ+SPNdFS9chx+MXDNMp/NzagWNatk/nQU/hiYAoih6FYTBCBs9zUXMCbAhx2OYOv351lPOJ3EwH4LteL6Dcp/Rfu3FrstDyIizt+agpaYxNDU0M9gl4v7Ck+TYrCYLQqZHUyTtdQBiutPSGUflczSXHs5lVKwZdSOBMvwztxVvN0RtzsiyXBFHsAvL5PBSnCpXV2getILFiE2SjspYbuZzPiDSZ2vOXmlvX5yQqTmMfg9ZXqtls1wnT09OHEyAq0aFLg/gSXsSWq9wWk+p9PrCoYTwcijdLOfE7UsEufN9HGIYnT4EnTGIXe1KqtNNIvuNnGamxfi7SgQD/nIJCTbzOPQ/SQh1pud7T4M6W/8qFIw/5WAr5m7Ozsw9UVc069Fls2yJzSC5/lnc9RhaHZVnfSqUnEgXP2oBqtYqBgYG2+mKxmOVADnAcB4yxHgD1RzehKKns/LyV4gUHBweQy+UyRkdH6UKJ6fQDFxcXoWkaXJeRuTgUGCdLQJ9bx72lGZimGWs2m+083oN+2iiFQiGxvLy8RrDzudyltgrG3N8U2G8CrPz4sGYYRqJSqWR4H/jNWbJhUjAWi8XG8R/L87yPpGCVttVfAgwAVpZR+8tZC08AAAAASUVORK5CYII=);background-position:50% 50%;background-repeat:no-repeat}.drawing-board-control-download-button:before{content:"";display:block;width:0;height:100%} \ No newline at end of file diff --git a/spaces/awacke1/Bloom.Generative.Writer/generators/topic_to_abstract.py b/spaces/awacke1/Bloom.Generative.Writer/generators/topic_to_abstract.py deleted file mode 100644 index a91131db41007e46ce1d9a9772bdd7e9645b9d2b..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Bloom.Generative.Writer/generators/topic_to_abstract.py +++ /dev/null @@ -1,6 +0,0 @@ - -from .model import model - - -def topic_to_abstract_generator(template): - return model('topic', template) diff --git a/spaces/awacke1/Streamlit-Data-Synthesis-Example/app.py b/spaces/awacke1/Streamlit-Data-Synthesis-Example/app.py deleted file mode 100644 index 636e3a14af4ceae48968e372e88f840f3425c791..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Streamlit-Data-Synthesis-Example/app.py +++ /dev/null @@ -1,55 +0,0 @@ -import streamlit as st -import pandas as pd - - -def generate_hospital_data(): - # Generate hospital data - hospitals = { - "city": ["New York", "Los Angeles", "Chicago", "Houston", "Phoenix"], - "state": ["NY", "CA", "IL", "TX", "AZ"], - "bed_count": [1200, 1500, 1100, 1300, 1400], - } - df = pd.DataFrame(hospitals) - return df - - -def generate_state_data(): - # Generate state data - states = { - "state": ["NY", "CA", "IL", "TX", "AZ"], - "population": [20000000, 40000000, 13000000, 29000000, 7000000], - "square_miles": [54556, 163696, 57914, 268596, 113990], - } - df = pd.DataFrame(states) - return df - - -def merge_datasets(hospitals_df, states_df): - # Merge hospital and state data - merged_df = pd.merge(hospitals_df, states_df, on="state") - return merged_df - - -def calculate_beds_per_capita(merged_df): - # Calculate beds per capita - merged_df["beds_per_capita"] = merged_df["bed_count"] / merged_df["population"] - return merged_df - - -def main(): - # Generate data - hospitals_df = generate_hospital_data() - states_df = generate_state_data() - - # Merge datasets - merged_df = merge_datasets(hospitals_df, states_df) - - # Calculate beds per capita - merged_df = calculate_beds_per_capita(merged_df) - - # Show merged and calculated data - st.write(merged_df) - - -if __name__ == "__main__": - main() diff --git a/spaces/awacke1/Webcam-Object-Recognition-Yolo-n-Coco/class_names/README.md 
b/spaces/awacke1/Webcam-Object-Recognition-Yolo-n-Coco/class_names/README.md deleted file mode 100644 index 30d74d258442c7c65512eafab474568dd706c430..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Webcam-Object-Recognition-Yolo-n-Coco/class_names/README.md +++ /dev/null @@ -1 +0,0 @@ -test \ No newline at end of file diff --git a/spaces/badayvedat/AudioSep/models/CLAP/open_clip/bert.py b/spaces/badayvedat/AudioSep/models/CLAP/open_clip/bert.py deleted file mode 100644 index a83d96d2a77ed05198efc05837522bc88d2499cc..0000000000000000000000000000000000000000 --- a/spaces/badayvedat/AudioSep/models/CLAP/open_clip/bert.py +++ /dev/null @@ -1,40 +0,0 @@ -from transformers import BertTokenizer, BertModel - -tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") -model = BertModel.from_pretrained("bert-base-uncased") -text = "Replace me by any text you'd like." - - -def bert_embeddings(text): - # text = "Replace me by any text you'd like." - encoded_input = tokenizer(text, return_tensors="pt") - output = model(**encoded_input) - return output - - -from transformers import RobertaTokenizer, RobertaModel - -tokenizer = RobertaTokenizer.from_pretrained("roberta-base") -model = RobertaModel.from_pretrained("roberta-base") -text = "Replace me by any text you'd like." - - -def Roberta_embeddings(text): - # text = "Replace me by any text you'd like." - encoded_input = tokenizer(text, return_tensors="pt") - output = model(**encoded_input) - return output - - -from transformers import BartTokenizer, BartModel - -tokenizer = BartTokenizer.from_pretrained("facebook/bart-base") -model = BartModel.from_pretrained("facebook/bart-base") -text = "Replace me by any text you'd like." - - -def bart_embeddings(text): - # text = "Replace me by any text you'd like." 
- encoded_input = tokenizer(text, return_tensors="pt") - output = model(**encoded_input) - return output diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/sea3d/physics/SEA3DAmmoLoader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/sea3d/physics/SEA3DAmmoLoader.js deleted file mode 100644 index 9239e109afef47ff228e1fa2005fd9a6643da7ae..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/sea3d/physics/SEA3DAmmoLoader.js +++ /dev/null @@ -1,590 +0,0 @@ -/** - * SEA3D+AMMO for Three.JS - * @author Sunag / http://www.sunag.com.br/ - */ - -'use strict'; - -THREE.SEA3D.prototype.toAmmoVec3 = function ( v ) { - - return new Ammo.btVector3( v.x, v.y, v.z ); - -}; - -// -// Sphere -// - -THREE.SEA3D.prototype.readSphere = function ( sea ) { - - var shape = new Ammo.btSphereShape( sea.radius ); - - this.domain.shapes = this.shapes = this.shapes || []; - this.shapes.push( this.objects[ "shpe/" + sea.name ] = sea.tag = shape ); - -}; - -// -// Box -// - -THREE.SEA3D.prototype.readBox = function ( sea ) { - - var shape = new Ammo.btBoxShape( new Ammo.btVector3( sea.width * .5, sea.height * .5, sea.depth * .5 ) ); - - this.domain.shapes = this.shapes = this.shapes || []; - this.shapes.push( this.objects[ "shpe/" + sea.name ] = sea.tag = shape ); - -}; - -// -// Cone -// - -THREE.SEA3D.prototype.readCone = function ( sea ) { - - var shape = new Ammo.btConeShape( sea.radius, sea.height ); - - this.domain.shapes = this.shapes = this.shapes || []; - this.shapes.push( this.objects[ "shpe/" + sea.name ] = sea.tag = shape ); - -}; - -// -// Cylinder -// - -THREE.SEA3D.prototype.readCylinder = function ( sea ) { - - var shape = new Ammo.btCylinderShape( new Ammo.btVector3( sea.height, sea.radius, sea.radius ) ); - - this.domain.shapes = this.shapes = this.shapes || []; - this.shapes.push( this.objects[ "shpe/" + sea.name ] = sea.tag = shape ); - -}; - -// -// Capsule -// - -THREE.SEA3D.prototype.readCapsule = function ( sea ) { - - var shape = new Ammo.btCapsuleShape( sea.radius, sea.height ); - - this.domain.shapes = this.shapes = this.shapes || []; - this.shapes.push( this.objects[ "shpe/" + sea.name ] = sea.tag = shape ); - -}; - -// -// Convex Geometry -// - -THREE.SEA3D.prototype.readConvexGeometry = function ( sea ) { - - if ( this.config.convexHull ) { - - var shape = SEA3D.AMMO.createConvexHull( sea.geometry.tag, sea.subGeometryIndex ); - - } else { - - var triMesh = SEA3D.AMMO.createTriangleMesh( sea.geometry.tag, sea.subGeometryIndex ); - - var shape = new Ammo.btConvexTriangleMeshShape( triMesh, true ); - - } - - this.domain.shapes = this.shapes = this.shapes || []; - this.shapes.push( this.objects[ "shpe/" + sea.name ] = sea.tag = shape ); - -}; - -// -// Triangle Geometry -// - -THREE.SEA3D.prototype.readTriangleGeometry = function ( sea ) { - - var triMesh = SEA3D.AMMO.createTriangleMesh( sea.geometry.tag, sea.subGeometryIndex ); - - var shape = new Ammo.btBvhTriangleMeshShape( triMesh, true, true ); - - this.domain.shapes = this.shapes = this.shapes || []; - this.shapes.push( this.objects[ "shpe/" + sea.name ] = sea.tag = shape ); - -}; - -// -// Compound -// - -THREE.SEA3D.prototype.readCompound = function ( sea ) { - - var shape = new Ammo.btCompoundShape(); - - for ( var i = 0; i < sea.compounds.length; i ++ ) { - - var compound = sea.compounds[ i ]; - - THREE.SEA3D.MTXBUF.elements = compound.transform; - - var transform = SEA3D.AMMO.getTransformFromMatrix( THREE.SEA3D.MTXBUF 
); - - shape.addChildShape( transform, compound.shape.tag ); - - } - - this.domain.shapes = this.shapes = this.shapes || []; - this.shapes.push( this.objects[ "shpe/" + sea.name ] = sea.tag = shape ); - -}; - -// -// Rigid Body Base -// - -THREE.SEA3D.prototype.readRigidBodyBase = function ( sea ) { - - var shape = sea.shape.tag, - transform, target; - - if ( sea.target ) { - - target = sea.target.tag; - - target.physics = { enabled: true }; - target.updateMatrix(); - - transform = SEA3D.AMMO.getTransformFromMatrix( sea.target.tag.matrix ); - - } else { - - THREE.SEA3D.MTXBUF.fromArray( sea.transform ); - - transform = SEA3D.AMMO.getTransformFromMatrix( THREE.SEA3D.MTXBUF ); - - } - - var motionState = new Ammo.btDefaultMotionState( transform ); - var localInertia = new Ammo.btVector3( 0, 0, 0 ); - - shape.calculateLocalInertia( sea.mass, localInertia ); - - var info = new Ammo.btRigidBodyConstructionInfo( sea.mass, motionState, shape, localInertia ); - info.set_m_friction( sea.friction ); - info.set_m_restitution( sea.restitution ); - info.set_m_linearDamping( sea.linearDamping ); - info.set_m_angularDamping( sea.angularDamping ); - - var rb = new Ammo.btRigidBody( info ); - - if ( target ) { - - target.physics.rigidBody = rb; - - if ( sea.offset ) { - - var offset = new THREE.Matrix4(); - offset.fromArray( sea.offset ); - - target.physics.offset = offset; - - } - - } - - Ammo.destroy( info ); - - this.domain.rigidBodies = this.rigidBodies = this.rigidBodies || []; - this.rigidBodies.push( this.objects[ "rb/" + sea.name ] = sea.tag = rb ); - - return rb; - -}; - -// -// Rigid Body -// - -THREE.SEA3D.prototype.readRigidBody = function ( sea ) { - - var rb = this.readRigidBodyBase( sea ); - - SEA3D.AMMO.addRigidBody( rb, sea.target ? sea.target.tag : undefined, this.config.enabledPhysics ); - -}; - -// -// Car Controller -// - -THREE.SEA3D.prototype.readCarController = function ( sea ) { - - var body = this.readRigidBodyBase( sea ); - - body.setActivationState( SEA3D.AMMO.DISABLE_DEACTIVATION ); - - // Car - - var vehicleRayCaster = new Ammo.btDefaultVehicleRaycaster( SEA3D.AMMO.world ); - - var tuning = new Ammo.btVehicleTuning(); - - tuning.set_m_suspensionStiffness( sea.suspensionStiffness ); - tuning.set_m_suspensionDamping( sea.suspensionDamping ); - tuning.set_m_suspensionCompression( sea.suspensionCompression ); - tuning.set_m_maxSuspensionTravelCm( sea.maxSuspensionTravelCm ); - tuning.set_m_maxSuspensionForce( sea.maxSuspensionForce ); - tuning.set_m_frictionSlip( sea.frictionSlip ); - - var vehicle = new Ammo.btRaycastVehicle( tuning, body, vehicleRayCaster ), - wheels = []; - - vehicle.setCoordinateSystem( 0, 1, 2 ); - - for ( var i = 0; i < sea.wheel.length; i ++ ) { - - var wheel = sea.wheel[ i ]; - - var wheelInfo = vehicle.addWheel( - this.toAmmoVec3( wheel.pos ), - this.toAmmoVec3( wheel.dir ), - this.toAmmoVec3( wheel.axle ), - wheel.suspensionRestLength, - wheel.radius, - tuning, - wheel.isFront - ); - - var target = wheels[ i ] = wheel.target ? 
wheel.target.tag : undefined; - - if ( target ) { - - target.physics = { enabled: true, rigidBody: wheelInfo }; - - if ( wheel.offset ) { - - var offset = new THREE.Matrix4(); - offset.fromArray( wheel.offset ); - - target.physics.offset = offset; - - } - - if ( target.parent ) { - - target.parent.remove( target ); - - } - - if ( this.container ) { - - this.container.add( target ); - - } - - } - - wheelInfo.set_m_suspensionStiffness( sea.suspensionStiffness ); - wheelInfo.set_m_wheelsDampingRelaxation( sea.dampingRelaxation ); - wheelInfo.set_m_wheelsDampingCompression( sea.dampingCompression ); - wheelInfo.set_m_frictionSlip( sea.frictionSlip ); - - } - - SEA3D.AMMO.addVehicle( vehicle, wheels ); - SEA3D.AMMO.addRigidBody( body, sea.target ? sea.target.tag : undefined, this.config.enabledPhysics ); - - this.domain.vehicles = this.vehicles = this.vehicles || []; - this.vehicles.push( this.objects[ "vhc/" + sea.name ] = sea.tag = vehicle ); - -}; - -// -// P2P Constraint -// - -THREE.SEA3D.prototype.readP2PConstraint = function ( sea ) { - - var ctrt; - - if ( sea.targetB ) { - - ctrt = new Ammo.btPoint2PointConstraint( - sea.targetA.tag, - sea.targetB.tag, - this.toAmmoVec3( sea.pointA ), - this.toAmmoVec3( sea.pointB ) - ); - - } else { - - ctrt = new Ammo.btPoint2PointConstraint( - sea.targetA.tag, - this.toAmmoVec3( sea.pointA ) - ); - - } - - SEA3D.AMMO.addConstraint( ctrt ); - - this.domain.constraints = this.constraints = this.constraints || []; - this.constraints.push( this.objects[ "ctnt/" + sea.name ] = sea.tag = ctrt ); - -}; - -// -// Hinge Constraint -// - -THREE.SEA3D.prototype.readHingeConstraint = function ( sea ) { - - var ctrt; - - if ( sea.targetB ) { - - ctrt = new Ammo.btHingeConstraint( - sea.targetA.tag, - sea.targetB.tag, - this.toAmmoVec3( sea.pointA ), - this.toAmmoVec3( sea.pointB ), - this.toAmmoVec3( sea.axisA ), - this.toAmmoVec3( sea.axisB ), - false - ); - - } else { - - ctrt = new Ammo.btHingeConstraint( - sea.targetA.tag, - this.toAmmoVec3( sea.pointA ), - this.toAmmoVec3( sea.axisA ), - false - ); - - } - - if ( sea.limit ) { - - ctrt.setLimit( sea.limit.low, sea.limit.high, sea.limit.softness, sea.limit.biasFactor, sea.limit.relaxationFactor ); - - } - - if ( sea.angularMotor ) { - - ctrt.enableAngularMotor( true, sea.angularMotor.velocity, sea.angularMotor.impulse ); - - } - - SEA3D.AMMO.addConstraint( ctrt ); - - this.domain.constraints = this.constraints = this.constraints || []; - this.constraints.push( this.objects[ "ctnt/" + sea.name ] = sea.tag = ctrt ); - -}; - -// -// Cone Twist Constraint -// - -THREE.SEA3D.prototype.readConeTwistConstraint = function ( sea ) { - - var ctrt; - - if ( sea.targetB ) { - - ctrt = new Ammo.btConeTwistConstraint( - sea.targetA.tag, - sea.targetB.tag, - this.toAmmoVec3( sea.pointA ), - this.toAmmoVec3( sea.pointB ), - false - ); - - } else { - - ctrt = new Ammo.btConeTwistConstraint( - sea.targetA.tag, - this.toAmmoVec3( sea.pointA ), - false - ); - - } - - SEA3D.AMMO.addConstraint( ctrt ); - - this.domain.constraints = this.constraints = this.constraints || []; - this.constraints.push( this.objects[ "ctnt/" + sea.name ] = sea.tag = ctrt ); - -}; - -// -// Domain -// - -THREE.SEA3D.Domain.prototype.enabledPhysics = function ( enabled ) { - - var i = this.rigidBodies ? 
this.rigidBodies.length : 0; - - while ( i -- ) { - - SEA3D.AMMO.setEnabledRigidBody( this.rigidBodies[ i ], enabled ); - - } - -}; - -THREE.SEA3D.Domain.prototype.applyContainerTransform = function () { - - this.container.updateMatrix(); - - var matrix = this.container.matrix.clone(); - - this.container.position.set( 0, 0, 0 ); - this.container.quaternion.set( 0, 0, 0, 1 ); - this.container.scale.set( 1, 1, 1 ); - - this.applyTransform( matrix ); - -}; - -THREE.SEA3D.Domain.prototype.applyTransform = function ( matrix ) { - - var mtx = THREE.SEA3D.MTXBUF, vec = THREE.SEA3D.VECBUF; - - var i = this.rigidBodies ? this.rigidBodies.length : 0, - childs = this.container ? this.container.children : [], - targets = []; - - while ( i -- ) { - - var rb = this.rigidBodies[ i ], - target = SEA3D.AMMO.getTargetByRigidBody( rb ), - transform = rb.getWorldTransform(), - transformMatrix = SEA3D.AMMO.getMatrixFromTransform( transform ); - - transformMatrix.multiplyMatrices( transformMatrix, matrix ); - - transform = SEA3D.AMMO.getTransformFromMatrix( transformMatrix ); - - rb.setWorldTransform( transform ); - - if ( target ) targets.push( target ); - - } - - for ( i = 0; i < childs.length; i ++ ) { - - var obj3d = childs[ i ]; - - if ( targets.indexOf( obj3d ) > - 1 ) continue; - - obj3d.updateMatrix(); - - mtx.copy( obj3d.matrix ); - - mtx.multiplyMatrices( matrix, mtx ); - - obj3d.position.setFromMatrixPosition( mtx ); - obj3d.scale.setFromMatrixScale( mtx ); - - // ignore rotation scale - - mtx.scale( vec.set( 1 / obj3d.scale.x, 1 / obj3d.scale.y, 1 / obj3d.scale.z ) ); - obj3d.quaternion.setFromRotationMatrix( mtx ); - - } - -}; - -// -// Extension -// - -THREE.SEA3D.Domain.prototype.getShape = THREE.SEA3D.prototype.getShape = function ( name ) { - - return this.objects[ "shpe/" + name ]; - -}; - -THREE.SEA3D.Domain.prototype.getRigidBody = THREE.SEA3D.prototype.getRigidBody = function ( name ) { - - return this.objects[ "rb/" + name ]; - -}; - -THREE.SEA3D.Domain.prototype.getConstraint = THREE.SEA3D.prototype.getConstraint = function ( name ) { - - return this.objects[ "ctnt/" + name ]; - -}; - -THREE.SEA3D.EXTENSIONS_LOADER.push( { - - parse: function () { - - delete this.shapes; - delete this.rigidBodies; - delete this.vehicles; - delete this.constraints; - - }, - - setTypeRead: function () { - - // CONFIG - - this.config.physics = this.config.physics !== undefined ? this.config.physics : true; - this.config.convexHull = this.config.convexHull !== undefined ? this.config.convexHull : true; - this.config.enabledPhysics = this.config.enabledPhysics !== undefined ? 
this.config.enabledPhysics : true; - - if ( this.config.physics ) { - - // SHAPES - - this.file.typeRead[ SEA3D.Sphere.prototype.type ] = this.readSphere; - this.file.typeRead[ SEA3D.Box.prototype.type ] = this.readBox; - this.file.typeRead[ SEA3D.Capsule.prototype.type ] = this.readCapsule; - this.file.typeRead[ SEA3D.Cone.prototype.type ] = this.readCone; - this.file.typeRead[ SEA3D.Cylinder.prototype.type ] = this.readCylinder; - this.file.typeRead[ SEA3D.ConvexGeometry.prototype.type ] = this.readConvexGeometry; - this.file.typeRead[ SEA3D.TriangleGeometry.prototype.type ] = this.readTriangleGeometry; - this.file.typeRead[ SEA3D.Compound.prototype.type ] = this.readCompound; - - // CONSTRAINTS - - this.file.typeRead[ SEA3D.P2PConstraint.prototype.type ] = this.readP2PConstraint; - this.file.typeRead[ SEA3D.HingeConstraint.prototype.type ] = this.readHingeConstraint; - this.file.typeRead[ SEA3D.ConeTwistConstraint.prototype.type ] = this.readConeTwistConstraint; - - // PHYSICS - - this.file.typeRead[ SEA3D.RigidBody.prototype.type ] = this.readRigidBody; - this.file.typeRead[ SEA3D.CarController.prototype.type ] = this.readCarController; - - } - - } -} ); - -THREE.SEA3D.EXTENSIONS_DOMAIN.push( { - - dispose: function () { - - var i; - - i = this.rigidBodies ? this.rigidBodies.length : 0; - while ( i -- ) SEA3D.AMMO.removeRigidBody( this.rigidBodies[ i ], true ); - - i = this.vehicles ? this.vehicles.length : 0; - while ( i -- ) SEA3D.AMMO.removeVehicle( this.vehicles[ i ], true ); - - i = this.constraints ? this.constraints.length : 0; - while ( i -- ) SEA3D.AMMO.removeConstraint( this.constraints[ i ], true ); - - i = this.shapes ? this.shapes.length : 0; - while ( i -- ) Ammo.destroy( this.shapes[ i ] ); - - } - -} ); diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/jsm/utils/ShadowMapViewer.js b/spaces/banana-projects/web3d/node_modules/three/examples/jsm/utils/ShadowMapViewer.js deleted file mode 100644 index 340efd3f339f776062b01678a0b75adaa67e5623..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/jsm/utils/ShadowMapViewer.js +++ /dev/null @@ -1,208 +0,0 @@ -/** - * @author arya-s / https://github.com/arya-s - * - * This is a helper for visualising a given light's shadow map. - * It works for shadow casting lights: THREE.DirectionalLight and THREE.SpotLight. - * It renders out the shadow map and displays it on a HUD. - * - * Example usage: - * 1) Include \n\t\t" + head + "\n\t\n\t\n\t\t" + body + "\n\t\t\n\n"; - -let read = null; - -set_paths({"base":"/static","assets":""}); - -let default_protocol = 'https'; - -// allow paths to be globally overridden -// in svelte-kit preview and in prerendering -export function override(settings) { - default_protocol = settings.protocol || default_protocol; - set_paths(settings.paths); - set_prerendering(settings.prerendering); - read = settings.read; -} - -export class Server { - constructor(manifest) { - this.options = { - csp: {"mode":"auto","directives":{"upgrade-insecure-requests":false,"block-all-mixed-content":false}}, - dev: false, - floc: false, - get_stack: error => String(error), // for security - handle_error: (error, event) => { - this.options.hooks.handleError({ - error, - event, - - // TODO remove for 1.0 - // @ts-expect-error - get request() { - throw new Error('request in handleError has been replaced with event. 
See https://github.com/sveltejs/kit/pull/3384 for details'); - } - }); - error.stack = this.options.get_stack(error); - }, - hooks: null, - hydrate: true, - manifest, - method_override: {"parameter":"_method","allowed":[]}, - paths: { base, assets }, - prefix: assets + '/_app/immutable/', - prerender: { - default: true, - enabled: true - }, - read, - root, - service_worker: null, - router: true, - template, - template_contains_nonce: false, - trailing_slash: "never" - }; - } - - async respond(request, options = {}) { - if (!(request instanceof Request)) { - throw new Error('The first argument to server.respond must be a Request object. See https://github.com/sveltejs/kit/pull/3384 for details'); - } - - if (!this.options.hooks) { - const module = await import("./hooks.js"); - this.options.hooks = { - getSession: module.getSession || (() => ({})), - handle: module.handle || (({ event, resolve }) => resolve(event)), - handleError: module.handleError || (({ error }) => console.error(error.stack)), - externalFetch: module.externalFetch || fetch - }; - } - - return respond(request, this.options, options); - } -} diff --git a/spaces/rorallitri/biomedical-language-models/logs/HD Online Player (The Hurt Locker 2008 Brrip 720p Dual).md b/spaces/rorallitri/biomedical-language-models/logs/HD Online Player (The Hurt Locker 2008 Brrip 720p Dual).md deleted file mode 100644 index eb679467e64203b70043f7597123935801c117aa..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/HD Online Player (The Hurt Locker 2008 Brrip 720p Dual).md +++ /dev/null @@ -1,6 +0,0 @@ -

HD Online Player (The Hurt Locker 2008 Brrip 720p Dual)


DOWNLOADhttps://tinurll.com/2uzmkz



-
-Watch Hindi TV channels live online Nov 28, 2019 · Commando 3 ... Challo Movie Online Watch Chammak Challo Full Length HD Movie Online on YuppFlix. ... The Hurt Locker (2008) Tags: Watch Running Man Episode 63 Engsub, ... After Tomorrow 2004 Movie BluRay Dual Audio Hindi Eng 300mb 480p 1GB 720p 5GB ... 1fdad05405
-
-
-

diff --git a/spaces/rorallitri/biomedical-language-models/logs/K Daniela Flying Semen.md b/spaces/rorallitri/biomedical-language-models/logs/K Daniela Flying Semen.md deleted file mode 100644 index a33372346a1142e34ce5ddbb1b6c050527916a12..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/K Daniela Flying Semen.md +++ /dev/null @@ -1,5 +0,0 @@ - -

CLICK TO GET "KKliProC4S 001-hdv handjob with french nails POV 00.14.24720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 005-hdv new lube to try out 00.20.13720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 062 leather on your cock 00.09.34720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 070b two hours in my hands PART B 00.11.48720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 075 cash per cumshot summary 00.12.23720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 081 totally taped 00.09.46720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 086b oral treatment PART B 00.09.28720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 105 between my boobs 00.11.20720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 107 finger exercise 00.09.50720p.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 147a extreme cock teasing PART A 00.19.41720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 147b extreme cock teasing PART B 00.10.35720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 151 j stiff issue AB joined 00.16.04720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 154 balls-bondage.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 156a under anesthetic PART A 00.20.29720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 157b colour roulette PART B 00.10.33720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 160 slip in my boobies 00.11.35720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 173b the lingam massage PART B 00.18.50720p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 178b slippery cover PART B.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 184b two teasing tongues PART B 00.10.281080p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 185 swallow on back 00.10.341080p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 186b sophisticated twisting PART B 00.10.331080p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 203b my POV your POV PART B.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 209b over my face PART B 00.08.551080p.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 218a three teasing tongues 2 PART A 00.31.111080p.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 218b three teasing tongues 2 PART B 00.14.191080p.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 221a two teasing tongues 5 PART A.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 223 pinch grip.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 224a Daniela Flying Semen PART A.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 224b Daniela Flying Semen PART B.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 226a two teasing tongues 6 PART A.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 226b two teasing tongues 6 PART B.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 230a daniela adorable lips PART A.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 230b daniela adorable lips PART B.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 233a two teasing tongues 7 PART A.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 233b two teasing tongues 7 PART B.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 234b daniela mind blowing fingers PART B.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 235b Daniela Dream Team PART B.mp4" FROM K2S.CC

CLICK TO GET "KKliProC4S 236a cipriana wild oral passion PART A.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 239a two teasing tongues 8 PART A.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 239b two teasing tongues 8 PART B.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 243b three teasing tongues 3 PART B.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 244b Daniela Kissed to cumshot PART B.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 246b two teasing tongues 9 PART B.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 248b two teasing tongues 10 PART B.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 250a Daniela takes a facial PART A.wmv" FROM K2S.CC

CLICK TO GET "KKliProC4S 253b two teasing tongues 11 PART B.wmv" FROM K2S.CC

-

K Daniela Flying Semen


DOWNLOADhttps://tinurll.com/2uznVO



aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/roshithindia/song-generation/app.py b/spaces/roshithindia/song-generation/app.py deleted file mode 100644 index 5675b767e19998e10dd9f8815073e06ee37d5bd2..0000000000000000000000000000000000000000 --- a/spaces/roshithindia/song-generation/app.py +++ /dev/null @@ -1,216 +0,0 @@ -from queue import Queue -from threading import Thread -from typing import Optional - -import numpy as np -import torch - -from transformers import MusicgenForConditionalGeneration, MusicgenProcessor, set_seed -from transformers.generation.streamers import BaseStreamer - -import gradio as gr -import spaces - - -model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") -processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small") - -title = "MusicGen Streaming" - -description = """ -Stream the outputs of the MusicGen text-to-music model by playing the generated audio as soon as the first chunk is ready. -Demo uses [MusicGen Small](https://huggingface.co/facebook/musicgen-small) in the 🤗 Transformers library. Note that the -demo works best on the Chrome browser. If there is no audio output, try switching browser to Chrome. -""" - -article = """ -## Try different theme -""" - - -class MusicgenStreamer(BaseStreamer): - def __init__( - self, - model: MusicgenForConditionalGeneration, - device: Optional[str] = None, - play_steps: Optional[int] = 10, - stride: Optional[int] = None, - timeout: Optional[float] = None, - ): - """ - Streamer that stores playback-ready audio in a queue, to be used by a downstream application as an iterator. This is - useful for applications that benefit from accessing the generated audio in a non-blocking way (e.g. in an interactive - Gradio demo). - Parameters: - model (`MusicgenForConditionalGeneration`): - The MusicGen model used to generate the audio waveform. - device (`str`, *optional*): - The torch device on which to run the computation. If `None`, will default to the device of the model. - play_steps (`int`, *optional*, defaults to 10): - The number of generation steps with which to return the generated audio array. Using fewer steps will - mean the first chunk is ready faster, but will require more codec decoding steps overall. This value - should be tuned to your device and latency requirements. - stride (`int`, *optional*): - The window (stride) between adjacent audio samples. Using a stride between adjacent audio samples reduces - the hard boundary between them, giving smoother playback. If `None`, will default to a value equivalent to - play_steps // 6 in the audio space. - timeout (`int`, *optional*): - The timeout for the audio queue. If `None`, the queue will block indefinitely. Useful to handle exceptions - in `.generate()`, when it is called in a separate thread. 
- """ - self.decoder = model.decoder - self.audio_encoder = model.audio_encoder - self.generation_config = model.generation_config - self.device = device if device is not None else model.device - - # variables used in the streaming process - self.play_steps = play_steps - if stride is not None: - self.stride = stride - else: - hop_length = np.prod(self.audio_encoder.config.upsampling_ratios) - self.stride = hop_length * (play_steps - self.decoder.num_codebooks) // 6 - self.token_cache = None - self.to_yield = 0 - - # varibles used in the thread process - self.audio_queue = Queue() - self.stop_signal = None - self.timeout = timeout - - def apply_delay_pattern_mask(self, input_ids): - # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to MusicGen) - _, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask( - input_ids[:, :1], - pad_token_id=self.generation_config.decoder_start_token_id, - max_length=input_ids.shape[-1], - ) - # apply the pattern mask to the input ids - input_ids = self.decoder.apply_delay_pattern_mask(input_ids, decoder_delay_pattern_mask) - - # revert the pattern delay mask by filtering the pad token id - input_ids = input_ids[input_ids != self.generation_config.pad_token_id].reshape( - 1, self.decoder.num_codebooks, -1 - ) - - # append the frame dimension back to the audio codes - input_ids = input_ids[None, ...] - - # send the input_ids to the correct device - input_ids = input_ids.to(self.audio_encoder.device) - - output_values = self.audio_encoder.decode( - input_ids, - audio_scales=[None], - ) - audio_values = output_values.audio_values[0, 0] - return audio_values.cpu().float().numpy() - - def put(self, value): - batch_size = value.shape[0] // self.decoder.num_codebooks - if batch_size > 1: - raise ValueError("MusicgenStreamer only supports batch size 1") - - if self.token_cache is None: - self.token_cache = value - else: - self.token_cache = torch.concatenate([self.token_cache, value[:, None]], dim=-1) - - if self.token_cache.shape[-1] % self.play_steps == 0: - audio_values = self.apply_delay_pattern_mask(self.token_cache) - self.on_finalized_audio(audio_values[self.to_yield : -self.stride]) - self.to_yield += len(audio_values) - self.to_yield - self.stride - - def end(self): - """Flushes any remaining cache and appends the stop symbol.""" - if self.token_cache is not None: - audio_values = self.apply_delay_pattern_mask(self.token_cache) - else: - audio_values = np.zeros(self.to_yield) - - self.on_finalized_audio(audio_values[self.to_yield :], stream_end=True) - - def on_finalized_audio(self, audio: np.ndarray, stream_end: bool = False): - """Put the new audio in the queue. 
If the stream is ending, also put a stop signal in the queue.""" - self.audio_queue.put(audio, timeout=self.timeout) - if stream_end: - self.audio_queue.put(self.stop_signal, timeout=self.timeout) - - def __iter__(self): - return self - - def __next__(self): - value = self.audio_queue.get(timeout=self.timeout) - if not isinstance(value, np.ndarray) and value == self.stop_signal: - raise StopIteration() - else: - return value - - -sampling_rate = model.audio_encoder.config.sampling_rate -frame_rate = model.audio_encoder.config.frame_rate - -target_dtype = np.int16 -max_range = np.iinfo(target_dtype).max - - -@spaces.GPU() -def generate_audio(text_prompt, audio_length_in_s=10.0, play_steps_in_s=2.0, seed=0): - max_new_tokens = int(frame_rate * audio_length_in_s) - play_steps = int(frame_rate * play_steps_in_s) - - device = "cuda:0" if torch.cuda.is_available() else "cpu" - if device != model.device: - model.to(device) - if device == "cuda:0": - model.half() - - inputs = processor( - text=text_prompt, - padding=True, - return_tensors="pt", - ) - - streamer = MusicgenStreamer(model, device=device, play_steps=play_steps) - - generation_kwargs = dict( - **inputs.to(device), - streamer=streamer, - max_new_tokens=max_new_tokens, - ) - thread = Thread(target=model.generate, kwargs=generation_kwargs) - thread.start() - - set_seed(seed) - for new_audio in streamer: - print(f"Sample of length: {round(new_audio.shape[0] / sampling_rate, 2)} seconds") - new_audio = (new_audio * max_range).astype(np.int16) - yield sampling_rate, new_audio - - -demo = gr.Interface( - fn=generate_audio, - inputs=[ - gr.Text(label="Prompt", value="80s pop track with synth and instrumentals"), - gr.Slider(10, 30, value=15, step=5, label="Audio length in seconds"), - gr.Slider(0.5, 2.5, value=1.5, step=0.5, label="Streaming interval in seconds", info="Lower = shorter chunks, lower latency, more codec steps"), - gr.Slider(0, 10, value=5, step=1, label="Seed for random generations"), - ], - outputs=[ - gr.Audio(label="Generated Music", streaming=True, autoplay=True) - ], - examples=[ - ["An 80s driving pop song with heavy drums and synth pads in the background", 30, 1.5, 5], - ["A cheerful country song with acoustic guitars", 30, 1.5, 5], - ["90s rock song with electric guitar and heavy drums", 30, 1.5, 5], - ["a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130", 30, 1.5, 5], - ["lofi slow bpm electro chill with organic samples", 30, 1.5, 5], - ], - title=title, - description=description, - article=article, - cache_examples=False, -) - - -demo.queue().launch() \ No newline at end of file diff --git a/spaces/rrighart/color-tags/app.py b/spaces/rrighart/color-tags/app.py deleted file mode 100644 index 3f507f83c0b0254e7dfe1b67a8db06c76ada65c4..0000000000000000000000000000000000000000 --- a/spaces/rrighart/color-tags/app.py +++ /dev/null @@ -1,66 +0,0 @@ -import gradio as gr -import tensorflow as tf -import numpy as np -from numpy import asarray -from datetime import datetime - - -model = tf.keras.models.load_model("simple-CNN-model.2022-8-9.hdf5") - -def image_predict(img): - """ - Displays dominant colors beyond a given threshold. - img : image input, for ex 'blue-car.jpg' - isize: input image size, for ex. 
227 - thr: chosen threshold value - """ - thr=0 - global model - if model is None: - model = tf.keras.models.load_model("models/simple-CNN-model.2022-8-9.hdf5") - - data = np.asarray(img) - - ndata = np.expand_dims(data, axis=0) - y_prob = model.predict(ndata/255) - #y_prob.argmax(axis=-1) - - now = datetime.now() - print("--------") - print("data and time: ", now) - - colorlabels = ['beige', 'black', 'blue', 'brown', 'gold', 'green', 'grey', 'orange', 'pink', 'purple', 'red', 'silver', 'tan', 'white', 'yellow'] - coltags = [sorted(colorlabels)[i] for i in np.where(np.ravel(y_prob)>thr)[0]] - colprob = [np.ravel(y_prob)[i] for i in list(np.where(np.ravel(y_prob)>thr)[0])] - - if len(coltags) > 0: - response = [] - for i,j in zip(coltags, colprob): - #print(i,j) - resp = {} - resp[i] = float(j) - response.append(resp) - d = dict(map(dict.popitem, response)) - print('colors: ', d) - - return dict(d) - - else: - return str('No label was found') - -iface = gr.Interface( - title = "Object color tagging", - description = "App classifying objects on different colors", - article = "

Webpage

", - fn=image_predict, - inputs=gr.Image(shape=(227,227)), - outputs=gr.Label(), - examples=['shoes1.jpg', 'shoes2.jpg'], - enable_queue=True, - interpretation="default", - debug=True - ) -iface.launch() - - - diff --git a/spaces/runa91/bite_gradio/src/stacked_hourglass/utils/misc.py b/spaces/runa91/bite_gradio/src/stacked_hourglass/utils/misc.py deleted file mode 100644 index d754c55dc2206bbb2a5cabf18c4017b5c1ee3d04..0000000000000000000000000000000000000000 --- a/spaces/runa91/bite_gradio/src/stacked_hourglass/utils/misc.py +++ /dev/null @@ -1,56 +0,0 @@ -# Modified from: -# https://github.com/anibali/pytorch-stacked-hourglass -# https://github.com/bearpaw/pytorch-pose - -import os -import shutil - -import scipy.io -import torch - - -def to_numpy(tensor): - if torch.is_tensor(tensor): - return tensor.detach().cpu().numpy() - elif type(tensor).__module__ != 'numpy': - raise ValueError("Cannot convert {} to numpy array" - .format(type(tensor))) - return tensor - - -def to_torch(ndarray): - if type(ndarray).__module__ == 'numpy': - return torch.from_numpy(ndarray) - elif not torch.is_tensor(ndarray): - raise ValueError("Cannot convert {} to torch tensor" - .format(type(ndarray))) - return ndarray - - -def save_checkpoint(state, preds, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar', snapshot=None): - preds = to_numpy(preds) - filepath = os.path.join(checkpoint, filename) - torch.save(state, filepath) - scipy.io.savemat(os.path.join(checkpoint, 'preds.mat'), mdict={'preds' : preds}) - - if snapshot and state['epoch'] % snapshot == 0: - shutil.copyfile(filepath, os.path.join(checkpoint, 'checkpoint_{}.pth.tar'.format(state['epoch']))) - - if is_best: - shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar')) - scipy.io.savemat(os.path.join(checkpoint, 'preds_best.mat'), mdict={'preds' : preds}) - - -def save_pred(preds, checkpoint='checkpoint', filename='preds_valid.mat'): - preds = to_numpy(preds) - filepath = os.path.join(checkpoint, filename) - scipy.io.savemat(filepath, mdict={'preds' : preds}) - - -def adjust_learning_rate(optimizer, epoch, lr, schedule, gamma): - """Sets the learning rate to the initial LR decayed by schedule""" - if epoch in schedule: - lr *= gamma - for param_group in optimizer.param_groups: - param_group['lr'] = lr - return lr diff --git a/spaces/sanniu/newchat/README.md b/spaces/sanniu/newchat/README.md deleted file mode 100644 index 3f75e583928790aafdf262e1beb2b70583f6be72..0000000000000000000000000000000000000000 --- a/spaces/sanniu/newchat/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Newchat -emoji: 📊 -colorFrom: indigo -colorTo: blue -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/scedlatioru/img-to-music/example/Carte Antreprenoriat Marius Ghenea.pdf.md b/spaces/scedlatioru/img-to-music/example/Carte Antreprenoriat Marius Ghenea.pdf.md deleted file mode 100644 index 95163b650d5b157685f43152075228455a4e721e..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Carte Antreprenoriat Marius Ghenea.pdf.md +++ /dev/null @@ -1,6 +0,0 @@ - -

▸ cumpr cartea 34. prioritatea cetateanului - david r. poole, anthony m. schumacher cumpr cartea

ce este mai rau decat sa se simta rau si stresat este, cand zicem ca primesti punctele de vedere diferite si daca ameti sunteti saturi cu el, avem sarcina sa vinimilor si daca nu, atunci am avut sa uitam daca mai am pateaga si trebuie sa mai dam o buna! e o prea mare responsabilitate. cetateanii sunt cei mai importanti clienti, clientii care consemneaza ceea ce luam in ele. orice faima sa sa aincereze si sa du-a sunteti de prez a tre inconsemneaza mai mult pe el decat iti arei n format pdf care s formatul cartilor de business sa meriti sa primeasca inseamtul sa de carti pentru afaceri sa merite sa aja..cartile pentru afaceri sa merite sa prinzi.

-

Carte Antreprenoriat Marius Ghenea.pdf


Download ……… https://gohhs.com/2uEA9l



-

este adevarat ca veniturile din afaceri nu au niciun fel de cunostinta (abstrainteia aici nu inseamna ignorarele) de la existenta unei ramuri majore a economiei. si cealalta realitate e ca costul cheltuielilor e o deosebita problema pentru cine crede ca functioneaza orice business ca orice alta afaceri. este dorinta antreprenorului de a avea o afaceri financiara mai usoara, care sa aiba o rentabilitate mai mare, dar costuri mai mici. pe baza acestor fondatoare si a experientei experienta mea, am dezvoltat o putina revolutie. m-am concentrat si asupra rolului personalului de la nivelul unitatilor de primire in randul oamenilor si asupra disciplinei de discernment. claritatea mea este ca se raporteaza la un proces de discernment care are loc in viata privatului. acest proces contine un proces distinct de cel de cultura, a credintei, de speranta, de afaceri. insa nu cred ca necesar este sa se vorbeasca despre asta ca despre una dintre cele mai putine munci, intrucat suntem obisnuiti sa ne sprijinem in comunitatile noastre publice ca fiind sfinte ce desfasuram noi, direct la noi si nu la altii. cum echilibrarea dintre desfasurarea afacerii, a oricarei afaceri, iar oamenii respectivi ei ii facea suportul imediat si a costisilor cele mai mici? si care e rolul personalului? este un proces imbunatatit de la tinereza incontinenta dar care imigrantii nenorociati in cazul romaniei sunt pur si simplu obligati la abstracata si fuge cu colectivitatea de estasie. comportamentul privindu-i pe ceilalti si imparala aceasta etapa din viata trebuie sa se corecteze! vorbim de afaceri ca de un proces oarecare pe care trebuie sa ne facem treaba, oricare dintre dorintele si dorintile tale. asa cum vorbim pretex sa trebuie sa danieliii altilor. asa cum scaserile sa meratasca ilomulutional cu profitul sa fiu. asa sa cauti un pozitiv in cei mai puti care sunt mai activi! si corecuselile si dorintile tale se discutiu in acelasi tim pentru toti sa vasca debilii de estasie. acest proces trei desactive inlocuita cu felia si continutului cu costulcel mai minicantelor. orice inceint a scu-teci privat de dinporterii.

899543212b
-
-
\ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Hitman David Foster And Friends 2008 Dts 720p Torrent.md b/spaces/scedlatioru/img-to-music/example/Hitman David Foster And Friends 2008 Dts 720p Torrent.md deleted file mode 100644 index 70b952527614b23fd416ea5b55c40b1b1ea2a2eb..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Hitman David Foster And Friends 2008 Dts 720p Torrent.md +++ /dev/null @@ -1,7 +0,0 @@ - -

the spectacular one-night only concert by david foster and his famous friends finds the songwriter and hit producer gathering together the best tracks from his bottomless well of hits and inviting some talented folks to come along and help perform them. not only are the songs magical, the performances are just as enchanting, including appearances from andrea bocelli, michael bubl, josh groban, kenny g, katharine mcphee, celine dion, blake shelton, brian mcknight, and charice. spotlighting #1 hits, award-winning songs, and other favorites all but one originally penned and/or produced by foster this may 23, 2008 show at the mandalay bay in las vegas was unforgettable. experience it again and again

-

Hitman David Foster And Friends 2008 Dts 720p Torrent


Download Ziphttps://gohhs.com/2uEA3x



-

hit man returns: david foster & friends (2010) blu-ray 1080i avc dts-hd 5.1.. the may 23, 2008 show at the mandalay bay in las vegas was unforgettable. 1hr 35mn 720p mbluray 1280720 mkv 7332 kbps dts 1509 kbps 4.37. format: multiple formats, blu-ray, color, ntsc; language: english (dts-hd. the spectacular one-night only concert of david foster and friends --. concerts mkv 15.18g torrent; charice - hit man david foster and friends clip. the cure trilogy live in berlin 2002 dts 720p mkv concert. 3ae92a269d free download mdesign mechanical.rar

-

restorer ultimate pro network 7 8 build 708689 portablel
la fille aux bleuets, suite et fin
accessdiver download mac
mon impression : axis
powershape 2019 x86-x64 torrent
girl workout clothes sex
crack keygen robot structural analysis professional 2012
herunterladenbuzzsaw 2010 aktivator 32 bits
[ zip album ] die zimmermnner - 1001 wege sex zu machen ohne daran spa zu haben 2019 mp3 320 kbps
virtual date games pool party

899543212b
-
-
\ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Phoenix Fd 3ds Max 2016 Keygen ((NEW)).md b/spaces/scedlatioru/img-to-music/example/Phoenix Fd 3ds Max 2016 Keygen ((NEW)).md deleted file mode 100644 index 3497c27db86d331a836b45dca743139d9ada2c84..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Phoenix Fd 3ds Max 2016 Keygen ((NEW)).md +++ /dev/null @@ -1,12 +0,0 @@ -

phoenix fd 3ds max 2016 keygen


DOWNLOADhttps://gohhs.com/2uEzlc



-
-January 1, 2021 - Phoenix FD v4.20 plugin for 3ds max from 2016 to 2021. Phoenix FD is one of the advanced fire, smoke, . Fire Fusion is a plugin that will allow you to model and animate flames, smoke, sparks, and many other effects. -In this article, we'll look at how to install and update the plugin for 3ds Max to version 4. It's also available in 3ds Max Design and SketchUp. -The plugin is designed for use with 3ds Max, Adobe Maya, Blender and Cinema 4D. -Fire Fusion is available from the website. -It can be downloaded for free. -Download Asus N53J Network Card Driver next. -It is compatible with Autodesk M 8a78ff9644
-
-
-

diff --git a/spaces/scedlatioru/img-to-music/example/Xforce Keygen __TOP__ Robot Structural Analysis Professional 2015 Keygen __TOP__.md b/spaces/scedlatioru/img-to-music/example/Xforce Keygen __TOP__ Robot Structural Analysis Professional 2015 Keygen __TOP__.md deleted file mode 100644 index 14dce9d731a91f6217f292cde3003b67cc47fb4a..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Xforce Keygen __TOP__ Robot Structural Analysis Professional 2015 Keygen __TOP__.md +++ /dev/null @@ -1,6 +0,0 @@ -

xforce keygen Robot Structural Analysis Professional 2015 keygen


Download Ziphttps://gohhs.com/2uEA1d



- - d5da3c52bf
-
-
-

diff --git a/spaces/segestic/COVIDPrediction/application/components/prediction/symptom_check.py b/spaces/segestic/COVIDPrediction/application/components/prediction/symptom_check.py deleted file mode 100644 index 6cd3d891e84a69f94abd6faf0f84c419c86aee81..0000000000000000000000000000000000000000 --- a/spaces/segestic/COVIDPrediction/application/components/prediction/symptom_check.py +++ /dev/null @@ -1,14 +0,0 @@ -from application.schema import Symptom - -def get_risk_level(symptom: Symptom): - if not (symptom.fever or symptom.dry_cough or symptom.tiredness or symptom.breathing_problem): - return 'Low risk level. THIS IS A DEMO APP' - - if not (symptom.breathing_problem or symptom.dry_cough): - if symptom.fever: - return 'moderate risk level. THIS IS A DEMO APP' - - if symptom.breathing_problem: - return 'High risk level. THIS IS A DEMO APP' - - return 'THIS IS A DEMO APP' \ No newline at end of file diff --git a/spaces/segments-tobias/conex/espnet/nets/batch_beam_search.py b/spaces/segments-tobias/conex/espnet/nets/batch_beam_search.py deleted file mode 100644 index ba861f3f154258a1708185fdd792f8a17c29f585..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/batch_beam_search.py +++ /dev/null @@ -1,348 +0,0 @@ -"""Parallel beam search module.""" - -import logging -from typing import Any -from typing import Dict -from typing import List -from typing import NamedTuple -from typing import Tuple - -import torch -from torch.nn.utils.rnn import pad_sequence - -from espnet.nets.beam_search import BeamSearch -from espnet.nets.beam_search import Hypothesis - - -class BatchHypothesis(NamedTuple): - """Batchfied/Vectorized hypothesis data type.""" - - yseq: torch.Tensor = torch.tensor([]) # (batch, maxlen) - score: torch.Tensor = torch.tensor([]) # (batch,) - length: torch.Tensor = torch.tensor([]) # (batch,) - scores: Dict[str, torch.Tensor] = dict() # values: (batch,) - states: Dict[str, Dict] = dict() - - def __len__(self) -> int: - """Return a batch size.""" - return len(self.length) - - -class BatchBeamSearch(BeamSearch): - """Batch beam search implementation.""" - - def batchfy(self, hyps: List[Hypothesis]) -> BatchHypothesis: - """Convert list to batch.""" - if len(hyps) == 0: - return BatchHypothesis() - return BatchHypothesis( - yseq=pad_sequence( - [h.yseq for h in hyps], batch_first=True, padding_value=self.eos - ), - length=torch.tensor([len(h.yseq) for h in hyps], dtype=torch.int64), - score=torch.tensor([h.score for h in hyps]), - scores={k: torch.tensor([h.scores[k] for h in hyps]) for k in self.scorers}, - states={k: [h.states[k] for h in hyps] for k in self.scorers}, - ) - - def _batch_select(self, hyps: BatchHypothesis, ids: List[int]) -> BatchHypothesis: - return BatchHypothesis( - yseq=hyps.yseq[ids], - score=hyps.score[ids], - length=hyps.length[ids], - scores={k: v[ids] for k, v in hyps.scores.items()}, - states={ - k: [self.scorers[k].select_state(v, i) for i in ids] - for k, v in hyps.states.items() - }, - ) - - def _select(self, hyps: BatchHypothesis, i: int) -> Hypothesis: - return Hypothesis( - yseq=hyps.yseq[i, : hyps.length[i]], - score=hyps.score[i], - scores={k: v[i] for k, v in hyps.scores.items()}, - states={ - k: self.scorers[k].select_state(v, i) for k, v in hyps.states.items() - }, - ) - - def unbatchfy(self, batch_hyps: BatchHypothesis) -> List[Hypothesis]: - """Revert batch to list.""" - return [ - Hypothesis( - yseq=batch_hyps.yseq[i][: batch_hyps.length[i]], - score=batch_hyps.score[i], - scores={k: batch_hyps.scores[k][i] for 
k in self.scorers}, - states={ - k: v.select_state(batch_hyps.states[k], i) - for k, v in self.scorers.items() - }, - ) - for i in range(len(batch_hyps.length)) - ] - - def batch_beam( - self, weighted_scores: torch.Tensor, ids: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Batch-compute topk full token ids and partial token ids. - - Args: - weighted_scores (torch.Tensor): The weighted sum scores for each tokens. - Its shape is `(n_beam, self.vocab_size)`. - ids (torch.Tensor): The partial token ids to compute topk. - Its shape is `(n_beam, self.pre_beam_size)`. - - Returns: - Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - The topk full (prev_hyp, new_token) ids - and partial (prev_hyp, new_token) ids. - Their shapes are all `(self.beam_size,)` - - """ - top_ids = weighted_scores.view(-1).topk(self.beam_size)[1] - # Because of the flatten above, `top_ids` is organized as: - # [hyp1 * V + token1, hyp2 * V + token2, ..., hypK * V + tokenK], - # where V is `self.n_vocab` and K is `self.beam_size` - prev_hyp_ids = top_ids // self.n_vocab - new_token_ids = top_ids % self.n_vocab - return prev_hyp_ids, new_token_ids, prev_hyp_ids, new_token_ids - - def init_hyp(self, x: torch.Tensor) -> BatchHypothesis: - """Get an initial hypothesis data. - - Args: - x (torch.Tensor): The encoder output feature - - Returns: - Hypothesis: The initial hypothesis. - - """ - init_states = dict() - init_scores = dict() - for k, d in self.scorers.items(): - init_states[k] = d.batch_init_state(x) - init_scores[k] = 0.0 - return self.batchfy( - [ - Hypothesis( - score=0.0, - scores=init_scores, - states=init_states, - yseq=torch.tensor([self.sos], device=x.device), - ) - ] - ) - - def score_full( - self, hyp: BatchHypothesis, x: torch.Tensor - ) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: - """Score new hypothesis by `self.full_scorers`. - - Args: - hyp (Hypothesis): Hypothesis with prefix tokens to score - x (torch.Tensor): Corresponding input feature - - Returns: - Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: Tuple of - score dict of `hyp` that has string keys of `self.full_scorers` - and tensor score values of shape: `(self.n_vocab,)`, - and state dict that has string keys - and state values of `self.full_scorers` - - """ - scores = dict() - states = dict() - for k, d in self.full_scorers.items(): - scores[k], states[k] = d.batch_score(hyp.yseq, hyp.states[k], x) - return scores, states - - def score_partial( - self, hyp: BatchHypothesis, ids: torch.Tensor, x: torch.Tensor - ) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: - """Score new hypothesis by `self.full_scorers`. - - Args: - hyp (Hypothesis): Hypothesis with prefix tokens to score - ids (torch.Tensor): 2D tensor of new partial tokens to score - x (torch.Tensor): Corresponding input feature - - Returns: - Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: Tuple of - score dict of `hyp` that has string keys of `self.full_scorers` - and tensor score values of shape: `(self.n_vocab,)`, - and state dict that has string keys - and state values of `self.full_scorers` - - """ - scores = dict() - states = dict() - for k, d in self.part_scorers.items(): - scores[k], states[k] = d.batch_score_partial( - hyp.yseq, ids, hyp.states[k], x - ) - return scores, states - - def merge_states(self, states: Any, part_states: Any, part_idx: int) -> Any: - """Merge states for new hypothesis. 
- - Args: - states: states of `self.full_scorers` - part_states: states of `self.part_scorers` - part_idx (int): The new token id for `part_scores` - - Returns: - Dict[str, torch.Tensor]: The new score dict. - Its keys are names of `self.full_scorers` and `self.part_scorers`. - Its values are states of the scorers. - - """ - new_states = dict() - for k, v in states.items(): - new_states[k] = v - for k, v in part_states.items(): - new_states[k] = v - return new_states - - def search(self, running_hyps: BatchHypothesis, x: torch.Tensor) -> BatchHypothesis: - """Search new tokens for running hypotheses and encoded speech x. - - Args: - running_hyps (BatchHypothesis): Running hypotheses on beam - x (torch.Tensor): Encoded speech feature (T, D) - - Returns: - BatchHypothesis: Best sorted hypotheses - - """ - n_batch = len(running_hyps) - part_ids = None # no pre-beam - # batch scoring - weighted_scores = torch.zeros( - n_batch, self.n_vocab, dtype=x.dtype, device=x.device - ) - scores, states = self.score_full(running_hyps, x.expand(n_batch, *x.shape)) - for k in self.full_scorers: - weighted_scores += self.weights[k] * scores[k] - # partial scoring - if self.do_pre_beam: - pre_beam_scores = ( - weighted_scores - if self.pre_beam_score_key == "full" - else scores[self.pre_beam_score_key] - ) - part_ids = torch.topk(pre_beam_scores, self.pre_beam_size, dim=-1)[1] - # NOTE(takaaki-hori): Unlike BeamSearch, we assume that score_partial returns - # full-size score matrices, which has non-zero scores for part_ids and zeros - # for others. - part_scores, part_states = self.score_partial(running_hyps, part_ids, x) - for k in self.part_scorers: - weighted_scores += self.weights[k] * part_scores[k] - # add previous hyp scores - weighted_scores += running_hyps.score.to( - dtype=x.dtype, device=x.device - ).unsqueeze(1) - - # TODO(karita): do not use list. use batch instead - # see also https://github.com/espnet/espnet/pull/1402#discussion_r354561029 - # update hyps - best_hyps = [] - prev_hyps = self.unbatchfy(running_hyps) - for ( - full_prev_hyp_id, - full_new_token_id, - part_prev_hyp_id, - part_new_token_id, - ) in zip(*self.batch_beam(weighted_scores, part_ids)): - prev_hyp = prev_hyps[full_prev_hyp_id] - best_hyps.append( - Hypothesis( - score=weighted_scores[full_prev_hyp_id, full_new_token_id], - yseq=self.append_token(prev_hyp.yseq, full_new_token_id), - scores=self.merge_scores( - prev_hyp.scores, - {k: v[full_prev_hyp_id] for k, v in scores.items()}, - full_new_token_id, - {k: v[part_prev_hyp_id] for k, v in part_scores.items()}, - part_new_token_id, - ), - states=self.merge_states( - { - k: self.full_scorers[k].select_state(v, full_prev_hyp_id) - for k, v in states.items() - }, - { - k: self.part_scorers[k].select_state( - v, part_prev_hyp_id, part_new_token_id - ) - for k, v in part_states.items() - }, - part_new_token_id, - ), - ) - ) - return self.batchfy(best_hyps) - - def post_process( - self, - i: int, - maxlen: int, - maxlenratio: float, - running_hyps: BatchHypothesis, - ended_hyps: List[Hypothesis], - ) -> BatchHypothesis: - """Perform post-processing of beam search iterations. - - Args: - i (int): The length of hypothesis tokens. - maxlen (int): The maximum length of tokens in beam search. - maxlenratio (int): The maximum length ratio in beam search. - running_hyps (BatchHypothesis): The running hypotheses in beam search. - ended_hyps (List[Hypothesis]): The ended hypotheses in beam search. - - Returns: - BatchHypothesis: The new running hypotheses. 
- - """ - n_batch = running_hyps.yseq.shape[0] - logging.debug(f"the number of running hypothes: {n_batch}") - if self.token_list is not None: - logging.debug( - "best hypo: " - + "".join( - [ - self.token_list[x] - for x in running_hyps.yseq[0, 1 : running_hyps.length[0]] - ] - ) - ) - # add eos in the final loop to avoid that there are no ended hyps - if i == maxlen - 1: - logging.info("adding in the last position in the loop") - yseq_eos = torch.cat( - ( - running_hyps.yseq, - torch.full( - (n_batch, 1), - self.eos, - device=running_hyps.yseq.device, - dtype=torch.int64, - ), - ), - 1, - ) - running_hyps.yseq.resize_as_(yseq_eos) - running_hyps.yseq[:] = yseq_eos - running_hyps.length[:] = yseq_eos.shape[1] - - # add ended hypotheses to a final list, and removed them from current hypotheses - # (this will be a probmlem, number of hyps < beam) - is_eos = ( - running_hyps.yseq[torch.arange(n_batch), running_hyps.length - 1] - == self.eos - ) - for b in torch.nonzero(is_eos).view(-1): - hyp = self._select(running_hyps, b) - ended_hyps.append(hyp) - remained_ids = torch.nonzero(is_eos == 0).view(-1) - return self._batch_select(running_hyps, remained_ids) diff --git a/spaces/shabnam91/Sanskrit-TTS/indic_nlp_library/indicnlp/script/phonetic_sim.py b/spaces/shabnam91/Sanskrit-TTS/indic_nlp_library/indicnlp/script/phonetic_sim.py deleted file mode 100644 index 87f56b63dd38c0f8fd5edf9b6ee5131afd332f31..0000000000000000000000000000000000000000 --- a/spaces/shabnam91/Sanskrit-TTS/indic_nlp_library/indicnlp/script/phonetic_sim.py +++ /dev/null @@ -1,59 +0,0 @@ -# -# Copyright (c) 2013-present, Anoop Kunchukuttan -# All rights reserved. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-# - -from indicnlp import loader -from indicnlp import langinfo -from indicnlp.script.indic_scripts import * -import numpy as np -import gzip -import pandas as pd -import sys - -def equal(v1,v2): - return 0.0 if np.sum( xor_vectors(v1, v2)) > 0 else 1.0 - -def dice(v1,v2): - dotprod=2*float(np.dot( v1, v2.T )) - return dotprod/float(len(v1)+len(v2)) - -def jaccard(v1,v2): - dotprod=float(np.dot( v1, v2.T )) - return dotprod/float(len(v1)+len(v2)-dotprod) - -def cosine(v1,v2): - dotprod=float(np.dot( v1, v2.T )) - norm1=float(np.dot( v1, v1.T )) - norm2=float(np.dot( v2, v2.T )) - return ((dotprod)/(np.sqrt(norm1*norm2)+0.00001)) - -def dotprod(v1,v2): - return float(np.dot( v1, v2.T )) - -def sim1(v1,v2,base=5.0): - return np.power(base,dotprod(v1,v2)) - -def softmax(v1,v2): - return sim1(v1,v2,np.e) - -def create_similarity_matrix(sim_func,slang,tlang,normalize=True): - - dim=langinfo.COORDINATED_RANGE_END_INCLUSIVE-langinfo.COORDINATED_RANGE_START_INCLUSIVE+1 - sim_mat=np.zeros((dim,dim)) - - for offset1 in range(langinfo.COORDINATED_RANGE_START_INCLUSIVE, langinfo.COORDINATED_RANGE_END_INCLUSIVE+1): - v1=get_phonetic_feature_vector(offset_to_char(offset1,slang),slang) - for offset2 in range(langinfo.COORDINATED_RANGE_START_INCLUSIVE, langinfo.COORDINATED_RANGE_END_INCLUSIVE+1): - v2=get_phonetic_feature_vector(offset_to_char(offset2,tlang),tlang) - sim_mat[offset1,offset2]=sim_func(v1,v2) - - if normalize: - sums=np.sum(sim_mat, axis=1) - sim_mat=(sim_mat.transpose()/sums).transpose() - - return sim_mat - diff --git a/spaces/shatrunjai/FutureMeMotivator/app.py b/spaces/shatrunjai/FutureMeMotivator/app.py deleted file mode 100644 index bfb226df8c82722a067f1ad590958a02fcdee237..0000000000000000000000000000000000000000 --- a/spaces/shatrunjai/FutureMeMotivator/app.py +++ /dev/null @@ -1,143 +0,0 @@ -""" -Title: Future Me Motivator -Author: Jai Singh -Date: 08/10/2023 - -""" -# Import libraries -import replicate -import os -import urllib.request -import gradio as gr -import subprocess -import platform -import time -from pathlib import Path -from TTS.api import TTS - -# Set the REPLICATE_API_TOKEN environment variable -os.environ["REPLICATE_API_TOKEN"] = "r8_TbFUDqMSF0a7wit5ufqLWIfxnEp5VJ82fhtRc" - -# Init TTS -tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False, gpu=False) - - -# Set the relative path variable up -script_dir = Path(__file__).parent -print("Script directory: ") -print(script_dir) - -# Relative path for the input picture -input_data_relative_path = 'content/input' -input_data_relative_path_full = (script_dir/input_data_relative_path).resolve() -print(input_data_relative_path_full) - -# Relative path for the output aged picture -output_image_relative_path = 'content/output/aged_picture.jpg' -output_image_relative_path_full = (script_dir/output_image_relative_path).resolve() -print(output_image_relative_path_full) - -# Relative path for the output aged video -output_video_relative_path = 'content/output/aged_picture.jpg' -output_video_relative_path_full = (script_dir/output_video_relative_path).resolve() -print(output_video_relative_path_full) - -# Relative path for the output audio fromt ext -output_audio_relative_path = 'content/output/output.wav' -output_audio_relative_path_full = (script_dir/output_audio_relative_path).resolve() -print(output_audio_relative_path_full) - -# Function that creates text to speech from input speech. 
Saves it as output.wav -def text_to_speech(text_input: str, voice_input: str): - - # Check and switch the input type - if voice_input == "Male": - speaker_wav = tts.speakers[5] - else: - speaker_wav = tts.speakers[1] - - # Generate TTS output file - tts.tts_to_file(text=text_input, speaker=speaker_wav, language=tts.languages[0], file_path="content/output/output.wav") - - - return output_audio_relative_path_full - - -# This function creates the aged video -def future_me_creator(source_image, age_input: int, text_input: str, voice_input: str): - - # Check if previous outputs exsists then delete them - if os.path.exists(output_image_relative_path_full): - print("Found older aged image ----- Deleting") - os.remove(output_image_relative_path_full) - - # Check if previous outputs exsists then delete them - if os.path.exists(output_audio_relative_path_full): - print("Found older output.wav ----- Deleting") - os.remove(output_audio_relative_path_full) - - # Check if previous outputs exsists then delete them - if os.path.exists(output_video_relative_path_full): - print("Found older aged video ----- Deleting") - os.remove(output_video_relative_path_full) - - # Create new aged picture - urllib.request.urlretrieve(replicate.run( - "yuval-alaluf/sam:9222a21c181b707209ef12b5e0d7e94c994b58f01c7b2fec075d2e892362f13c", - input={"image": open(source_image, "rb"), "target_age": str(age_input)} - ) , "content/output/aged_picture.jpg") - - - # Create new aged talking video - urllib.request.urlretrieve(replicate.run( - "cjwbw/sadtalker:3aa3dac9353cc4d6bd62a8f95957bd844003b401ca4e4a9b33baa574c549d376", - input={"source_image": open("content/output/aged_picture.jpg", "rb"), - "driven_audio": open(text_to_speech(text_input, voice_input), "rb"), - "ref_eyeblink file": open("content/input/04.mp4", "rb"), - "ref_pose": open("content/input/04.mp4", "rb"), - "still": False, - } - ) , "content/output/aged_video.mp4") - - # Return the aged video - return("content/output/aged_video.mp4") - -# Main gradio app -with gr.Blocks() as demo: - #gr.Markdown("

FutureYouMotivator App

") - #gr.Markdown("

Created by Jai Singh for Hackathon2023

") - #gr.Markdown(Path('docs/header.md').read_text()) - #gr.Markdown(""" ![]('future_youmotivator4.png') """) - gr.Markdown(Path('docs/description.md').read_text()) - #gr.Markdown("
This web app helps you make better decisions when you see your senior self!
") - - with gr.Row(): - with gr.Column(variant="panel"): - with gr.Row(variant="panel"): - source_image = gr.Image(label="Upload image or try an example", source="upload", type="filepath", elem_id="img2img_image").style(width=512) - examples = gr.Examples(examples=['content/examples/Jai.png','content/examples/Mert.jpg','content/examples/Michael.jpeg','content/examples/Saeju.jpg' ],inputs=[source_image]) - with gr.Row(variant="panel"): - text_input = gr.Textbox(label="What should the future you say? Write text or use an example", value = "Please be healthier, it will really help me!", max_lines=3) - examples = gr.Examples(examples=["Hey younger you, great job starting on Noom!", "Im glad we took these steps to be healthier!", "Keep eating them greens and buying Bitcoins"], - inputs=[text_input]) - - with gr.Column(variant="panel"): - with gr.Row(variant="panel"): - age_input = gr.Radio(label="Select Future Age", choices=[10,20,30,40,50,60,70,80], value=60) - voice_input = gr.Radio(label="Select Gender for Voice", choices=["Male", "Female"], value="Male") - - with gr.Row(): - with gr.Column(): - btn_clear = gr.Button("Clear") - with gr.Column(): - btn = gr.Button("Submit", variant="primary") - with gr.Column(variant="panel"): - video_output = gr.Video(label="Video from the Future You [downloadable]") - - btn.click(future_me_creator, inputs=[source_image, age_input, text_input, voice_input], outputs = video_output) - - gr.Markdown(Path('docs/footer.md').read_text()) - -demo.launch(enable_queue=False) - - diff --git a/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/utils/misc.py b/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/utils/misc.py deleted file mode 100644 index 52e2c0343f972d5bd5c735c5cfbf8b28bca6dd55..0000000000000000000000000000000000000000 --- a/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/utils/misc.py +++ /dev/null @@ -1,174 +0,0 @@ -import cv2 -import os -import os.path as osp -import numpy as np -from PIL import Image -import torch -from torch.hub import download_url_to_file, get_dir -from urllib.parse import urlparse -# from basicsr.utils.download_util import download_file_from_google_drive -# import gdown - - -ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - - -def download_pretrained_models(file_ids, save_path_root): - os.makedirs(save_path_root, exist_ok=True) - - for file_name, file_id in file_ids.items(): - file_url = 'https://drive.google.com/uc?id='+file_id - save_path = osp.abspath(osp.join(save_path_root, file_name)) - if osp.exists(save_path): - user_response = input(f'{file_name} already exist. Do you want to cover it? Y/N\n') - if user_response.lower() == 'y': - print(f'Covering {file_name} to {save_path}') - # gdown.download(file_url, save_path, quiet=False) - # download_file_from_google_drive(file_id, save_path) - elif user_response.lower() == 'n': - print(f'Skipping {file_name}') - else: - raise ValueError('Wrong input. Only accepts Y/N.') - else: - print(f'Downloading {file_name} to {save_path}') - # gdown.download(file_url, save_path, quiet=False) - # download_file_from_google_drive(file_id, save_path) - - -def imwrite(img, file_path, params=None, auto_mkdir=True): - """Write image to file. - - Args: - img (ndarray): Image array to be written. - file_path (str): Image file path. - params (None or list): Same as opencv's :func:`imwrite` interface. - auto_mkdir (bool): If the parent folder of `file_path` does not exist, - whether to create it automatically. - - Returns: - bool: Successful or not. 
- """ - if auto_mkdir: - dir_name = os.path.abspath(os.path.dirname(file_path)) - os.makedirs(dir_name, exist_ok=True) - return cv2.imwrite(file_path, img, params) - - -def img2tensor(imgs, bgr2rgb=True, float32=True): - """Numpy array to tensor. - - Args: - imgs (list[ndarray] | ndarray): Input images. - bgr2rgb (bool): Whether to change bgr to rgb. - float32 (bool): Whether to change to float32. - - Returns: - list[tensor] | tensor: Tensor images. If returned results only have - one element, just return tensor. - """ - - def _totensor(img, bgr2rgb, float32): - if img.shape[2] == 3 and bgr2rgb: - if img.dtype == 'float64': - img = img.astype('float32') - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = torch.from_numpy(img.transpose(2, 0, 1)) - if float32: - img = img.float() - return img - - if isinstance(imgs, list): - return [_totensor(img, bgr2rgb, float32) for img in imgs] - else: - return _totensor(imgs, bgr2rgb, float32) - - -def load_file_from_url(url, model_dir=None, progress=True, file_name=None): - """Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py - """ - if model_dir is None: - hub_dir = get_dir() - model_dir = os.path.join(hub_dir, 'checkpoints') - - os.makedirs(os.path.join(ROOT_DIR, model_dir), exist_ok=True) - - parts = urlparse(url) - filename = os.path.basename(parts.path) - if file_name is not None: - filename = file_name - cached_file = os.path.abspath(os.path.join(ROOT_DIR, model_dir, filename)) - if not os.path.exists(cached_file): - print(f'Downloading: "{url}" to {cached_file}\n') - download_url_to_file(url, cached_file, hash_prefix=None, progress=progress) - return cached_file - - -def scandir(dir_path, suffix=None, recursive=False, full_path=False): - """Scan a directory to find the interested files. - Args: - dir_path (str): Path of the directory. - suffix (str | tuple(str), optional): File suffix that we are - interested in. Default: None. - recursive (bool, optional): If set to True, recursively scan the - directory. Default: False. - full_path (bool, optional): If set to True, include the dir_path. - Default: False. - Returns: - A generator for all the interested files with relative paths. 
- """ - - if (suffix is not None) and not isinstance(suffix, (str, tuple)): - raise TypeError('"suffix" must be a string or tuple of strings') - - root = dir_path - - def _scandir(dir_path, suffix, recursive): - for entry in os.scandir(dir_path): - if not entry.name.startswith('.') and entry.is_file(): - if full_path: - return_path = entry.path - else: - return_path = osp.relpath(entry.path, root) - - if suffix is None: - yield return_path - elif return_path.endswith(suffix): - yield return_path - else: - if recursive: - yield from _scandir(entry.path, suffix=suffix, recursive=recursive) - else: - continue - - return _scandir(dir_path, suffix=suffix, recursive=recursive) - - -def is_gray(img, threshold=10): - img = Image.fromarray(img) - if len(img.getbands()) == 1: - return True - img1 = np.asarray(img.getchannel(channel=0), dtype=np.int16) - img2 = np.asarray(img.getchannel(channel=1), dtype=np.int16) - img3 = np.asarray(img.getchannel(channel=2), dtype=np.int16) - diff1 = (img1 - img2).var() - diff2 = (img2 - img3).var() - diff3 = (img3 - img1).var() - diff_sum = (diff1 + diff2 + diff3) / 3.0 - if diff_sum <= threshold: - return True - else: - return False - -def rgb2gray(img, out_channel=3): - r, g, b = img[:,:,0], img[:,:,1], img[:,:,2] - gray = 0.2989 * r + 0.5870 * g + 0.1140 * b - if out_channel == 3: - gray = gray[:,:,np.newaxis].repeat(3, axis=2) - return gray - -def bgr2gray(img, out_channel=3): - b, g, r = img[:,:,0], img[:,:,1], img[:,:,2] - gray = 0.2989 * r + 0.5870 * g + 0.1140 * b - if out_channel == 3: - gray = gray[:,:,np.newaxis].repeat(3, axis=2) - return gray diff --git a/spaces/sidharthism/fashion-eye-try-on-demo/app.py b/spaces/sidharthism/fashion-eye-try-on-demo/app.py deleted file mode 100644 index 50b88041ab805c493e288fd5dcec5a0955c9443b..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye-try-on-demo/app.py +++ /dev/null @@ -1,447 +0,0 @@ -# -*- coding: utf-8 -*- -"""With os FASHION-EYE_VITON-HD Integrated Full Model Final.ipynb - -Automatically generated by Colaboratory. 
-""" - -# !rm -rf sample_data -# !rm -rf fashion-eye-try-on/ - -BASE_DIR = "/home/user/app/fashion-eye-try-on" - -import os -os.system(f"git clone https://huggingface.co/spaces/sidharthism/fashion-eye-try-on {BASE_DIR}") - -# !pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113 -# !pip install -r /content/fashion-eye-try-on/requirements.txt -os.system("pip install torch>=1.6.0 torchvision -f https://download.pytorch.org/whl/cu92/torch_stable.html") -os.system("pip install opencv-python torchgeometry gdown Pillow") - -os.system(f"cd {BASE_DIR}") - -# Download and save checkpoints for cloth mask generation -os.system(f"rm -rf {BASE_DIR}/cloth_segmentation/checkpoints/") -os.system(f"gdown --id 1mhF3yqd7R-Uje092eypktNl-RoZNuiCJ -O {BASE_DIR}/cloth_segmentation/checkpoints/") - -os.system(f"git clone https://github.com/shadow2496/VITON-HD {BASE_DIR}/VITON-HD") - -#checkpoints -os.system(f"gdown 1RM4OthSM6V4r7kWCu8SbPIPY14Oz8B2u -O {BASE_DIR}/VITON-HD/checkpoints/alias_final.pth") -os.system(f"gdown 1MBHBddaAs7sy8W40jzLmNL83AUh035F1 -O {BASE_DIR}/VITON-HD/checkpoints/gmm_final.pth") -os.system(f"gdown 1MBHBddaAs7sy8W40jzLmNL83AUh035F1 -O {BASE_DIR}/VITON-HD/checkpoints/gmm_final.pth") -os.system(f"gdown 17U1sooR3mVIbe8a7rZuFIF3kukPchHfZ -O {BASE_DIR}/VITON-HD/checkpoints/seg_final.pth") -#test data -os.system(f"gdown 1ncEHn_6liOot8sgt3A2DOFJBffvx8tW8 -O {BASE_DIR}/VITON-HD/datasets/test_pairs.txt") -os.system(f"gdown 1ZA2C8yMOprwc0TV4hvrt0X-ljZugrClq -O {BASE_DIR}/VITON-HD/datasets/test.zip") - -os.system(f"unzip {BASE_DIR}/VITON-HD/datasets/test.zip -d {BASE_DIR}/VITON-HD/datasets/") - -#@title To clear all the already existing test data -# !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/image -# !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/image-parse -# !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/cloth -# !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/cloth-mask -# !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/openpose-img -# !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/openpose-json - -"""Paddle - - - -""" - -os.system(f"git clone https://huggingface.co/spaces/sidharthism/pipeline_paddle {BASE_DIR}/pipeline_paddle") - -# Required for paddle and gradio (Jinja2 dependency) -os.system("pip install paddlepaddle-gpu pymatting") -os.system(f"pip install -r {BASE_DIR}/pipeline_paddle/requirements.txt") - -os.system(f"rm -rf {BASE_DIR}/pipeline_paddle/models") -if not os.path.exists(f"{BASE_DIR}/pipeline_paddle/models/ppmatting-hrnet_w18-human_1024.pdparams"): - if not os.path.exists(f"{BASE_DIR}/pipeline_paddle/models"): - os.mkdir(f"{BASE_DIR}/pipeline_paddle/models") - os.system(f"wget https://paddleseg.bj.bcebos.com/matting/models/ppmatting-hrnet_w18-human_1024.pdparams -O {BASE_DIR}/pipeline_paddle/models/ppmatting-hrnet_w18-human_1024.pdparams") - # !wget "https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz" -O "/content/fashion-eye-try-on/pipeline_paddle/models/hrnet_w18_ssld.tar.gz" - -"""Initialization - -Pose estimator - open pose -""" - -# Clone openpose model repo -# os.system(f"git clone https://github.com/CMU-Perceptual-Computing-Lab/openpose.git {BASE_DIR}/openpose") - -#@ Building and Installation of openpose model -import os -import subprocess -from os.path import exists, join, basename, splitext - - -project_name = f"{BASE_DIR}/openpose" -print(project_name) -if not exists(project_name): - # see: 
https://github.com/CMU-Perceptual-Computing-Lab/openpose/issues/949 - # install new CMake becaue of CUDA10 - os.system(f"wget -q https://cmake.org/files/v3.13/cmake-3.13.0-Linux-x86_64.tar.gz") - os.system(f"sudo tar xfz cmake-3.13.0-Linux-x86_64.tar.gz --strip-components=1 -C /usr/local") - # clone openpose - os.system(f"cd {BASE_DIR} && git clone -q --depth 1 https://github.com/CMU-Perceptual-Computing-Lab/openpose.git") - os.system("sudo sed -i 's/execute_process(COMMAND git checkout master WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\/3rdparty\/caffe)/execute_process(COMMAND git checkout f019d0dfe86f49d1140961f8c7dec22130c83154 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\/3rdparty\/caffe)/g' %s/openpose/CMakeLists.txt" % (BASE_DIR, )) - # install system dependencies - os.system("sudo apt-get -qq install -y libatlas-base-dev libprotobuf-dev libleveldb-dev libsnappy-dev libhdf5-serial-dev protobuf-compiler libgflags-dev libgoogle-glog-dev liblmdb-dev opencl-headers ocl-icd-opencl-dev libviennacl-dev") - # build openpose - print("Building openpose ... May take nearly 15 mins to build ...") - os.system(f"sudo cd {BASE_DIR}/openpose && rm -rf {BASE_DIR}/openpose/build || true && mkdir {BASE_DIR}/openpose/build && cd {BASE_DIR}/openpose/build && cmake .. && make -j`nproc`") - print("Openpose successfully build and installed.") - # subprocess.Popen(f"cd {BASE_DIR}/openpose && rm -rf {BASE_DIR}/openpose/build || true && mkdir {BASE_DIR}/openpose/build && cd {BASE_DIR}/openpose/build && cmake .. && make -j`nproc`") - # subprocess.call(["cd", f"{BASE_DIR}/openpose"]) - # subprocess.check_output(["rm", "-rf", f"{BASE_DIR}/openpose/build || true"]) - # subprocess.check_output(["mkdir", f"{BASE_DIR}/openpose/build"]) - # subprocess.check_output(["cd", f"{BASE_DIR}/openpose/build"]) - # subprocess.check_output(["cmake", ".."]) - # subprocess.check_output(["make","-j`nproc`"]) - -# !cd {BASE_DIR}/openpose && rm -rf {BASE_DIR}/openpose/build || true && mkdir {BASE_DIR}/openpose/build && cd {BASE_DIR}/openpose/build && cmake .. 
&& make -j`nproc` - -"""Self correction human parsing""" - -os.system(f"git clone https://github.com/PeikeLi/Self-Correction-Human-Parsing.git {BASE_DIR}/human_parse") - -os.system(f"cd {BASE_DIR}/human_parse") -os.system(f"mkdir {BASE_DIR}/human_parse/checkpoints") -# !mkdir inputs -# !mkdir outputs - -dataset = 'lip' - -import gdown - -dataset_url = 'https://drive.google.com/uc?id=1k4dllHpu0bdx38J7H28rVVLpU-kOHmnH' -output = f'{BASE_DIR}/human_parse/checkpoints/final.pth' -gdown.download(dataset_url, output, quiet=False) - -# For human parse -os.system("pip install ninja") - -"""Preprocessing - - -""" - -# png to jpg -def convert_to_jpg(path): - from PIL import Image - import os - if os.path.exists(path): - cl = Image.open(path) - jpg_path = path[:-4] + ".jpg" - cl.save(jpg_path) - -def resize_img(path): - from PIL import Image - print(path) - im = Image.open(path) - im = im.resize((768, 1024), Image.BICUBIC) - im.save(path) - -def remove_ipynb_checkpoints(): - import os - os.system(f"rm -rf {BASE_DIR}/VITON-HD/datasets/test/image/.ipynb_checkpoints") - os.system(f"rm -rf {BASE_DIR}/VITON-HD/datasets/test/cloth/.ipynb_checkpoints") - os.system(f"rm -rf {BASE_DIR}/VITON-HD/datasets/test/cloth-mask/.ipynb_checkpoints") - -# os.chdir('/content/fashion-eye-try-on') -def preprocess(): - remove_ipynb_checkpoints() - for path in os.listdir(f'{BASE_DIR}/VITON-HD/datasets/test/image/'): - resize_img(f'{BASE_DIR}/VITON-HD/datasets/test/image/{path}') - for path in os.listdir(f'{BASE_DIR}/VITON-HD/datasets/test/cloth/'): - resize_img(f'{BASE_DIR}/VITON-HD/datasets/test/cloth/{path}') - # for path in os.listdir('/content/fashion-eye-try-on/VITON-HD/datasets/test/cloth-mask/'): - # resize_img(f'/content/fashion-eye-try-on/VITON-HD/datasets/test/cloth-mask/{path}') - -"""Paddle - removing background - -""" - -# PPMatting hrnet 1024 -# --fg_estimate True - for higher quality output but slower prediction -def upload_remove_background_and_save_person_image(person_img): - # !export CUDA_VISIBLE_DEVICES=0 - person_img = person_img.resize((768, 1024), Image.BICUBIC) - if os.path.exists(f"{BASE_DIR}/pipeline_paddle/image/person.jpg"): - os.remove(f"{BASE_DIR}/pipeline_paddle/image/person.jpg") - person_img.save(f"{BASE_DIR}/pipeline_paddle/image/person.jpg") - # resize_img(f'/content/fashion-eye-try-on/pipeline_paddle/image/person.jpg') - os.system(f"cd {BASE_DIR}/pipeline_paddle/") - os.system(f"python {BASE_DIR}/pipeline_paddle/bg_replace.py \ - --config {BASE_DIR}/pipeline_paddle/configs/ppmatting/ppmatting-hrnet_w18-human_1024.yml \ - --model_path {BASE_DIR}/pipeline_paddle/models/ppmatting-hrnet_w18-human_1024.pdparams \ - --image_path {BASE_DIR}/pipeline_paddle/image/person.jpg \ - --background 'w' \ - --save_dir {BASE_DIR}/VITON-HD/datasets/test/image \ - --fg_estimate True") - # --save_dir /content/fashion-eye-try-on/pipeline_paddle/output \ - try: - convert_to_jpg(f"{BASE_DIR}/VITON-HD/datasets/test/image/person.png") - # os.remove("/content/fashion-eye-try-on/pipeline_paddle/output/person_alpha.png") - os.remove(f"{BASE_DIR}/VITON-HD/datasets/test/image/person_alpha.png") - # os.remove("/content/fashion-eye-try-on/pipeline_paddle/output/person_rgba.png") - os.remove(f"{BASE_DIR}/VITON-HD/datasets/test/image/person_rgba.png") - os.system(f"cd {BASE_DIR}") - except Exception as e: - print(e) - os.system(f"cd {BASE_DIR}") - -#@title If multiple GPU available,uncomment and try this code -os.system("export CUDA_VISIBLE_DEVICES=0") - -# Openpose pose estimation -# Ubuntu and Mac -def estimate_pose(): - 
os.system(f"cd {BASE_DIR}/openpose && ./build/examples/openpose/openpose.bin --image_dir {BASE_DIR}/VITON-HD/datasets/test/image --write_json {BASE_DIR}/VITON-HD/datasets/test/openpose-json/ --display 0 --face --hand --render_pose 0") - os.system(f"cd {BASE_DIR}/openpose && ./build/examples/openpose/openpose.bin --image_dir {BASE_DIR}/VITON-HD/datasets/test/image --write_images {BASE_DIR}/VITON-HD/datasets/test/openpose-img/ --display 0 --hand --render_pose 1 --disable_blending true") - os.system(f"cd {BASE_DIR}") - # !cd /content/fashion-eye-try-on/openpose && ./build/examples/openpose/openpose.bin --image_dir /content/fashion-eye-try-on/pipeline_paddle/output/ --write_images /content/fashion-eye-try-on/openpose_img/ --display 0 --hand --render_pose 1 --disable_blending true - -# Run self correction human parser -# !python3 /content/fashion-eye-try-on/human_parse/simple_extractor.py --dataset 'lip' --model-restore '/content/fashion-eye-try-on/human_parse/checkpoints/final.pth' --input-dir '/content/fashion-eye-try-on/image' --output-dir '/content/fashion-eye-try-on/VITON-HD/datasets/test/image-parse' -def generate_human_segmentation_map(): - # remove_ipynb_checkpoints() - os.system(f"python3 {BASE_DIR}/human_parse/simple_extractor.py --dataset 'lip' --model-restore '{BASE_DIR}/human_parse/checkpoints/final.pth' --input-dir '{BASE_DIR}/VITON-HD/datasets/test/image' --output-dir '{BASE_DIR}/VITON-HD/datasets/test/image-parse'") - -# model_image = os.listdir('/content/fashion-eye-try-on/VITON-HD/datasets/test/image') -# cloth_image = os.listdir('/content/fashion-eye-try-on/VITON-HD/datasets/test/cloth') -# pairs = zip(model_image, cloth_image) - -# with open('/content/fashion-eye-try-on/VITON-HD/datasets/test_pairs.txt', 'w') as file: -# for model, cloth in pairs: -# file.write(f"{model} {cloth}\n") -def generate_test_pairs_txt(): - with open(f"{BASE_DIR}/VITON-HD/datasets/test_pairs.txt", 'w') as file: - file.write(f"person.jpg cloth.jpg\n") - -# VITON-HD -# Transfer the cloth to the model -def generate_viton_hd(): - os.system(f"python {BASE_DIR}/VITON-HD/test.py --name output --dataset_list {BASE_DIR}/VITON-HD/datasets/test_pairs.txt --dataset_dir {BASE_DIR}/VITON-HD/datasets/ --checkpoint_dir {BASE_DIR}/VITON-HD/checkpoints --save_dir {BASE_DIR}/") - -import sys -# To resolve ModuleNotFoundError during imports -if BASE_DIR not in sys.path: - sys.path.append(BASE_DIR) - sys.path.append(f"{BASE_DIR}/cloth_segmentation") - -from cloth_segmentation.networks import U2NET -import torchvision.transforms as transforms -import torch.nn.functional as F -import os -from PIL import Image -from collections import OrderedDict - -import torch - -device = 'cuda' if torch.cuda.is_available() else "cpu" - -if device == 'cuda': - torch.cuda.empty_cache() - -# for hugging face -# BASE_DIR = "/home/path/app" - -image_dir = 'cloth' -result_dir = 'cloth_mask' -checkpoint_path = 'cloth_segmentation/checkpoints/cloth_segm_u2net_latest.pth' - - -def load_checkpoint_mgpu(model, checkpoint_path): - if not os.path.exists(checkpoint_path): - print("----No checkpoints at given path----") - return - model_state_dict = torch.load( - checkpoint_path, map_location=torch.device("cpu")) - new_state_dict = OrderedDict() - for k, v in model_state_dict.items(): - name = k[7:] # remove `module.` - new_state_dict[name] = v - - model.load_state_dict(new_state_dict) - print("----checkpoints loaded from path: {}----".format(checkpoint_path)) - return model - - -class Normalize_image(object): - """Normalize given tensor into given 
mean and standard dev - Args: - mean (float): Desired mean to substract from tensors - std (float): Desired std to divide from tensors - """ - - def __init__(self, mean, std): - assert isinstance(mean, (float)) - if isinstance(mean, float): - self.mean = mean - - if isinstance(std, float): - self.std = std - - self.normalize_1 = transforms.Normalize(self.mean, self.std) - self.normalize_3 = transforms.Normalize( - [self.mean] * 3, [self.std] * 3) - self.normalize_18 = transforms.Normalize( - [self.mean] * 18, [self.std] * 18) - - def __call__(self, image_tensor): - if image_tensor.shape[0] == 1: - return self.normalize_1(image_tensor) - - elif image_tensor.shape[0] == 3: - return self.normalize_3(image_tensor) - - elif image_tensor.shape[0] == 18: - return self.normalize_18(image_tensor) - - else: - assert "Please set proper channels! Normlization implemented only for 1, 3 and 18" - - -def get_palette(num_cls): - """ Returns the color map for visualizing the segmentation mask. - Args: - num_cls: Number of classes - Returns: - The color map - """ - n = num_cls - palette = [0] * (n * 3) - for j in range(0, n): - lab = j - palette[j * 3 + 0] = 0 - palette[j * 3 + 1] = 0 - palette[j * 3 + 2] = 0 - i = 0 - while lab: - palette[j * 3 + 0] = 255 - palette[j * 3 + 1] = 255 - palette[j * 3 + 2] = 255 - # palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) - # palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) - # palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) - i += 1 - lab >>= 3 - return palette - - -def generate_cloth_mask(img_dir, output_dir, chkpt_dir): - global image_dir - global result_dir - global checkpoint_path - image_dir = img_dir - result_dir = output_dir - checkpoint_path = chkpt_dir - transforms_list = [] - transforms_list += [transforms.ToTensor()] - transforms_list += [Normalize_image(0.5, 0.5)] - transform_rgb = transforms.Compose(transforms_list) - - net = U2NET(in_ch=3, out_ch=4) - with torch.no_grad(): - net = load_checkpoint_mgpu(net, checkpoint_path) - net = net.to(device) - net = net.eval() - - palette = get_palette(4) - - images_list = sorted(os.listdir(image_dir)) - for image_name in images_list: - img = Image.open(os.path.join( - image_dir, image_name)).convert('RGB') - img_size = img.size - img = img.resize((768, 768), Image.BICUBIC) - image_tensor = transform_rgb(img) - image_tensor = torch.unsqueeze(image_tensor, 0) - - output_tensor = net(image_tensor.to(device)) - output_tensor = F.log_softmax(output_tensor[0], dim=1) - output_tensor = torch.max(output_tensor, dim=1, keepdim=True)[1] - output_tensor = torch.squeeze(output_tensor, dim=0) - output_tensor = torch.squeeze(output_tensor, dim=0) - output_arr = output_tensor.cpu().numpy() - - output_img = Image.fromarray(output_arr.astype('uint8'), mode='L') - output_img = output_img.resize(img_size, Image.BICUBIC) - - output_img.putpalette(palette) - output_img = output_img.convert('L') - output_img.save(os.path.join(result_dir, image_name[:-4]+'.jpg')) - -os.system(f"cd {BASE_DIR}") -from PIL import Image -def upload_resize_generate_cloth_mask_and_move_to_viton_hd_test_inputs(cloth_img): - os.system(f"cd {BASE_DIR}") - cloth_img = cloth_img.resize((768, 1024), Image.BICUBIC) - cloth_img.save(f"{BASE_DIR}/cloth/cloth.jpg") - cloth_img.save(f"{BASE_DIR}/VITON-HD/datasets/test/cloth/cloth.jpg") - try: - generate_cloth_mask(f"{BASE_DIR}/cloth", f"{BASE_DIR}/cloth_mask", f"{BASE_DIR}/cloth_segmentation/checkpoints/cloth_segm_u2net_latest.pth") - cloth_mask_img = Image.open(f"{BASE_DIR}/cloth_mask/cloth.jpg") - 
cloth_mask_img.save(f"{BASE_DIR}/VITON-HD/datasets/test/cloth-mask/cloth.jpg") - except Exception as e: - print(e) - -# Gradio -os.system("pip install gradio") - -import gradio as gr -# import cv2 -from PIL import Image -IMAGEPATH='/content/fashion-eye-try-on/VITON-HD/datasets/test/image' -CLOTHPATH='/content/fashion-eye-try-on/VITON-HD/datasets/test/cloth' -CLOTHMASKPATH='/content/fashion-eye-try-on/VITON-HD/datasets/test/image' - -from threading import Thread - -def fashion_eye_tryon(person_img, cloth_img): - result_img = person_img - # img.save(IMAGEPATH + "person.jpg") - # dress.save(CLOTHPATH + "cloth.jpg") - - # txt = open("/content/VITON-HD/datasets/test_pairs.txt", "a") - # txt.write("person_img.jpg dress_img.jpg\n") - # txt.close() - # # result - # print(person_img.info, cloth_img.info) - # p_t1 = Thread(target=upload_remove_background_and_save_person_image, args=(person_img, )) - # c_t2 = Thread(target=upload_resize_generate_cloth_mask_and_move_to_viton_hd_test_inputs, args=(cloth_img, )) - # p_t1.start() - # c_t2.start() - # p_t1.join() - # c_t2.join() - # Estimate pose - try: - upload_resize_generate_cloth_mask_and_move_to_viton_hd_test_inputs(cloth_img) - upload_remove_background_and_save_person_image(person_img) - remove_ipynb_checkpoints() - estimate_pose() - # Generate human parse - remove_ipynb_checkpoints() - generate_human_segmentation_map() - generate_test_pairs_txt() - remove_ipynb_checkpoints() - generate_viton_hd() - for p in ["/content/fashion-eye-try-on/output/person_cloth.jpg", "/content/fashion-eye-try-on/output/person.jpg_cloth.jpg"]: - if os.path.exists(p): - result_img = Image.open(p) - except Exception as e: - print(e) - return - return result_img - -# res = fashion_eye_tryon("", "") -# res.show() -gr.Interface(fn=fashion_eye_tryon, - inputs=[gr.Image(type = "pil", label="Your image"), gr.Image(type="pil", label="Dress")], - outputs="image" - ).launch(debug=True) - -# !pip freeze > /content/requirements_final.txt \ No newline at end of file diff --git a/spaces/simonduerr/ProteinMPNN/ProteinMPNN/vanilla_proteinmpnn/examples/submit_example_7.sh b/spaces/simonduerr/ProteinMPNN/ProteinMPNN/vanilla_proteinmpnn/examples/submit_example_7.sh deleted file mode 100644 index 440c720158a993833f1f1964618608a4cee2c9bc..0000000000000000000000000000000000000000 --- a/spaces/simonduerr/ProteinMPNN/ProteinMPNN/vanilla_proteinmpnn/examples/submit_example_7.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --mem=32g -#SBATCH --gres=gpu:rtx2080:1 -#SBATCH -c 3 -#SBATCH --output=example_7.out - -source activate mlfold - -folder_with_pdbs="../PDB_complexes/pdbs/" - -output_dir="../PDB_complexes/example_7_outputs" -if [ ! 
-d $output_dir ] -then - mkdir -p $output_dir -fi - - -path_for_parsed_chains=$output_dir"/parsed_pdbs.jsonl" -path_for_assigned_chains=$output_dir"/PDB_complexes/assigned_pdbs.jsonl" -path_for_bias=$output_dir"/bias_pdbs.jsonl" -AA_list="G P A" -bias_list="40.1 0.3 -0.05" #for G P A respectively; global AA bias in the logit space -chains_to_design="A B" - - -python ../helper_scripts/parse_multiple_chains.py --input_path=$folder_with_pdbs --output_path=$path_for_parsed_chains - -python ../helper_scripts/assign_fixed_chains.py --input_path=$path_for_parsed_chains --output_path=$path_for_assigned_chains --chain_list "$chains_to_design" - -python ../helper_scripts/make_bias_AA.py --output_path=$path_for_bias --AA_list="$AA_list" --bias_list="$bias_list" - -python ../protein_mpnn_run.py \ - --jsonl_path $path_for_parsed_chains \ - --chain_id_jsonl $path_for_assigned_chains \ - --out_folder $output_dir \ - --bias_AA_jsonl $path_for_bias \ - --num_seq_per_target 2 \ - --sampling_temp "0.1" \ - --batch_size 1 diff --git a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py b/spaces/simpie28/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py deleted file mode 100644 index af04e614c8f1ac43faf363b1a9f6bfd667fbde21..0000000000000000000000000000000000000000 --- a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py +++ /dev/null @@ -1,201 +0,0 @@ -import torch -import commons -import models - -import math -from torch import nn -from torch.nn import functional as F - -import modules -import attentions - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emotion_embedding = emotion_embedding - - if self.n_vocab != 0: - self.emb = nn.Embedding(n_vocab, hidden_channels) - if emotion_embedding: - self.emo_proj = nn.Linear(1024, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, emotion_embedding=None): - if self.n_vocab != 0: - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - if emotion_embedding is not None: - print("emotion added") - x = x + self.emo_proj(emotion_embedding.unsqueeze(1)) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - 
self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class SynthesizerTrn(models.SynthesizerTrn): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - emotion_embedding=False, - ONNX_dir="./ONNX_net/", - **kwargs): - - super().__init__( - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=n_speakers, - gin_channels=gin_channels, - use_sdp=use_sdp, - **kwargs - ) - self.ONNX_dir = ONNX_dir - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, - emotion_embedding=None): - from ONNXVITS_utils import runonnx - with torch.no_grad(): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) - - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - # logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - logw = runonnx(f"{self.ONNX_dir}dp.onnx", x=x.numpy(), x_mask=x_mask.numpy(), g=g.numpy()) - logw = torch.from_numpy(logw[0]) - - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - - # z = self.flow(z_p, y_mask, g=g, reverse=True) - z = runonnx(f"{self.ONNX_dir}flow.onnx", z_p=z_p.numpy(), y_mask=y_mask.numpy(), g=g.numpy()) - z = torch.from_numpy(z[0]) - - # o = self.dec((z * y_mask)[:,:,:max_len], g=g) - o = runonnx(f"{self.ONNX_dir}dec.onnx", z_in=(z * y_mask)[:, :, :max_len].numpy(), g=g.numpy()) - o = torch.from_numpy(o[0]) - - return o, attn, y_mask, (z, z_p, m_p, logs_p) \ No newline at 
end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Brotato Extatonion The Ultimate Guide to This Amazing Mod for Brotato Game.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Brotato Extatonion The Ultimate Guide to This Amazing Mod for Brotato Game.md deleted file mode 100644 index b89fbbe376f5e0467bdaa1dedb87fca7bc875f90..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Brotato Extatonion The Ultimate Guide to This Amazing Mod for Brotato Game.md +++ /dev/null @@ -1,134 +0,0 @@ - -

Brotato Extatonion APK: A Fun and Challenging Shooter Roguelite Game

-

If you are looking for a new and exciting game to play on your Android device, you might want to check out Brotato Extatonion APK. This is a shooter roguelite game where you play as a potato named Brotato who has to fight off hordes of aliens with up to six weapons at a time. Sounds crazy, right? Well, it is! But it is also a lot of fun and very addictive. In this article, we will tell you everything you need to know about Brotato Extatonion APK, including what it is, how to download and install it, why you should play it, and some FAQs.

-

What is Brotato Extatonion APK?

-

Brotato Extatonion APK is a game developed by Erabit Studios and released in June 2023. It is a top-down arena shooter roguelite where you play as a potato who has survived an accident on an unknown planet. Your goal is to survive until help arrives by defeating waves of aliens with different weapons and items.

-

brotato extatonion apk


Download: https://ssurll.com/2uNSEg



-

The story behind the game

-

The game does not have a detailed story or plot, but it does have some humor and personality. You play as Brotato, a potato who was part of a space mission that went wrong. You crash-landed on an alien planet where you are surrounded by hostile creatures. Luckily, you have some weapons and items that you can use to defend yourself. You also have a radio that occasionally receives messages from your friends or enemies. These messages add some flavor and comedy to the game.

-

The gameplay and features

-

The game is divided into waves that last from 20 to 90 seconds each. During each wave, you have to move around the map and shoot the aliens that spawn from different directions. You can use up to six weapons at a time, which you can switch by tapping on the screen. You can also collect materials that drop from the enemies or the environment. These materials can be used to buy items from the shop between waves or to upgrade your weapons.

-

The game has dozens of characters that you can unlock and play as. Each character has different traits and abilities that affect your gameplay. For example, some characters are faster, stronger, luckier, or crazier than others. You can also customize your runs by choosing different items and weapons from the shop. There are hundreds of items and weapons to choose from, ranging from flamethrowers, SMGs, rocket launchers, or sticks and stones.

-

The game also has some roguelite elements that make each run unique and unpredictable. For example, the map layout, the enemy types, the item drops, and the events are randomly generated each time you play. You can also encounter different bosses that have their own patterns and attacks. If you die during a run, you lose all your progress and have to start over.

-

The difficulty and accessibility options

-

The game is not easy by any means. It requires fast reflexes, strategic thinking, and good aim to survive the alien invasion. The game also has a permadeath system, which means that you have to start from scratch every time you die. However, the game also has some accessibility options that you can adjust to suit your preferences and needs. For example, you can change the size and position of the buttons, the sensitivity and speed of the movement and aiming, the volume and brightness of the sound and graphics, and the difficulty level of the enemies and items. You can also enable or disable some features such as blood, gore, vibration, or auto-fire.

-

How to download and install Brotato Extatonion APK?

-

Brotato Extatonion APK is not available on the Google Play Store or any other official app store. This is because the game is still in development and has not been officially released yet. However, you can still download and install the game from some third-party sources that offer the APK file of the game. Here is how to do it:

-


-

The requirements and permissions

-

Before you download and install Brotato Extatonion APK, you need to make sure that your device meets the minimum requirements and permissions for the game. These are:

-
    -
  • Android 5.0 or higher
  • At least 100 MB of free storage space
  • Internet connection
  • Allow installation of apps from unknown sources
-

To enable installation of apps from unknown sources, you need to go to your device settings, then security, then toggle on the option that says "Unknown sources" or "Install unknown apps". This will allow you to install apps that are not from the official app store.
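By the way, if you have a computer nearby, you can double-check the first two requirements above before touching any settings. The short Python sketch below is purely optional and is not something the game needs; it assumes the standard Android adb tool (from the platform-tools package) is installed on your computer and that USB debugging is enabled on your phone.

```python
# Optional pre-flight check over adb (assumes adb is on PATH and USB debugging is enabled).
import subprocess

def adb(*args: str) -> str:
    """Run an adb command and return its trimmed output."""
    result = subprocess.run(["adb", *args], capture_output=True, text=True, check=True)
    return result.stdout.strip()

if __name__ == "__main__":
    # Android release string, e.g. "11"; the article lists 5.0 or higher as the minimum.
    print("Android version:", adb("shell", "getprop", "ro.build.version.release"))
    # Rough look at free space on the data partition (around 100 MB is needed).
    print(adb("shell", "df", "/data"))
```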

-

The steps to follow

-

Once you have checked the requirements and permissions, you can follow these steps to download and install Brotato Extatonion APK:

-
    -
  1. Find a reliable source that offers the APK file of Brotato Extatonion. You can search online or use one of these links: . Make sure that the source is trustworthy and does not contain any malware or viruses.
  2. Download the APK file to your device. You can use your browser or a file manager app to do this. The file size is about 80 MB.
  3. Locate the APK file on your device. You can use your file manager app or your downloads folder to find it.
  4. Tap on the APK file to start the installation process. You may see a warning message that says "This type of file can harm your device". Ignore it and tap on "Install anyway" or "OK".
  5. Wait for the installation to finish. It may take a few seconds or minutes depending on your device speed.
  6. Once the installation is done, you can launch the game by tapping on its icon on your home screen or app drawer. (If you prefer to install from a computer over USB, see the sketch right after this list.)
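If you would rather push the file from a computer than tap through the phone's file manager, the sketch below does the same installation over USB. It is only an illustration: it assumes adb is installed and USB debugging is enabled, and the APK file name is an example, so use whatever name your download actually has.

```python
# Hypothetical sideload over USB; the APK path below is an example, not a real file name.
import subprocess
from pathlib import Path

APK_PATH = Path.home() / "Downloads" / "brotato-extatonion.apk"  # adjust to your download

def install_apk(apk: Path) -> None:
    if not apk.is_file():
        raise FileNotFoundError(f"APK not found: {apk}")
    # "-r" lets adb reinstall over an existing copy instead of failing.
    subprocess.run(["adb", "install", "-r", str(apk)], check=True)

if __name__ == "__main__":
    install_apk(APK_PATH)
```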
-

The sources to trust

-

As mentioned earlier, Brotato Extatonion APK is not available on the official app store, so you have to download it from third-party sources. However, not all sources are safe and reliable. Some may contain malware or viruses that can harm your device or steal your data. Therefore, you need to be careful when choosing a source to download Brotato Extatonion APK from.

-

Here are some tips to help you find a trustworthy source:

-
    -
  • Check the reviews and ratings of the source. See what other users have said about their experience with downloading and installing Brotato Extatonion APK from that source. Avoid sources that have low ratings, negative reviews, or no feedback at all.
  • Check the date and version of the APK file. See if the source offers the latest version of Brotato Extatonion APK, which is 1.0.6 as of June 2023. Avoid sources that offer outdated or modified versions of Brotato Extatonion APK.
  • Check the security and privacy policy of the source. See if the source respects your data and does not collect or share it with third parties without your consent. Avoid sources that ask for unnecessary permissions or access to your device features.
  • Use an antivirus or anti-malware app to scan the APK file before installing it. See if there are any threats or risks associated with Brotato Extatonion APK. Avoid installing Brotato Extatonion APK if there are any red flags or warnings. (A small file-checksum sketch follows this list as an extra, optional check.)
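One extra check worth mentioning: if the source you picked publishes a checksum for the file, you can confirm the download was not tampered with before installing it. The sketch below is a minimal example; the expected hash is a placeholder and the file name is an assumption, so substitute whatever values your source actually lists.

```python
# Hypothetical integrity check: compare the APK's SHA-256 digest with a published checksum.
import hashlib
from pathlib import Path

APK_PATH = Path.home() / "Downloads" / "brotato-extatonion.apk"  # example name
EXPECTED_SHA256 = "replace-with-the-checksum-published-by-your-source"  # placeholder

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so large APKs do not need to fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha256_of(APK_PATH)
    print("SHA-256:", actual)
    print("Matches published checksum:", actual == EXPECTED_SHA256)
```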
-

Why should you play Brotato Extatonion APK?

-

Brotato Extatonion APK is a game that offers a lot of fun and challenge for players who love shooter roguelite games. Here are some reasons why you should play Brotato Extatonion APK:

-

The pros and cons of the game

-

Like any other game, Brotato Extatonion APK has its strengths and weaknesses. Here are some of the pros and cons of the game that you should consider before playing it:

- - - - - - - - - - - - - - - - - - - - - - - - - -
| Pros | Cons |
| --- | --- |
| The game has a unique and quirky theme and style that makes it stand out from other shooter roguelite games. | The game is still in development and may have some bugs or glitches that affect the gameplay or performance. |
| The game has a lot of variety and replay value in terms of characters, weapons, items, enemies, bosses, maps, and events. | The game is very challenging and may frustrate some players who are not used to the permadeath system or the random elements. |
| The game has a simple and intuitive control scheme that allows you to move and shoot with ease. | The game does not have a tutorial or a guide that explains the mechanics or the features of the game. |
| The game has some humor and personality that adds some flavor and comedy to the game. | The game does not have a detailed story or plot that gives context or motivation to the game. |
| The game has some accessibility options that allow you to adjust the game to your preferences and needs. | The game is not available on the official app store and requires downloading and installing from third-party sources. |
-

The ratings and reviews from other players

-

Brotato Extatonion APK is a relatively new game that has not received a lot of ratings and reviews from other players yet. However, based on the little feedback we have found online, most players seem to enjoy the game and recommend it to others. Here are some of the ratings and reviews from other players:

- - "This game is awesome! It's like a mix of Enter the Gungeon and Binding of Isaac with potatoes. I love the graphics, the music, the gameplay, everything. It's very addictive and fun. I can't wait for more updates and content." - 5 stars - "This game is very hard but very rewarding. It's not for casual players who want an easy win. You have to be strategic, fast, and lucky to survive. The game has a lot of variety and randomness that makes each run different and exciting. I like the characters, the weapons, the items, the enemies, everything." - 4 stars - "This game is hilarious and crazy. I love the concept of playing as a potato who fights aliens with guns. The game has a lot of humor and personality that makes it enjoyable. The game is also very challenging and requires skill and patience to beat. It's not for everyone, but if you like shooter roguelite games, you should give it a try." - 4 stars - "This game is decent but needs some improvement. It's fun and addictive, but it also has some issues. The game is still in development and has some bugs and glitches that affect the gameplay or performance. The game also does not have a tutorial or a guide that explains the mechanics or the features of the game. It can be confusing and frustrating at times." - 3 stars - "This game is not my cup of tea. It's too hard, too random, too chaotic for me. I don't like the permadeath system or the random elements that make the game unfair and unpredictable. I also don't like the theme or the style of the game. It's too silly and childish for me. I prefer more realistic and serious games." - 2 stars

The tips and tricks to master the game

-

If you want to play Brotato Extatonion APK better and have more fun with it, here are some tips and tricks that you can use to master the game:

- Experiment with different characters, weapons, items, and combinations. Each character has different traits and abilities that affect your gameplay. Each weapon has different stats and effects that suit different situations. Each item has different benefits and drawbacks that influence your run. Try different combinations to find what works best for you.
- Learn from your mistakes and failures. Each run is different and unpredictable, so you have to adapt to whatever happens. Don't get discouraged if you die or lose your progress. Use each run as an opportunity to learn from your mistakes and failures. See what went wrong, what you could have done better, what you should avoid or do next time.
- Be strategic, fast, and lucky. The game requires a balance of strategy, speed, and luck to survive the alien invasion. You have to plan your moves, choose your weapons and items wisely, and react quickly to the enemies and events. You also have to rely on some luck to get good drops, items, and events that can help you or hinder you.
- Have fun and enjoy the game. The game is meant to be a fun and challenging experience that tests your skills and luck. Don't take the game too seriously or get too frustrated by it. Enjoy the humor, the personality, the variety, and the randomness of the game. Have fun playing as a potato who fights aliens with guns.

Conclusion

-

Brotato Extatonion APK is a shooter roguelite game where you play as a potato who has to survive on an alien planet until help arrives. The game has a unique and quirky theme and style that makes it stand out from other games in the genre. The game also has a lot of variety and replay value in terms of characters, weapons, items, enemies, bosses, maps, and events. The game is very challenging and requires fast reflexes, strategic thinking, and good aim to beat. The game also has some accessibility options that allow you to adjust the game to your preferences and needs. The game is not available on the official app store and requires downloading and installing from third-party sources.

-

If you are looking for a new and exciting game to play on your Android device, you might want to give Brotato Extatonion APK a try. It is a fun and addictive game that will keep you entertained for hours. However, be prepared for some bugs, glitches, confusion, frustration, and permadeath along the way.

-

FAQs

-

Here are some of the frequently asked questions about Brotato Extatonion APK:

- Q: How much does Brotato Extatonion APK cost?
  A: Brotato Extatonion APK is a free-to-play game that does not require any payment or subscription to play. However, the game may have some optional in-app purchases or ads in the future.
- Q: Is Brotato Extatonion APK safe to download and install?
  A: Brotato Extatonion APK is safe to download and install as long as you use a reliable source that offers the latest version of the game without any malware or viruses. However, you should always scan the APK file before installing it and enable installation of apps from unknown sources on your device settings.
- Q: How can I contact the developer of Brotato Extatonion APK?
  A: You can contact the developer of Brotato Extatonion APK by sending an email to erabitstudios@gmail.com or by visiting their website at https://erabitstudios.com/. You can also follow them on their social media accounts such as Facebook, Twitter, Instagram, or YouTube.
- Q: How can I support the development of Brotato Extatonion APK?
  A: You can support the development of Brotato Extatonion APK by playing the game, giving feedback, rating and reviewing the game, sharing the game with your friends, or donating to the developer via PayPal or Patreon.
- Q: When will Brotato Extatonion APK be officially released?
  A: Brotato Extatonion APK is still in development and does not have a fixed release date yet. The developer plans to release more updates and content for the game in the future. You can stay tuned for more news and announcements by following the developer on their website or social media accounts.

-
-
\ No newline at end of file diff --git a/spaces/spenceryonce/gpt2/app.py b/spaces/spenceryonce/gpt2/app.py deleted file mode 100644 index 9d711f4ba26130db38dd7c5ede053d1362c007c2..0000000000000000000000000000000000000000 --- a/spaces/spenceryonce/gpt2/app.py +++ /dev/null @@ -1,43 +0,0 @@ -import gradio as gr - -title_gpt2 = "GPT2" -title_gpt2_med = "GPT2 Medium" -title_gpt2_large = "GPT2 Large" - -examples = [ - ["Who was the second president of the United States?"], - ["What is the distance between the Earth and the Sun?"], - ["Explain Hydrodynamics in layman's terms."], - ["What is the meaning of life?"], - ["What is the best way to learn about GPT-2?"], - ["Who was the first man to walk on the moon?"], -] - - -gpt2_interface = gr.load( - "models/gpt2", - inputs=gr.Textbox(lines=5, max_lines=10, label="Input Your Prompt Here"), - title=title_gpt2, - examples=examples, - theme=gr.themes.Base, -) - -gpt2_med_interface = gr.load( - "models/gpt2-medium", - inputs=gr.Textbox(lines=5, max_lines=10, label="Input Your Prompt Here"), - title=title_gpt2_med, - examples=examples, - theme=gr.themes.Base, -) - -gpt2_large_interface = gr.load( - "models/gpt2-large", - inputs=gr.Textbox(lines=5, max_lines=10, label="Input Your Prompt Here"), - title=title_gpt2_large, - examples=examples, - theme=gr.themes.Base, -) - -demo = gr.TabbedInterface([gpt2_interface, gpt2_med_interface, gpt2_large_interface], ["GPT2", "GPT2 Medium", "GPT2 Large"]) - -demo.launch() \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/data/data_utils.py b/spaces/sriramelango/Social_Classification_Public/data/data_utils.py deleted file mode 100644 index 7f843789138c62668f9e1c4e7fd44299fb5ef768..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/data/data_utils.py +++ /dev/null @@ -1,601 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -try: - from collections.abc import Iterable -except ImportError: - from collections import Iterable -import contextlib -import itertools -import logging -import re -import warnings -from typing import Optional, Tuple - -import numpy as np -import torch - -from fairseq.file_io import PathManager -from fairseq import utils -import os - -logger = logging.getLogger(__name__) - - -def infer_language_pair(path): - """Infer language pair from filename: .-.(...).idx""" - src, dst = None, None - for filename in PathManager.ls(path): - parts = filename.split(".") - if len(parts) >= 3 and len(parts[1].split("-")) == 2: - return parts[1].split("-") - return src, dst - - -def collate_tokens( - values, - pad_idx, - eos_idx=None, - left_pad=False, - move_eos_to_beginning=False, - pad_to_length=None, - pad_to_multiple=1, - pad_to_bsz=None, -): - """Convert a list of 1d tensors into a padded 2d tensor.""" - size = max(v.size(0) for v in values) - size = size if pad_to_length is None else max(size, pad_to_length) - if pad_to_multiple != 1 and size % pad_to_multiple != 0: - size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple) - - def copy_tensor(src, dst): - assert dst.numel() == src.numel() - if move_eos_to_beginning: - if eos_idx is None: - # if no eos_idx is specified, then use the last token in src - dst[0] = src[-1] - else: - dst[0] = eos_idx - dst[1:] = src[:-1] - else: - dst.copy_(src) - - if values[0].dim() == 1: - res = values[0].new(len(values), size).fill_(pad_idx) - elif values[0].dim() == 2: - assert move_eos_to_beginning is False - res = values[0].new(len(values), size, values[0].size(1)).fill_(pad_idx) - else: - raise NotImplementedError - - for i, v in enumerate(values): - copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)]) - return res - - -def load_indexed_dataset( - path, dictionary=None, dataset_impl=None, combine=False, default="cached" -): - """A helper function for loading indexed datasets. - - Args: - path (str): path to indexed dataset (e.g., 'data-bin/train') - dictionary (~fairseq.data.Dictionary): data dictionary - dataset_impl (str, optional): which dataset implementation to use. If - not provided, it will be inferred automatically. For legacy indexed - data we use the 'cached' implementation by default. - combine (bool, optional): automatically load and combine multiple - datasets. For example, if *path* is 'data-bin/train', then we will - combine 'data-bin/train', 'data-bin/train1', ... and return a - single ConcatDataset instance. 
- """ - import fairseq.data.indexed_dataset as indexed_dataset - from fairseq.data.concat_dataset import ConcatDataset - - datasets = [] - for k in itertools.count(): - path_k = path + (str(k) if k > 0 else "") - try: - path_k = indexed_dataset.get_indexed_dataset_to_local(path_k) - except Exception as e: - if "StorageException: [404] Path not found" in str(e): - logger.warning(f"path_k: {e} not found") - else: - raise e - - dataset_impl_k = dataset_impl - if dataset_impl_k is None: - dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k) - dataset = indexed_dataset.make_dataset( - path_k, - impl=dataset_impl_k or default, - fix_lua_indexing=True, - dictionary=dictionary, - ) - if dataset is None: - break - logger.info("loaded {:,} examples from: {}".format(len(dataset), path_k)) - datasets.append(dataset) - if not combine: - break - if len(datasets) == 0: - return None - elif len(datasets) == 1: - return datasets[0] - else: - return ConcatDataset(datasets) - - -@contextlib.contextmanager -def numpy_seed(seed, *addl_seeds): - """Context manager which seeds the NumPy PRNG with the specified seed and - restores the state afterward""" - if seed is None: - yield - return - if len(addl_seeds) > 0: - seed = int(hash((seed, *addl_seeds)) % 1e6) - state = np.random.get_state() - np.random.seed(seed) - try: - yield - finally: - np.random.set_state(state) - - -def collect_filtered(function, iterable, filtered): - """ - Similar to :func:`filter` but collects filtered elements in ``filtered``. - - Args: - function (callable): function that returns ``False`` for elements that - should be filtered - iterable (iterable): iterable to filter - filtered (list): list to store filtered elements - """ - for el in iterable: - if function(el): - yield el - else: - filtered.append(el) - - -def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False): - def compare_leq(a, b): - return a <= b if not isinstance(a, tuple) else max(a) <= b - - def check_size(idx): - if isinstance(max_positions, float) or isinstance(max_positions, int): - return size_fn(idx) <= max_positions - elif isinstance(max_positions, dict): - idx_size = size_fn(idx) - assert isinstance(idx_size, dict) - intersect_keys = set(max_positions.keys()) & set(idx_size.keys()) - return all( - all( - a is None or b is None or a <= b - for a, b in zip(idx_size[key], max_positions[key]) - ) - for key in intersect_keys - ) - else: - # For MultiCorpusSampledDataset, will generalize it later - if not isinstance(size_fn(idx), Iterable): - return all(size_fn(idx) <= b for b in max_positions) - return all( - a is None or b is None or a <= b - for a, b in zip(size_fn(idx), max_positions) - ) - - ignored = [] - itr = collect_filtered(check_size, indices, ignored) - indices = np.fromiter(itr, dtype=np.int64, count=-1) - return indices, ignored - - -def filter_by_size(indices, dataset, max_positions, raise_exception=False): - """ - [deprecated] Filter indices based on their size. - Use `FairseqDataset::filter_indices_by_size` instead. - - Args: - indices (List[int]): ordered list of dataset indices - dataset (FairseqDataset): fairseq dataset instance - max_positions (tuple): filter elements larger than this size. - Comparisons are done component-wise. - raise_exception (bool, optional): if ``True``, raise an exception if - any elements are filtered (default: False). - """ - warnings.warn( - "data_utils.filter_by_size is deprecated. 
" - "Use `FairseqDataset::filter_indices_by_size` instead.", - stacklevel=2, - ) - if isinstance(max_positions, float) or isinstance(max_positions, int): - if hasattr(dataset, "sizes") and isinstance(dataset.sizes, np.ndarray): - ignored = indices[dataset.sizes[indices] > max_positions].tolist() - indices = indices[dataset.sizes[indices] <= max_positions] - elif ( - hasattr(dataset, "sizes") - and isinstance(dataset.sizes, list) - and len(dataset.sizes) == 1 - ): - ignored = indices[dataset.sizes[0][indices] > max_positions].tolist() - indices = indices[dataset.sizes[0][indices] <= max_positions] - else: - indices, ignored = _filter_by_size_dynamic( - indices, dataset.size, max_positions - ) - else: - indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions) - - if len(ignored) > 0 and raise_exception: - raise Exception( - ( - "Size of sample #{} is invalid (={}) since max_positions={}, " - "skip this example with --skip-invalid-size-inputs-valid-test" - ).format(ignored[0], dataset.size(ignored[0]), max_positions) - ) - if len(ignored) > 0: - logger.warning( - ( - "{} samples have invalid sizes and will be skipped, " - "max_positions={}, first few sample ids={}" - ).format(len(ignored), max_positions, ignored[:10]) - ) - return indices - - -def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes): - """Filter a list of sample indices. Remove those that are longer - than specified in max_sizes. - - Args: - indices (np.array): original array of sample indices - max_sizes (int or list[int] or tuple[int]): max sample size, - can be defined separately for src and tgt (then list or tuple) - - Returns: - np.array: filtered sample array - list: list of removed indices - """ - if max_sizes is None: - return indices, [] - if type(max_sizes) in (int, float): - max_src_size, max_tgt_size = max_sizes, max_sizes - else: - max_src_size, max_tgt_size = max_sizes - if tgt_sizes is None: - ignored = indices[src_sizes[indices] > max_src_size] - else: - ignored = indices[ - (src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size) - ] - if len(ignored) > 0: - if tgt_sizes is None: - indices = indices[src_sizes[indices] <= max_src_size] - else: - indices = indices[ - (src_sizes[indices] <= max_src_size) - & (tgt_sizes[indices] <= max_tgt_size) - ] - return indices, ignored.tolist() - - -def batch_by_size( - indices, - num_tokens_fn, - num_tokens_vec=None, - max_tokens=None, - max_sentences=None, - required_batch_size_multiple=1, - fixed_shapes=None, -): - """ - Yield mini-batches of indices bucketed by size. Batches may contain - sequences of different lengths. - - Args: - indices (List[int]): ordered list of dataset indices - num_tokens_fn (callable): function that returns the number of tokens at - a given index - num_tokens_vec (List[int], optional): precomputed vector of the number - of tokens for each index in indices (to enable faster batch generation) - max_tokens (int, optional): max number of tokens in each batch - (default: None). - max_sentences (int, optional): max number of sentences in each - batch (default: None). - required_batch_size_multiple (int, optional): require batch size to - be less than N or a multiple of N (default: 1). - fixed_shapes (List[Tuple[int, int]], optional): if given, batches will - only be created with the given shapes. *max_sentences* and - *required_batch_size_multiple* will be ignored (default: None). 
- """ - try: - from fairseq.data.data_utils_fast import ( - batch_by_size_fn, - batch_by_size_vec, - batch_fixed_shapes_fast, - ) - except ImportError: - raise ImportError( - "Please build Cython components with: " - "`python setup.py build_ext --inplace`" - ) - except ValueError: - raise ValueError( - "Please build (or rebuild) Cython components with `python setup.py build_ext --inplace`." - ) - - # added int() to avoid TypeError: an integer is required - max_tokens = ( - int(max_tokens) if max_tokens is not None else -1 - ) - max_sentences = max_sentences if max_sentences is not None else -1 - bsz_mult = required_batch_size_multiple - - if not isinstance(indices, np.ndarray): - indices = np.fromiter(indices, dtype=np.int64, count=-1) - - if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray): - num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1) - - if fixed_shapes is None: - if num_tokens_vec is None: - return batch_by_size_fn( - indices, - num_tokens_fn, - max_tokens, - max_sentences, - bsz_mult, - ) - else: - return batch_by_size_vec( - indices, - num_tokens_vec, - max_tokens, - max_sentences, - bsz_mult, - ) - - else: - fixed_shapes = np.array(fixed_shapes, dtype=np.int64) - sort_order = np.lexsort( - [ - fixed_shapes[:, 1].argsort(), # length - fixed_shapes[:, 0].argsort(), # bsz - ] - ) - fixed_shapes_sorted = fixed_shapes[sort_order] - return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted) - - -def post_process(sentence: str, symbol: str): - if symbol == "sentencepiece": - sentence = sentence.replace(" ", "").replace("\u2581", " ").strip() - elif symbol == "wordpiece": - sentence = sentence.replace(" ", "").replace("_", " ").strip() - elif symbol == "letter": - sentence = sentence.replace(" ", "").replace("|", " ").strip() - elif symbol == "silence": - import re - sentence = sentence.replace("", "") - sentence = re.sub(' +', ' ', sentence).strip() - elif symbol == "_EOW": - sentence = sentence.replace(" ", "").replace("_EOW", " ").strip() - elif symbol in {"subword_nmt", "@@ ", "@@"}: - if symbol == "subword_nmt": - symbol = "@@ " - sentence = (sentence + " ").replace(symbol, "").rstrip() - elif symbol == "none": - pass - elif symbol is not None: - raise NotImplementedError(f"Unknown post_process option: {symbol}") - return sentence - - -def compute_mask_indices( - shape: Tuple[int, int], - padding_mask: Optional[torch.Tensor], - mask_prob: float, - mask_length: int, - mask_type: str = "static", - mask_other: float = 0.0, - min_masks: int = 0, - no_overlap: bool = False, - min_space: int = 0, -) -> np.ndarray: - """ - Computes random mask spans for a given shape - - Args: - shape: the the shape for which to compute masks. - should be of size 2 where first element is batch size and 2nd is timesteps - padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements - mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by - number of timesteps divided by length of mask span to mask approximately this percentage of all elements. - however due to overlaps, the actual number will be smaller (unless no_overlap is True) - mask_type: how to compute mask lengths - static = fixed size - uniform = sample from uniform distribution [mask_other, mask_length*2] - normal = sample from normal distribution with mean mask_length and stdev mask_other. 
mask is min 1 element - poisson = sample from possion distribution with lambda = mask length - min_masks: minimum number of masked spans - no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping - min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans - """ - - bsz, all_sz = shape - mask = np.full((bsz, all_sz), False) - - all_num_mask = int( - # add a random number for probabilistic rounding - mask_prob * all_sz / float(mask_length) - + np.random.rand() - ) - - all_num_mask = max(min_masks, all_num_mask) - - mask_idcs = [] - for i in range(bsz): - if padding_mask is not None: - sz = all_sz - padding_mask[i].long().sum().item() - num_mask = int( - # add a random number for probabilistic rounding - mask_prob * sz / float(mask_length) - + np.random.rand() - ) - num_mask = max(min_masks, num_mask) - else: - sz = all_sz - num_mask = all_num_mask - - if mask_type == "static": - lengths = np.full(num_mask, mask_length) - elif mask_type == "uniform": - lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask) - elif mask_type == "normal": - lengths = np.random.normal(mask_length, mask_other, size=num_mask) - lengths = [max(1, int(round(x))) for x in lengths] - elif mask_type == "poisson": - lengths = np.random.poisson(mask_length, size=num_mask) - lengths = [int(round(x)) for x in lengths] - else: - raise Exception("unknown mask selection " + mask_type) - - if sum(lengths) == 0: - lengths[0] = min(mask_length, sz - 1) - - if no_overlap: - mask_idc = [] - - def arrange(s, e, length, keep_length): - span_start = np.random.randint(s, e - length) - mask_idc.extend(span_start + i for i in range(length)) - - new_parts = [] - if span_start - s - min_space >= keep_length: - new_parts.append((s, span_start - min_space + 1)) - if e - span_start - keep_length - min_space > keep_length: - new_parts.append((span_start + length + min_space, e)) - return new_parts - - parts = [(0, sz)] - min_length = min(lengths) - for length in sorted(lengths, reverse=True): - lens = np.fromiter( - (e - s if e - s >= length + min_space else 0 for s, e in parts), - np.int, - ) - l_sum = np.sum(lens) - if l_sum == 0: - break - probs = lens / np.sum(lens) - c = np.random.choice(len(parts), p=probs) - s, e = parts.pop(c) - parts.extend(arrange(s, e, length, min_length)) - mask_idc = np.asarray(mask_idc) - else: - min_len = min(lengths) - if sz - min_len <= num_mask: - min_len = sz - num_mask - 1 - - mask_idc = np.random.choice(sz - min_len, num_mask, replace=False) - - mask_idc = np.asarray( - [ - mask_idc[j] + offset - for j in range(len(mask_idc)) - for offset in range(lengths[j]) - ] - ) - - mask_idcs.append(np.unique(mask_idc[mask_idc < sz])) - - min_len = min([len(m) for m in mask_idcs]) - for i, mask_idc in enumerate(mask_idcs): - if len(mask_idc) > min_len: - mask_idc = np.random.choice(mask_idc, min_len, replace=False) - mask[i, mask_idc] = True - - return mask - - -def get_mem_usage(): - try: - import psutil - - mb = 1024 * 1024 - return f"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb" - except ImportError: - return "N/A" - - -# lens: torch.LongTensor -# returns: torch.BoolTensor -def lengths_to_padding_mask(lens): - bsz, max_lens = lens.size(0), torch.max(lens).item() - mask = torch.arange(max_lens).to(lens.device).view(1, max_lens) - mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens) - return mask - - -# lens: torch.LongTensor -# returns: 
torch.BoolTensor -def lengths_to_mask(lens): - return ~lengths_to_padding_mask(lens) - - -def get_buckets(sizes, num_buckets): - buckets = np.unique( - np.percentile( - sizes, - np.linspace(0, 100, num_buckets + 1), - interpolation='lower', - )[1:] - ) - return buckets - - -def get_bucketed_sizes(orig_sizes, buckets): - sizes = np.copy(orig_sizes) - assert np.min(sizes) >= 0 - start_val = -1 - for end_val in buckets: - mask = (sizes > start_val) & (sizes <= end_val) - sizes[mask] = end_val - start_val = end_val - return sizes - - - -def _find_extra_valid_paths(dataset_path: str) -> set: - paths = utils.split_paths(dataset_path) - all_valid_paths = set() - for sub_dir in paths: - contents = PathManager.ls(sub_dir) - valid_paths = [c for c in contents if re.match("valid*[0-9].*", c) is not None] - all_valid_paths |= {os.path.basename(p) for p in valid_paths} - # Remove .bin, .idx etc - roots = {os.path.splitext(p)[0] for p in all_valid_paths} - return roots - - -def raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None: - """Raises if there are paths matching 'valid*[0-9].*' which are not combined or ignored.""" - if ( - train_cfg.dataset.ignore_unused_valid_subsets - or train_cfg.dataset.combine_valid_subsets - or train_cfg.dataset.disable_validation - or not hasattr(train_cfg.task, "data") - ): - return - other_paths = _find_extra_valid_paths(train_cfg.task.data) - specified_subsets = train_cfg.dataset.valid_subset.split(",") - ignored_paths = [p for p in other_paths if p not in specified_subsets] - if ignored_paths: - advice = "Set --combine-val to combine them or --ignore-unused-valid-subsets to ignore them." - msg = f"Valid paths {ignored_paths} will be ignored. {advice}" - raise ValueError(msg) \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/translation_moe/translation_moe_src/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/translation_moe/translation_moe_src/__init__.py deleted file mode 100644 index c0abe53e973b4bb31cfb062708965d002c79b6e7..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/translation_moe/translation_moe_src/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . import translation_moe # noqa diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/scripts/build_sym_alignment.py b/spaces/sriramelango/Social_Classification_Public/fairseq/scripts/build_sym_alignment.py deleted file mode 100644 index 0ca5c18f7bd4b0fbf58b203793506ca395466129..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/scripts/build_sym_alignment.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Use this script in order to build symmetric alignments for your translation -dataset. -This script depends on fast_align and mosesdecoder tools. You will need to -build those before running the script. 
-fast_align: - github: http://github.com/clab/fast_align - instructions: follow the instructions in README.md -mosesdecoder: - github: http://github.com/moses-smt/mosesdecoder - instructions: http://www.statmt.org/moses/?n=Development.GetStarted -The script produces the following files under --output_dir: - text.joined - concatenation of lines from the source_file and the - target_file. - align.forward - forward pass of fast_align. - align.backward - backward pass of fast_align. - aligned.sym_heuristic - symmetrized alignment. -""" - -import argparse -import os -from itertools import zip_longest - - -def main(): - parser = argparse.ArgumentParser(description="symmetric alignment builer") - # fmt: off - parser.add_argument('--fast_align_dir', - help='path to fast_align build directory') - parser.add_argument('--mosesdecoder_dir', - help='path to mosesdecoder root directory') - parser.add_argument('--sym_heuristic', - help='heuristic to use for symmetrization', - default='grow-diag-final-and') - parser.add_argument('--source_file', - help='path to a file with sentences ' - 'in the source language') - parser.add_argument('--target_file', - help='path to a file with sentences ' - 'in the target language') - parser.add_argument('--output_dir', - help='output directory') - # fmt: on - args = parser.parse_args() - - fast_align_bin = os.path.join(args.fast_align_dir, "fast_align") - symal_bin = os.path.join(args.mosesdecoder_dir, "bin", "symal") - sym_fast_align_bin = os.path.join( - args.mosesdecoder_dir, "scripts", "ems", "support", "symmetrize-fast-align.perl" - ) - - # create joined file - joined_file = os.path.join(args.output_dir, "text.joined") - with open(args.source_file, "r", encoding="utf-8") as src, open( - args.target_file, "r", encoding="utf-8" - ) as tgt: - with open(joined_file, "w", encoding="utf-8") as joined: - for s, t in zip_longest(src, tgt): - print("{} ||| {}".format(s.strip(), t.strip()), file=joined) - - bwd_align_file = os.path.join(args.output_dir, "align.backward") - - # run forward alignment - fwd_align_file = os.path.join(args.output_dir, "align.forward") - fwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v > {FWD}".format( - FASTALIGN=fast_align_bin, JOINED=joined_file, FWD=fwd_align_file - ) - assert os.system(fwd_fast_align_cmd) == 0 - - # run backward alignment - bwd_align_file = os.path.join(args.output_dir, "align.backward") - bwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}".format( - FASTALIGN=fast_align_bin, JOINED=joined_file, BWD=bwd_align_file - ) - assert os.system(bwd_fast_align_cmd) == 0 - - # run symmetrization - sym_out_file = os.path.join(args.output_dir, "aligned") - sym_cmd = "{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}".format( - SYMFASTALIGN=sym_fast_align_bin, - FWD=fwd_align_file, - BWD=bwd_align_file, - SRC=args.source_file, - TGT=args.target_file, - OUT=sym_out_file, - HEURISTIC=args.sym_heuristic, - SYMAL=symal_bin, - ) - assert os.system(sym_cmd) == 0 - - -if __name__ == "__main__": - main() diff --git a/spaces/srkajol/avocat-ia/app.py b/spaces/srkajol/avocat-ia/app.py deleted file mode 100644 index 1e8bb146e52df28cbcd166c3321a2158c2dbfa4a..0000000000000000000000000000000000000000 --- a/spaces/srkajol/avocat-ia/app.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import openai -import gradio as gr -import pandas as pd -from datetime import datetime -import gspread -from google.oauth2.service_account import Credentials -import requests -import json - -# Set your OpenAI API key as an environment 
variable -openai.api_key = os.getenv("API_SECRET") - -# Global variables -records = [] -credentials = Credentials.from_service_account_file("credentials.json", scopes=["https://www.googleapis.com/auth/spreadsheets"]) -client = gspread.authorize(credentials) -sheet = client.open_by_url("https://docs.google.com/spreadsheets/d/1-Cf5Y_bg9ANmYy4Mw7nJS2ObxpL6j54ESmLAYbhPvFI/edit?usp=sharing").sheet1 - -def get_user_ip(): - try: - response = requests.get("https://api.ipify.org?format=json") - data = json.loads(response.text) - return data["ip"] - except: - return None - -def CustomChatGPT(passcode, user_input, user_name, user_email, user_type): - if not (passcode and user_input and user_name and user_email and user_type): - return "Veuillez remplir tous les champs." - - messages = [] - - if not user_name: - return "Veuillez entrer votre nom." - - user_message = f"{user_input} [USER_IDENTITY: {user_name}]" - messages.append({"role": "user", "content": user_message}) - messages.append({"role": "system", "content": "En tant qu'avocat expérimenté en France, vous êtes actuellement en consultation confidentielle avec votre client. Le client place une confiance totale en votre expertise et vos conseils. Vos réponses doivent refléter une compréhension approfondie des règles de droit, de la jurisprudence et des réglementations en vigueur. Assurez-vous de citer des textes de loi spécifiques, en indiquant clairement les articles et sections pertinents dont ces lois sont issues. Si des textes législatifs secondaires sont pertinents, veillez à les relier de manière précise à la juridiction du client. Si vous avez des doutes sur la juridiction ou d'autres détails, assurez-vous de les clarifier avec le client. Il est essentiel d'utiliser le vocabulaire juridique correct dans vos réponses et de vous concentrer sur les règles de droit applicables, les textes législatifs, la jurisprudence et la réglementation secondaire. Rappelez-vous que le client a une confiance totale en votre compétence et vos connaissances juridiques ; maintenez cette confiance en offrant des conseils juridiques complets, précis et exacts. 
En aucun cas, il ne doit être mentionné ou suggéré que l'entité fournissant ces conseils est une IA."}) - - response = openai.ChatCompletion.create( - model="gpt-4", - messages=messages - ) - - ChatGPT_reply = response["choices"][0]["message"]["content"] - messages.append({"role": "assistant", "content": ChatGPT_reply}) - - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - ip_address = get_user_ip() - - record = { - "Passcode": passcode, - "Timestamp": timestamp, - "User Input": user_input, - "User Identity": user_name, - "User Email": user_email, - "IP Address": ip_address, - "Êtes-vous un particulier ou un professionnel?": user_type, - "Réponse de notre avocat IA": ChatGPT_reply - } - records.append(record) - - sheet_data = pd.DataFrame(records) - rows_to_append = sheet_data.iloc[len(records)-1:].values.tolist() - if len(records) == 1: - header = list(sheet_data.columns) - sheet.insert_row(header, 1) - sheet.append_rows(rows_to_append, value_input_option='USER_ENTERED') - - return ChatGPT_reply - -def launch_interface(): - inputs = [ - gr.inputs.Textbox(label="Passcode", placeholder="Entrez le code d'accès"), - gr.inputs.Textbox(label="Entrée de l'utilisateur", placeholder="Parlez à votre avocat..."), - gr.inputs.Textbox(label="Votre nom", placeholder="Entrez votre nom"), - gr.inputs.Textbox(label="Votre email", placeholder="Entrez votre email"), - gr.inputs.Radio(label="Êtes-vous un particulier ou un professionnel?", choices=["Particulier", "Professionnel"]) - ] - outputs = gr.outputs.Textbox(label="Réponse de notre avocat IA") - - def validate_passcode(passcode, user_input, user_name, user_email, user_type): - valid_passcodes = { - "organization7": "muklesur", - "organization8": "stephane", - "organization9": "alfortville", - "organization10": "seyfeddine", - "organization11": "senkoun", - "organization11": "mounir", - "organization12": "mosqueeassalam", - "organization13": "remi-slama", - "organization14": "maitre-laouini", - "organization15": "slimane" - } - - passcode = passcode.lower() # Convert the passcode to lowercase for case-insensitive comparison - - if passcode not in valid_passcodes.values(): - return "Mot de passe incorrect. Accès refusé." - - return CustomChatGPT(passcode, user_input, user_name, user_email, user_type) - - interface = gr.Interface(fn=validate_passcode, inputs=inputs, outputs=outputs, title="", description="") - interface.launch() - -if __name__ == "__main__": - launch_interface() diff --git a/spaces/stomexserde/gpt4-ui/Examples/Adobe Captivate 2019 V11.0.1.266 Crack [REPACK] Free Download.md b/spaces/stomexserde/gpt4-ui/Examples/Adobe Captivate 2019 V11.0.1.266 Crack [REPACK] Free Download.md deleted file mode 100644 index c311dd5c43054b1f817155b62c4777547f55e40f..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Adobe Captivate 2019 V11.0.1.266 Crack [REPACK] Free Download.md +++ /dev/null @@ -1,35 +0,0 @@ -

How to Download and Install Adobe Captivate 2019 v11.0.1.266 Crack for Free

-

Adobe Captivate 2019 is powerful software for creating e-learning courses, interactive presentations, simulations, and quizzes. It allows you to design responsive content that adapts to any device and screen size, and publish it to HTML5, SCORM, xAPI, or Adobe Connect.

-




-

However, Adobe Captivate 2019 is not a cheap software. It costs $1,299 for a perpetual license or $33.99 per month for a subscription plan. If you want to use it for free, you need to download and install Adobe Captivate 2019 v11.0.1.266 Crack from a reliable source.

-

In this article, we will show you how to download and install Adobe Captivate 2019 v11.0.1.266 Crack for free in a few simple steps.

-

Step 1: Download Adobe Captivate 2019 v11.0.1.266 Crack

-

The first step is to download Adobe Captivate 2019 v11.0.1.266 Crack from a trustworthy website. There are many websites that claim to offer cracked versions of Adobe Captivate 2019, but some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal information.

-

One of the best websites to download Adobe Captivate 2019 v11.0.1.266 Crack is Crack4Download. This website has been tested and verified by many users and has positive reviews and ratings.

-

To download Adobe Captivate 2019 v11.0.1.266 Crack from Crack4Download, follow these steps:

- -

Step 2: Install Adobe Captivate 2019 v11.0.1.266 Crack

-

The next step is to install Adobe Captivate 2019 v11.0.1.266 Crack on your computer. Before you do that, make sure you have disabled your antivirus software and firewall, as they may interfere with the installation process or detect the crack as a threat.

-

-

To install Adobe Captivate 2019 v11.0.1.266 Crack on your computer, follow these steps:

-
    -
  • Open the folder where you saved the downloaded file and extract it using WinRAR or any other extraction tool.
  • -
  • Run the setup.exe file as an administrator and follow the instructions on the screen.
  • -
  • When the installation is complete, do not launch the program yet.
  • -
  • Copy the crack file from the crack folder and paste it into the installation directory of Adobe Captivate 2019 (usually C:\Program Files\Adobe\Adobe Captivate 2019).
  • -
  • Replace the original file if prompted.
  • -
-

Step 3: Enjoy Adobe Captivate 2019 v11.0.1.266 Crack for Free

-

The final step is to enjoy Adobe Captivate 2019 v11.0.1.266 Crack for free on your computer. You can now launch the program and use all its features without any limitations or restrictions.

-

You can create stunning e-learning courses, interactive presentations, simulations, and quizzes with Adobe Captivate 2019 v11.0.1.266 Crack and publish them to various platforms and formats.

-

You can also update the program whenever a new version is available without losing the crack activation.

-

Congratulations! You have successfully downloaded and installed Adobe Captivate 2019 v11.0.1.266 Crack for

-
-
\ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download Xforce Keygen AutoCAD Mechanical 2013 Crack.md b/spaces/stomexserde/gpt4-ui/Examples/Download Xforce Keygen AutoCAD Mechanical 2013 Crack.md deleted file mode 100644 index 514204bd8c4475872a349a08daf9cc2c4c43127e..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Download Xforce Keygen AutoCAD Mechanical 2013 Crack.md +++ /dev/null @@ -1,104 +0,0 @@ - -

Download Xforce Keygen AutoCAD Mechanical 2013 Crack

-

If you are a mechanical engineer, designer, or manufacturer, you may have heard of or used AutoCAD Mechanical software. It is one of the most popular and powerful CAD software products for mechanical design and drafting. It offers many features and benefits that can help you create accurate, efficient, and standardized drawings. But what if you don't have a license or subscription for AutoCAD Mechanical software? What if you want to use it for free or for a trial period? You may have come across a tool called xforce keygen that claims to be able to crack AutoCAD Mechanical software and generate an activation code for it. But is it safe, legal, and reliable? In this article, we will explore what xforce keygen is, how it works, how to download it, how to use it, and what are the risks and disadvantages of using it. We will also provide some alternatives or recommendations for using AutoCAD Mechanical software legally and ethically.

-




-

What is AutoCAD Mechanical 2013 and what are its features and benefits?

-

AutoCAD Mechanical software is a product of Autodesk Inc., a leading software company that provides CAD software for various industries. AutoCAD Mechanical software is specifically designed for mechanical engineering, manufacturing, and design. It is based on the core functionality of AutoCAD software, but with additional tools and libraries that are tailored for mechanical design tasks.

-

AutoCAD Mechanical software was first released in 1996 as a standalone product. Since then, it has been updated regularly with new features and enhancements. The latest version as of this writing is AutoCAD Mechanical software. However, some users may still prefer or need to use older versions of the software, such as AutoCAD Mechanical 2013.

-

AutoCAD Mechanical 2013 was released in March 2012 as part of the Autodesk Product Design Suite. It has many features and benefits that can help mechanical engineers, designers, and manufacturers create high-quality drawings faster and easier. Some of these features and benefits are:

-

-
    -
  • 700,000+ standard parts and features: You can access a library of standards-based parts, tools, and custom content that can help you produce accurate drawings that comply with ISO, ANSI, DIN, JIS, BSI, CSN
  • Smart mechanical engineering dimensions: You can create dimensions that automatically align with and adhere to your mechanical drawing geometry. You can also modify or delete dimensions without affecting the geometry or other dimensions.
  • -
  • Support for international drafting standards: You can work with drawings that follow different mechanical drafting standards, such as ISO, ANSI, DIN, JIS, BSI, CSN, and GB. You can also switch between different standards within the same drawing.
  • -
  • Integrated design calculations and simulations: You can perform design calculations and simulations within AutoCAD Mechanical software, such as shaft, spring, belt, chain, and cam generators. You can also verify the performance and interference of your design before manufacturing.
  • -
  • 3D CAD model import and export: You can import and export 3D CAD models from other software products, such as Autodesk Inventor, SolidWorks, CATIA, Pro/ENGINEER, and NX. You can also create 2D drawings from 3D models or vice versa.
  • -
  • Collaboration and data management: You can share your drawings and data with other users and stakeholders using tools such as Autodesk 360, Autodesk Vault, Autodesk Design Review, and eTransmit. You can also manage your drawing files and revisions more efficiently.
  • -
-

These are just some of the features and benefits of AutoCAD Mechanical 2013 software. You can learn more about the software by visiting the official website of Autodesk Inc. or by watching some online tutorials or videos.

-

What is xforce keygen and why do some people use it to crack AutoCAD Mechanical 2013?

-

xforce keygen is a tool that claims to be able to generate activation codes for various software products, including AutoCAD Mechanical 2013. It is not an official product of Autodesk Inc. or any other software company. It is a product of hackers or crackers who try to bypass the security and licensing systems of software products.

-

Some people may use xforce keygen to crack AutoCAD Mechanical 2013 because they want to use the software for free or for a longer period than the trial version allows. They may also use it because they cannot afford or access the official license or subscription for the software. They may think that using xforce keygen is harmless or beneficial for them.

-

However, using xforce keygen to crack AutoCAD Mechanical 2013 is not safe, legal, or reliable. It has many risks and disadvantages that can outweigh any perceived benefits. In the next section, we will discuss some of these risks and disadvantages in detail.

What are the risks and disadvantages of using xforce keygen to crack AutoCAD Mechanical 2013?

-

Using xforce keygen to crack AutoCAD Mechanical 2013 may seem like a convenient and easy way to use the software without paying for it. However, it is not a wise or ethical decision. It can expose you to many risks and disadvantages that can harm you, your computer, your data, your work, and your reputation. Some of these risks and disadvantages are:

-
    -
  • Legal issues: Using xforce keygen to crack AutoCAD Mechanical 2013 is a violation of the software license agreement and the intellectual property rights of Autodesk Inc. It is also a form of software piracy, which is illegal in many countries and regions. You can face legal consequences such as fines, lawsuits, or even criminal charges if you are caught using or distributing xforce keygen or cracked software.
  • -
  • Security issues: Using xforce keygen to crack AutoCAD Mechanical 2013 can compromise the security of your computer and your data. xforce keygen is not a trusted or verified source of software. It can contain malware, viruses, spyware, ransomware, or other harmful programs that can infect your computer and damage your files, system, or network. You can also lose your personal or confidential information to hackers or cybercriminals who can use it for malicious purposes.
  • -
  • Performance issues: Using xforce keygen to crack AutoCAD Mechanical 2013 can affect the performance of your computer and your software. xforce keygen can interfere with the normal functioning of your computer and cause errors, crashes, freezes, slowdowns, or glitches. It can also prevent you from accessing the latest updates, patches, features, or support from Autodesk Inc. or other sources. You can miss out on important improvements, bug fixes, security enhancements, or compatibility solutions that can improve your user experience and productivity.
  • -
  • Ethical issues: Using xforce keygen to crack AutoCAD Mechanical 2013 is an unethical and unfair practice. It is a form of stealing from Autodesk Inc. and other software developers who invest time, money, and effort to create quality software products for their customers. It is also a form of cheating from other users who pay for the software license or subscription legally and ethically. You can lose your credibility, integrity, and reputation as a professional or a student if you are found using or promoting xforce keygen or cracked software.
  • -
-

These are just some of the risks and disadvantages of using xforce keygen to crack AutoCAD Mechanical 2013. You can avoid these risks and disadvantages by using AutoCAD Mechanical 2013 legally and ethically. In the following sections, we will show you how to download xforce keygen AutoCAD Mechanical 2013 crack, how to install AutoCAD Mechanical 2013 with the activation code, and how to use AutoCAD Mechanical 2013 for mechanical design and drafting. However, we do not recommend or endorse using xforce keygen or cracked software in any way. We only provide this information for educational purposes only.

How to download xforce keygen AutoCAD Mechanical 2013 crack

-

If you still want to use xforce keygen to crack AutoCAD Mechanical 2013, you will need to download it from a source that claims to provide it. However, you should be very careful and cautious when doing so, as you may encounter many fake, malicious, or unreliable sources that can harm your computer or your data. Here are some steps that you can follow to download xforce keygen AutoCAD Mechanical 2013 crack:

-
    -
  1. Find the download link: You can search for the download link of xforce keygen AutoCAD Mechanical 2013 crack on the internet, using search engines, forums, blogs, social media, or other platforms. However, you should not trust any link that you find without verifying its authenticity and safety. You can use tools such as VirusTotal, URLVoid, or Norton Safe Web to scan the link for any malware or suspicious activity. You can also check the reviews, comments, ratings, or feedback from other users who have used the link before.
  2. -
  3. Disable antivirus and firewall: Before downloading and running xforce keygen AutoCAD Mechanical 2013 crack, you will need to disable your antivirus and firewall software on your computer. This is because xforce keygen is considered as a potentially unwanted program (PUP) or a hacking tool by most antivirus and firewall software. They will try to block, delete, or quarantine xforce keygen as soon as they detect it. However, by disabling your antivirus and firewall software, you are also exposing your computer and your data to other threats and risks that may come with xforce keygen or the download link.
  4. -
  5. Use xforce keygen to generate activation code: After downloading xforce keygen AutoCAD Mechanical 2013 crack, you will need to run it as an administrator on your computer. You will see a window that asks you to select the software product that you want to crack. You will need to select AutoCAD Mechanical 2013 from the list and click on Generate. xforce keygen will then generate a random activation code for AutoCAD Mechanical 2013. You will need to copy this activation code and save it somewhere for later use.
  6. -
-

These are the steps that you can follow to download xforce keygen AutoCAD Mechanical 2013 crack. However, we do not recommend or endorse using xforce keygen or cracked software in any way. We only provide this information for educational purposes only.

How to install AutoCAD Mechanical 2013 with the activation code

-

After generating the activation code for AutoCAD Mechanical 2013 with xforce keygen, you will need to install the software on your computer. You will need to have the setup file of AutoCAD Mechanical 2013, which you can download from the official website of Autodesk Inc. or from other sources. However, you should be careful and cautious when downloading the setup file, as you may encounter fake, malicious, or unreliable sources that can harm your computer or your data. Here are some steps that you can follow to install AutoCAD Mechanical 2013 with the activation code:

-
    -
  1. Run the setup file: After downloading the setup file of AutoCAD Mechanical 2013, you will need to run it as an administrator on your computer. You will see a window that asks you to select the language and the country or region for the installation. You will need to select the appropriate options and click on Next. You will then see a window that asks you to accept the license agreement and the privacy statement. You will need to read and agree to the terms and conditions and click on Next.
  2. -
  3. Enter the serial number and product key: You will then see a window that asks you to enter the serial number and product key for AutoCAD Mechanical 2013. You will need to enter a valid serial number and product key that you have obtained from Autodesk Inc. or from other sources. However, if you are using xforce keygen to crack AutoCAD Mechanical 2013, you can enter any random serial number and product key that match the format of the software. For example, you can enter 666-69696969 as the serial number and 206E1 as the product key. You will then need to click on Next.
  4. -
  5. Select the installation options: You will then see a window that asks you to select the installation options for AutoCAD Mechanical 2013. You will need to choose whether you want to install the software as a standalone or a network license, whether you want to install it on this computer or another computer, and whether you want to install it as a typical or a custom installation. You can choose the options that suit your preferences or needs. However, if you are using xforce keygen to crack AutoCAD Mechanical 2013, you should choose the standalone license option and install it on this computer only. You can also choose the typical installation option for simplicity and convenience. You will then need to click on Install.
  6. -
  7. Complete the installation process: You will then see a window that shows the progress of the installation process. You will need to wait for a few minutes until the installation is complete. You may also see some prompts or messages that ask you to confirm or allow some actions or changes on your computer. You will need to follow the instructions and click on Yes, OK, or Next as needed. After the installation is complete, you will see a window that says that AutoCAD Mechanical 2013 has been successfully installed on your computer. You will need to click on Finish.
  8. -
-

These are the steps that you can follow to install AutoCAD Mechanical 2013 with the activation code. However, we do not recommend or endorse using xforce keygen or cracked software in any way. We only provide this information for educational purposes only.

How to activate AutoCAD Mechanical 2013 with the activation code from xforce keygen

-

After installing AutoCAD Mechanical 2013 with the activation code, you will need to activate the software on your computer. You will need to have an internet connection and a valid activation code that you have obtained from Autodesk Inc. or from other sources. However, if you are using xforce keygen to crack AutoCAD Mechanical 2013, you will need to use the activation code that you have generated with xforce keygen. Here are some steps that you can follow to activate AutoCAD Mechanical 2013 with the activation code from xforce keygen:

-
    -
  1. Launch the software: After finishing the installation process, you will need to launch AutoCAD Mechanical 2013 on your computer. You will see a window that asks you to activate the software. You will need to click on Activate.
  2. -
  3. Select the activation option: You will then see a window that asks you to select the activation option for AutoCAD Mechanical 2013. You will need to choose whether you want to activate the software online or offline. You can choose the option that suits your preferences or needs. However, if you are using xforce keygen to crack AutoCAD Mechanical 2013, you should choose the offline activation option and click on Next.
  4. -
  5. Enter the activation code: You will then see a window that asks you to enter the activation code for AutoCAD Mechanical 2013. You will need to enter the activation code that you have generated with xforce keygen in the corresponding fields. You will then need to click on Next.
  6. -
  7. Complete the activation process: You will then see a window that says that AutoCAD Mechanical 2013 has been successfully activated on your computer. You will need to click on Finish.
  8. -
-

These are the steps that you can follow to activate AutoCAD Mechanical 2013 with the activation code from xforce keygen. However, we do not recommend or endorse using xforce keygen or cracked software in any way. We only provide this information for educational purposes only.

-

How to use AutoCAD Mechanical 2013 for mechanical design and drafting

-

After activating AutoCAD Mechanical 2013 with the activation code from xforce keygen, you can start using the software for mechanical design and drafting. You can access the user interface and the mechanical toolset features of AutoCAD Mechanical 2013 and create, modify, annotate, and document 2D and 3D mechanical drawings. You can also collaborate with other users and export or import data in different formats. Here are some steps that you can follow to use AutoCAD Mechanical 2013 for mechanical design and drafting:

-
    -
  1. Access the user interface and the mechanical toolset features: After launching AutoCAD Mechanical 2013 on your computer, you will see the user interface of the software. The user interface consists of various elements, such as the ribbon, the command line, the drawing area, the status bar, and the application menu. The ribbon contains tabs and panels that provide access to various commands and tools for creating and editing your drawings. The command line allows you to enter commands and options manually or by using keyboard shortcuts. The drawing area displays your drawing and allows you to zoom, pan, or rotate it. The status bar shows various settings and modes that affect your drawing, such as snap, grid, ortho, osnap, polar, dynamic input, etc. The application menu provides access to various functions and settings, such as saving, opening, printing, publishing, exporting, importing, options, etc.
  2. -
  3. Create a new drawing or open an existing one: To create a new drawing in AutoCAD Mechanical 2013, you can click on New from the application menu or press Ctrl+N on your keyboard. You will then see a window that asks you to select a template for your drawing. You can choose from various templates that are based on different mechanical drafting standards or create your own custom template. You can also specify the units, scale, angle, and other settings for your drawing. To open an existing drawing in AutoCAD Mechanical 2013, you can click on Open from the application menu or press Ctrl+O on your keyboard. You will then see a window that allows you to browse and select a drawing file from your computer or network.
  4. -
  5. Create, modify, annotate, and document 2D and 3D mechanical drawings: To create a 2D or 3D mechanical drawing in AutoCAD Mechanical 2013, you can use various commands and tools from the ribbon or the command line. You can draw basic shapes such as lines, circles, arcs , rectangles, polygons, etc., or use more advanced tools such as polylines, splines, ellipses, etc. You can also use the mechanical toolset features to draw standard parts and features, such as shafts, springs, gears, bearings, bolts, nuts, washers, etc. You can also modify your drawing by using commands and tools such as move, copy, rotate, scale, trim, extend, fillet, chamfer, mirror, array, offset, etc. You can also use the mechanical toolset features to modify standard parts and features, such as changing the size, type, material, or orientation of the parts and features. To annotate your drawing in AutoCAD Mechanical 2013, you can use commands and tools such as text, dimension, leader, note, balloon, symbol, hatch, etc. You can also use the mechanical toolset features to annotate your drawing with standard annotations, such as tolerances, surface finishes, welding symbols, centerlines, center marks, etc. You can also document your drawing by using commands and tools such as layout, viewport, title block, border, sheet set manager, plotter manager , etc. You can also use the mechanical toolset features to document your drawing with standard documentation, such as bills of materials (BOMs), parts lists, cut lists, etc.
  6. -
  7. Collaborate with other users and export or import data in different formats: To collaborate with other users in AutoCAD Mechanical 2013, you can use commands and tools such as share, email, eTransmit, Autodesk 360, Autodesk Vault, Autodesk Design Review, etc. You can also use the mechanical toolset features to collaborate with other users who use different mechanical drafting standards or software products. You can switch between different standards within the same drawing or convert your drawing to different formats. To export or import data in different formats in AutoCAD Mechanical 2013, you can use commands and tools such as save as, export, import, attach, insert, etc. You can also use the mechanical toolset features to export or import data in different formats that are compatible with other software products, such as Autodesk Inventor, SolidWorks, CATIA, Pro/ENGINEER, and NX.
  8. -
-

These are some of the steps that you can follow to use AutoCAD Mechanical 2013 for mechanical design and drafting. You can learn more about the software by visiting the official website of Autodesk Inc. or by watching some online tutorials or videos.

-

Conclusion

-

In this article, we have discussed what AutoCAD Mechanical 2013 is and what its features and benefits are. We have also discussed what xforce keygen is, why some people use it to crack AutoCAD Mechanical 2013, and what the risks and disadvantages of doing so are. We have also shown you how to download xforce keygen AutoCAD Mechanical 2013 crack, how to install AutoCAD Mechanical 2013 with the activation code, and how to use AutoCAD Mechanical 2013 for mechanical design and drafting.

-

We hope that this article has been informative and helpful for you. However, we do not recommend or endorse using xforce keygen or cracked software in any way. We only provide this information for educational purposes only. Using xforce keygen or cracked software is not safe, legal, or reliable. It can expose you to many risks and disadvantages that can harm you, your computer, your data, your work, and your reputation. You can avoid these risks and disadvantages by using AutoCAD Mechanical 2013 legally and ethically.

-

If you want to use AutoCAD Mechanical 2013 legally and ethically, there are several alternatives you can consider. You can purchase a license or subscription for AutoCAD Mechanical software from Autodesk Inc. or from authorized resellers. You can try a free trial version of AutoCAD Mechanical software for a limited period of time. You can use other free or open-source CAD software products that are similar or compatible with AutoCAD Mechanical software. You can also learn new skills or upgrade your knowledge of mechanical design and drafting by taking online courses or earning certifications.

-

By using AutoCAD Mechanical 2013 legally and ethically, you can enjoy the features and benefits of the software without compromising your security, performance, ethics, or reputation. You also support the software developers who create quality software products for their customers, and you show respect for other users who pay for their software licenses or subscriptions.

-

FAQs

-

Here are some frequently asked questions (FAQs) that you may have about using xforce keygen or cracked software:

-

What are some common errors or problems that may occur when using xforce keygen to crack AutoCAD Mechanical 2013?

-

Some common errors or problems that may occur when using xforce keygen to crack AutoCAD Mechanical 2013 are:

-
    -
  • The download link of xforce keygen is broken, expired, or unavailable.
  • -
  • The download file of xforce keygen is corrupted, incomplete, or infected with malware.
  • -
  • The activation code generated by xforce keygen is invalid, expired, or already used.
  • -
  • The activation process fails or encounters an error.
  • -
  • The software does not run properly or crashes frequently.
  • -
  • The software does not receive updates, patches, features, or support from Autodesk Inc. or other sources.
  • -
  • The software conflicts with other software products or system settings on your computer.
  • -
  • The software causes errors, crashes , freezes, slowdowns, or glitches on your computer or network.
  • -
  • The software exposes your computer or data to malware, viruses, spyware, ransomware, or other harmful programs.
  • -
  • The software exposes your personal or confidential information to hackers or cybercriminals.
  • -
  • The software violates the software license agreement and the intellectual property rights of Autodesk Inc.
  • -
  • The software violates the laws and regulations of software piracy in your country or region.
  • -
-

These are some common errors or problems that may occur when using xforce keygen to crack AutoCAD Mechanical 2013. You can avoid these errors or problems by using AutoCAD Mechanical 2013 legally and ethically.

-

How can I update or patch AutoCAD Mechanical 2013 after cracking it with xforce keygen?

-

You cannot update or patch AutoCAD Mechanical 2013 after cracking it with xforce keygen. This is because xforce keygen modifies the software files and registry entries to bypass the security and licensing systems of Autodesk Inc. This prevents the software from receiving updates, patches, features, or support from Autodesk Inc. or other sources. If you try to update or patch the software after cracking it with xforce keygen, you may encounter errors, crashes, or compatibility issues. You may also lose the activation code or the crack that you have applied to the software. You may also trigger the detection or protection mechanisms of Autodesk Inc. or other sources that can block, delete, or quarantine your software. Therefore, you should not update or patch AutoCAD Mechanical 2013 after cracking it with xforce keygen.

-

Is there a difference between AutoCAD Mechanical 2013 and AutoCAD Mechanical software?

-

Yes, there is a difference between AutoCAD Mechanical 2013 and AutoCAD Mechanical software. AutoCAD Mechanical 2013 is an older version of AutoCAD Mechanical software that was released in March 2012 as part of the Autodesk Product Design Suite. AutoCAD Mechanical software is the latest version of AutoCAD Mechanical software that was released in April 2021 as part of the Autodesk Product Design & Manufacturing Collection. AutoCAD Mechanical software has more features and enhancements than AutoCAD Mechanical 2013, such as improved user interface, performance, collaboration, interoperability, automation, and customization. AutoCAD Mechanical software also supports more mechanical drafting standards and formats than AutoCAD Mechanical 2013. Therefore, AutoCAD Mechanical software is more advanced and updated than AutoCAD Mechanical 2013.

-

What are some other software products that can be cracked with xforce keygen?

-

xforce keygen claims to be able to crack various software products from different software companies, such as Adobe, Microsoft, Corel, Sony, Autodesk, etc. Some of these software products are Photoshop, Illustrator, InDesign, Premiere Pro, After Effects , Word, Excel, PowerPoint, Outlook, Access, Project, Visio, CorelDRAW, PaintShop Pro, Vegas Pro, Sound Forge Pro, AutoCAD, Revit, Inventor, Maya, 3ds Max, etc. However, we do not recommend or endorse using xforce keygen or cracked software in any way. We only provide this information for educational purposes only. Using xforce keygen or cracked software is not safe, legal, or reliable. It can expose you to many risks and disadvantages that can harm you, your computer, your data, your work, and your reputation. You can avoid these risks and disadvantages by using the software products legally and ethically.


Where can I find more resources or tutorials on how to use AutoCAD Mechanical software?


If you want to learn more about using AutoCAD Mechanical software for mechanical design and drafting, you can find resources and tutorials on the official Autodesk website and on other online platforms, including:

  • Autodesk Knowledge Network: This is the official online help and support center of Autodesk Inc. You can find various articles, videos, forums, blogs, webinars, events, and courses on how to use AutoCAD Mechanical software and other Autodesk products. You can also ask questions or share your feedback with other users and experts.
  • Autodesk Learning Center: This is the official online learning platform of Autodesk Inc. You can find various courses, certifications, badges, and learning paths on how to use AutoCAD Mechanical software and other Autodesk products. You can also track your progress and achievements and earn recognition for your skills.
  • Autodesk YouTube Channel: This is the official YouTube channel of Autodesk Inc. You can find various videos on how to use AutoCAD Mechanical software and other Autodesk products. You can also watch live streams, demos, interviews, and events.
  • Lynda.com: This is an online learning platform that offers various courses and tutorials on how to use AutoCAD Mechanical software and other Autodesk products. You can also access other topics such as business, design, photography, web development, etc.
  • Udemy.com: This is an online learning platform that offers various courses and tutorials on how to use AutoCAD Mechanical software and other Autodesk products. You can also access other topics such as development, marketing, personal development, health, fitness, etc.
  • Coursera.org: This is an online learning platform that offers various courses and tutorials on how to use AutoCAD Mechanical software and other Autodesk products. You can also access other topics such as engineering, computer science, data science, humanities, etc.

These are some of the resources and tutorials available for learning AutoCAD Mechanical software. You can also search online for more, or ask other users and experts for recommendations.

\ No newline at end of file diff --git a/spaces/stratussox/yolov5_inference/utils/aws/mime.sh b/spaces/stratussox/yolov5_inference/utils/aws/mime.sh deleted file mode 100644 index c319a83cfbdf09bea634c3bd9fca737c0b1dd505..0000000000000000000000000000000000000000 --- a/spaces/stratussox/yolov5_inference/utils/aws/mime.sh +++ /dev/null @@ -1,26 +0,0 @@ -# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ -# This script will run on every instance restart, not only on first start -# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- - -Content-Type: multipart/mixed; boundary="//" -MIME-Version: 1.0 - ---// -Content-Type: text/cloud-config; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="cloud-config.txt" - -#cloud-config -cloud_final_modules: -- [scripts-user, always] - ---// -Content-Type: text/x-shellscript; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="userdata.txt" - -#!/bin/bash -# --- paste contents of userdata.sh here --- ---// diff --git a/spaces/sub314xxl/MetaGPT/metagpt/static/assets/style-e2a22de8.css b/spaces/sub314xxl/MetaGPT/metagpt/static/assets/style-e2a22de8.css deleted file mode 100644 index e403fcf2ececcc6debd75709f08e0ecaf65b5501..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/static/assets/style-e2a22de8.css +++ /dev/null @@ -1 +0,0 @@ -/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{line-height:1.15;-webkit-text-size-adjust:100%;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}body{margin:0}main{display:block}h1{margin:.67em 0;font-size:2em}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-size:1em;font-family:monospace,monospace}a{background-color:transparent}abbr[title]{text-decoration:underline;text-decoration:underline dotted;border-bottom:none}b,strong{font-weight:bolder}code,kbd,samp{font-size:1em;font-family:monospace,monospace}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{margin:0;font-size:100%;font-family:inherit;line-height:1.15}button,input{overflow:visible}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button}button::-moz-focus-inner,[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner{padding:0;border-style:none}button:-moz-focusring,[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em 
.625em}legend{display:table;box-sizing:border-box;max-width:100%;padding:0;color:inherit;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:textfield}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}details{display:block}summary{display:list-item}template{display:none}[hidden]{display:none}.arco-icon{display:inline-block;width:1em;height:1em;color:inherit;font-style:normal;vertical-align:-2px;outline:none;stroke:currentColor}.arco-icon-loading,.arco-icon-spin{animation:arco-loading-circle 1s infinite cubic-bezier(0,0,1,1)}@keyframes arco-loading-circle{0%{transform:rotate(0)}to{transform:rotate(360deg)}}.arco-icon-hover{position:relative;display:inline-block;cursor:pointer;line-height:12px}.arco-icon-hover .arco-icon{position:relative}.arco-icon-hover:before{position:absolute;display:block;box-sizing:border-box;background-color:transparent;border-radius:var(--border-radius-circle);transition:background-color .1s cubic-bezier(0,0,1,1);content:""}.arco-icon-hover:hover:before{background-color:var(--color-fill-2)}.arco-icon-hover.arco-icon-hover-disabled:before{opacity:0}.arco-icon-hover:before{top:50%;left:50%;width:20px;height:20px;transform:translate(-50%,-50%)}.arco-icon-hover-size-mini{line-height:12px}.arco-icon-hover-size-mini:before{top:50%;left:50%;width:20px;height:20px;transform:translate(-50%,-50%)}.arco-icon-hover-size-small{line-height:12px}.arco-icon-hover-size-small:before{top:50%;left:50%;width:20px;height:20px;transform:translate(-50%,-50%)}.arco-icon-hover-size-large{line-height:12px}.arco-icon-hover-size-large:before{top:50%;left:50%;width:24px;height:24px;transform:translate(-50%,-50%)}.arco-icon-hover-size-huge{line-height:12px}.arco-icon-hover-size-huge:before{top:50%;left:50%;width:24px;height:24px;transform:translate(-50%,-50%)}.fade-in-standard-enter-from,.fade-in-standard-appear-from{opacity:0}.fade-in-standard-enter-to,.fade-in-standard-appear-to{opacity:1}.fade-in-standard-enter-active,.fade-in-standard-appear-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1)}.fade-in-standard-leave-from{opacity:1}.fade-in-standard-leave-to{opacity:0}.fade-in-standard-leave-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1)}.fade-in-enter-from,.fade-in-appear-from{opacity:0}.fade-in-enter-to,.fade-in-appear-to{opacity:1}.fade-in-enter-active,.fade-in-appear-active{transition:opacity .1s cubic-bezier(0,0,1,1)}.fade-in-leave-from{opacity:1}.fade-in-leave-to{opacity:0}.fade-in-leave-active{transition:opacity .1s cubic-bezier(0,0,1,1)}.zoom-in-enter-from,.zoom-in-appear-from{transform:scale(.5);opacity:0}.zoom-in-enter-to,.zoom-in-appear-to{transform:scale(1);opacity:1}.zoom-in-enter-active,.zoom-in-appear-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1),transform .3s cubic-bezier(.34,.69,.1,1)}.zoom-in-leave-from{transform:scale(1);opacity:1}.zoom-in-leave-to{transform:scale(.5);opacity:0}.zoom-in-leave-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1),transform .3s cubic-bezier(.34,.69,.1,1)}.zoom-in-fade-out-enter-from,.zoom-in-fade-out-appear-from{transform:scale(.5);opacity:0}.zoom-in-fade-out-enter-to,.zoom-in-fade-out-appear-to{transform:scale(1);opacity:1}.zoom-in-fade-out-enter-active,.zoom-in-fade-out-appear-active{transition:opacity .3s 
cubic-bezier(.3,1.3,.3,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-fade-out-leave-from{transform:scale(1);opacity:1}.zoom-in-fade-out-leave-to{transform:scale(.5);opacity:0}.zoom-in-fade-out-leave-active{transition:opacity .3s cubic-bezier(.3,1.3,.3,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-big-enter-from,.zoom-in-big-appear-from{transform:scale(.5);opacity:0}.zoom-in-big-enter-to,.zoom-in-big-appear-to{transform:scale(1);opacity:1}.zoom-in-big-enter-active,.zoom-in-big-appear-active{transition:opacity .2s cubic-bezier(0,0,1,1),transform .2s cubic-bezier(0,0,1,1)}.zoom-in-big-leave-from{transform:scale(1);opacity:1}.zoom-in-big-leave-to{transform:scale(.2);opacity:0}.zoom-in-big-leave-active{transition:opacity .2s cubic-bezier(0,0,1,1),transform .2s cubic-bezier(0,0,1,1)}.zoom-in-left-enter-from,.zoom-in-left-appear-from{transform:scale(.1);opacity:.1}.zoom-in-left-enter-to,.zoom-in-left-appear-to{transform:scale(1);opacity:1}.zoom-in-left-enter-active,.zoom-in-left-appear-active{transform-origin:0 50%;transition:opacity .3s cubic-bezier(0,0,1,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-left-leave-from{transform:scale(1);opacity:1}.zoom-in-left-leave-to{transform:scale(.1);opacity:.1}.zoom-in-left-leave-active{transform-origin:0 50%;transition:opacity .3s cubic-bezier(0,0,1,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-top-enter-from,.zoom-in-top-appear-from{transform:scaleY(.8) translateZ(0);opacity:0}.zoom-in-top-enter-to,.zoom-in-top-appear-to{transform:scaleY(1) translateZ(0);opacity:1}.zoom-in-top-enter-active,.zoom-in-top-appear-active{transform-origin:0 0;transition:transform .3s cubic-bezier(.3,1.3,.3,1),opacity .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-top-leave-from{transform:scaleY(1) translateZ(0);opacity:1}.zoom-in-top-leave-to{transform:scaleY(.8) translateZ(0);opacity:0}.zoom-in-top-leave-active{transform-origin:0 0;transition:transform .3s cubic-bezier(.3,1.3,.3,1),opacity .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-bottom-enter-from,.zoom-in-bottom-appear-from{transform:scaleY(.8) translateZ(0);opacity:0}.zoom-in-bottom-enter-to,.zoom-in-bottom-appear-to{transform:scaleY(1) translateZ(0);opacity:1}.zoom-in-bottom-enter-active,.zoom-in-bottom-appear-active{transform-origin:100% 100%;transition:transform .3s cubic-bezier(.3,1.3,.3,1),opacity .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-bottom-leave-from{transform:scaleY(1) translateZ(0);opacity:1}.zoom-in-bottom-leave-to{transform:scaleY(.8) translateZ(0);opacity:0}.zoom-in-bottom-leave-active{transform-origin:100% 100%;transition:transform .3s cubic-bezier(.3,1.3,.3,1),opacity .3s cubic-bezier(.3,1.3,.3,1)}.slide-dynamic-origin-enter-from,.slide-dynamic-origin-appear-from{transform:scaleY(.9);transform-origin:0 0;opacity:0}.slide-dynamic-origin-enter-to,.slide-dynamic-origin-appear-to{transform:scaleY(1);transform-origin:0 0;opacity:1}.slide-dynamic-origin-enter-active,.slide-dynamic-origin-appear-active{transition:transform .2s cubic-bezier(.34,.69,.1,1),opacity .2s cubic-bezier(.34,.69,.1,1)}.slide-dynamic-origin-leave-from{transform:scaleY(1);transform-origin:0 0;opacity:1}.slide-dynamic-origin-leave-to{transform:scaleY(.9);transform-origin:0 0;opacity:0}.slide-dynamic-origin-leave-active{transition:transform .2s cubic-bezier(.34,.69,.1,1),opacity .2s cubic-bezier(.34,.69,.1,1)}.slide-left-enter-from,.slide-left-appear-from{transform:translate(-100%)}.slide-left-enter-to,.slide-left-appear-to{transform:translate(0)}.slide-left-enter-active,.slide-left-appear-active{transition:transform .3s 
cubic-bezier(.34,.69,.1,1)}.slide-left-leave-from{transform:translate(0)}.slide-left-leave-to{transform:translate(-100%)}.slide-left-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-right-enter-from,.slide-right-appear-from{transform:translate(100%)}.slide-right-enter-to,.slide-right-appear-to{transform:translate(0)}.slide-right-enter-active,.slide-right-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-right-leave-from{transform:translate(0)}.slide-right-leave-to{transform:translate(100%)}.slide-right-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-top-enter-from,.slide-top-appear-from{transform:translateY(-100%)}.slide-top-enter-to,.slide-top-appear-to{transform:translateY(0)}.slide-top-enter-active,.slide-top-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-top-leave-from{transform:translateY(0)}.slide-top-leave-to{transform:translateY(-100%)}.slide-top-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-bottom-enter-from,.slide-bottom-appear-from{transform:translateY(100%)}.slide-bottom-enter-to,.slide-bottom-appear-to{transform:translateY(0)}.slide-bottom-enter-active,.slide-bottom-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-bottom-leave-from{transform:translateY(0)}.slide-bottom-leave-to{transform:translateY(100%)}.slide-bottom-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}body{--red-1: 255,236,232;--red-2: 253,205,197;--red-3: 251,172,163;--red-4: 249,137,129;--red-5: 247,101,96;--red-6: 245,63,63;--red-7: 203,39,45;--red-8: 161,21,30;--red-9: 119,8,19;--red-10: 77,0,10;--orangered-1: 255,243,232;--orangered-2: 253,221,195;--orangered-3: 252,197,159;--orangered-4: 250,172,123;--orangered-5: 249,144,87;--orangered-6: 247,114,52;--orangered-7: 204,81,32;--orangered-8: 162,53,17;--orangered-9: 119,31,6;--orangered-10: 77,14,0;--orange-1: 255,247,232;--orange-2: 255,228,186;--orange-3: 255,207,139;--orange-4: 255,182,93;--orange-5: 255,154,46;--orange-6: 255,125,0;--orange-7: 210,95,0;--orange-8: 166,69,0;--orange-9: 121,46,0;--orange-10: 77,27,0;--gold-1: 255,252,232;--gold-2: 253,244,191;--gold-3: 252,233,150;--gold-4: 250,220,109;--gold-5: 249,204,69;--gold-6: 247,186,30;--gold-7: 204,146,19;--gold-8: 162,109,10;--gold-9: 119,75,4;--gold-10: 77,45,0;--yellow-1: 254,255,232;--yellow-2: 254,254,190;--yellow-3: 253,250,148;--yellow-4: 252,242,107;--yellow-5: 251,232,66;--yellow-6: 250,220,25;--yellow-7: 207,175,15;--yellow-8: 163,132,8;--yellow-9: 120,93,3;--yellow-10: 77,56,0;--lime-1: 252,255,232;--lime-2: 237,248,187;--lime-3: 220,241,144;--lime-4: 201,233,104;--lime-5: 181,226,65;--lime-6: 159,219,29;--lime-7: 126,183,18;--lime-8: 95,148,10;--lime-9: 67,112,4;--lime-10: 42,77,0;--green-1: 232,255,234;--green-2: 175,240,181;--green-3: 123,225,136;--green-4: 76,210,99;--green-5: 35,195,67;--green-6: 0,180,42;--green-7: 0,154,41;--green-8: 0,128,38;--green-9: 0,102,34;--green-10: 0,77,28;--cyan-1: 232,255,251;--cyan-2: 183,244,236;--cyan-3: 137,233,224;--cyan-4: 94,223,214;--cyan-5: 55,212,207;--cyan-6: 20,201,201;--cyan-7: 13,165,170;--cyan-8: 7,130,139;--cyan-9: 3,97,108;--cyan-10: 0,66,77;--blue-1: 232,247,255;--blue-2: 195,231,254;--blue-3: 159,212,253;--blue-4: 123,192,252;--blue-5: 87,169,251;--blue-6: 52,145,250;--blue-7: 32,108,207;--blue-8: 17,75,163;--blue-9: 6,48,120;--blue-10: 0,26,77;--arcoblue-1: 232,243,255;--arcoblue-2: 190,218,255;--arcoblue-3: 148,191,255;--arcoblue-4: 106,161,255;--arcoblue-5: 
64,128,255;--arcoblue-6: 22,93,255;--arcoblue-7: 14,66,210;--arcoblue-8: 7,44,166;--arcoblue-9: 3,26,121;--arcoblue-10: 0,13,77;--purple-1: 245,232,255;--purple-2: 221,190,246;--purple-3: 195,150,237;--purple-4: 168,113,227;--purple-5: 141,78,218;--purple-6: 114,46,209;--purple-7: 85,29,176;--purple-8: 60,16,143;--purple-9: 39,6,110;--purple-10: 22,0,77;--pinkpurple-1: 255,232,251;--pinkpurple-2: 247,186,239;--pinkpurple-3: 240,142,230;--pinkpurple-4: 232,101,223;--pinkpurple-5: 225,62,219;--pinkpurple-6: 217,26,217;--pinkpurple-7: 176,16,182;--pinkpurple-8: 138,9,147;--pinkpurple-9: 101,3,112;--pinkpurple-10: 66,0,77;--magenta-1: 255,232,241;--magenta-2: 253,194,219;--magenta-3: 251,157,199;--magenta-4: 249,121,183;--magenta-5: 247,84,168;--magenta-6: 245,49,157;--magenta-7: 203,30,131;--magenta-8: 161,16,105;--magenta-9: 119,6,79;--magenta-10: 77,0,52;--gray-1: 247,248,250;--gray-2: 242,243,245;--gray-3: 229,230,235;--gray-4: 201,205,212;--gray-5: 169,174,184;--gray-6: 134,144,156;--gray-7: 107,119,133;--gray-8: 78,89,105;--gray-9: 39,46,59;--gray-10: 29,33,41;--success-1: var(--green-1);--success-2: var(--green-2);--success-3: var(--green-3);--success-4: var(--green-4);--success-5: var(--green-5);--success-6: var(--green-6);--success-7: var(--green-7);--success-8: var(--green-8);--success-9: var(--green-9);--success-10: var(--green-10);--primary-1: var(--arcoblue-1);--primary-2: var(--arcoblue-2);--primary-3: var(--arcoblue-3);--primary-4: var(--arcoblue-4);--primary-5: var(--arcoblue-5);--primary-6: var(--arcoblue-6);--primary-7: var(--arcoblue-7);--primary-8: var(--arcoblue-8);--primary-9: var(--arcoblue-9);--primary-10: var(--arcoblue-10);--danger-1: var(--red-1);--danger-2: var(--red-2);--danger-3: var(--red-3);--danger-4: var(--red-4);--danger-5: var(--red-5);--danger-6: var(--red-6);--danger-7: var(--red-7);--danger-8: var(--red-8);--danger-9: var(--red-9);--danger-10: var(--red-10);--warning-1: var(--orange-1);--warning-2: var(--orange-2);--warning-3: var(--orange-3);--warning-4: var(--orange-4);--warning-5: var(--orange-5);--warning-6: var(--orange-6);--warning-7: var(--orange-7);--warning-8: var(--orange-8);--warning-9: var(--orange-9);--warning-10: var(--orange-10);--link-1: var(--arcoblue-1);--link-2: var(--arcoblue-2);--link-3: var(--arcoblue-3);--link-4: var(--arcoblue-4);--link-5: var(--arcoblue-5);--link-6: var(--arcoblue-6);--link-7: var(--arcoblue-7);--link-8: var(--arcoblue-8);--link-9: var(--arcoblue-9);--link-10: var(--arcoblue-10)}body[arco-theme=dark]{--red-1: 77,0,10;--red-2: 119,6,17;--red-3: 161,22,31;--red-4: 203,46,52;--red-5: 245,78,78;--red-6: 247,105,101;--red-7: 249,141,134;--red-8: 251,176,167;--red-9: 253,209,202;--red-10: 255,240,236;--orangered-1: 77,14,0;--orangered-2: 119,30,5;--orangered-3: 162,55,20;--orangered-4: 204,87,41;--orangered-5: 247,126,69;--orangered-6: 249,146,90;--orangered-7: 250,173,125;--orangered-8: 252,198,161;--orangered-9: 253,222,197;--orangered-10: 255,244,235;--orange-1: 77,27,0;--orange-2: 121,48,4;--orange-3: 166,75,10;--orange-4: 210,105,19;--orange-5: 255,141,31;--orange-6: 255,150,38;--orange-7: 255,179,87;--orange-8: 255,205,135;--orange-9: 255,227,184;--orange-10: 255,247,232;--gold-1: 77,45,0;--gold-2: 119,75,4;--gold-3: 162,111,15;--gold-4: 204,150,31;--gold-5: 247,192,52;--gold-6: 249,204,68;--gold-7: 250,220,108;--gold-8: 252,233,149;--gold-9: 253,244,190;--gold-10: 255,252,232;--yellow-1: 77,56,0;--yellow-2: 120,94,7;--yellow-3: 163,134,20;--yellow-4: 207,179,37;--yellow-5: 250,225,60;--yellow-6: 
251,233,75;--yellow-7: 252,243,116;--yellow-8: 253,250,157;--yellow-9: 254,254,198;--yellow-10: 254,255,240;--lime-1: 42,77,0;--lime-2: 68,112,6;--lime-3: 98,148,18;--lime-4: 132,183,35;--lime-5: 168,219,57;--lime-6: 184,226,75;--lime-7: 203,233,112;--lime-8: 222,241,152;--lime-9: 238,248,194;--lime-10: 253,255,238;--green-1: 0,77,28;--green-2: 4,102,37;--green-3: 10,128,45;--green-4: 18,154,55;--green-5: 29,180,64;--green-6: 39,195,70;--green-7: 80,210,102;--green-8: 126,225,139;--green-9: 178,240,183;--green-10: 235,255,236;--cyan-1: 0,66,77;--cyan-2: 6,97,108;--cyan-3: 17,131,139;--cyan-4: 31,166,170;--cyan-5: 48,201,201;--cyan-6: 63,212,207;--cyan-7: 102,223,215;--cyan-8: 144,233,225;--cyan-9: 190,244,237;--cyan-10: 240,255,252;--blue-1: 0,26,77;--blue-2: 5,47,120;--blue-3: 19,76,163;--blue-4: 41,113,207;--blue-5: 70,154,250;--blue-6: 90,170,251;--blue-7: 125,193,252;--blue-8: 161,213,253;--blue-9: 198,232,254;--blue-10: 234,248,255;--arcoblue-1: 0,13,77;--arcoblue-2: 4,27,121;--arcoblue-3: 14,50,166;--arcoblue-4: 29,77,210;--arcoblue-5: 48,111,255;--arcoblue-6: 60,126,255;--arcoblue-7: 104,159,255;--arcoblue-8: 147,190,255;--arcoblue-9: 190,218,255;--arcoblue-10: 234,244,255;--purple-1: 22,0,77;--purple-2: 39,6,110;--purple-3: 62,19,143;--purple-4: 90,37,176;--purple-5: 123,61,209;--purple-6: 142,81,218;--purple-7: 169,116,227;--purple-8: 197,154,237;--purple-9: 223,194,246;--purple-10: 247,237,255;--pinkpurple-1: 66,0,77;--pinkpurple-2: 101,3,112;--pinkpurple-3: 138,13,147;--pinkpurple-4: 176,27,182;--pinkpurple-5: 217,46,217;--pinkpurple-6: 225,61,219;--pinkpurple-7: 232,102,223;--pinkpurple-8: 240,146,230;--pinkpurple-9: 247,193,240;--pinkpurple-10: 255,242,253;--magenta-1: 77,0,52;--magenta-2: 119,8,80;--magenta-3: 161,23,108;--magenta-4: 203,43,136;--magenta-5: 245,69,166;--magenta-6: 247,86,169;--magenta-7: 249,122,184;--magenta-8: 251,158,200;--magenta-9: 253,195,219;--magenta-10: 255,232,241;--gray-1: 23,23,26;--gray-2: 46,46,48;--gray-3: 72,72,73;--gray-4: 95,95,96;--gray-5: 120,120,122;--gray-6: 146,146,147;--gray-7: 171,171,172;--gray-8: 197,197,197;--gray-9: 223,223,223;--gray-10: 246,246,246;--primary-1: var(--arcoblue-1);--primary-2: var(--arcoblue-2);--primary-3: var(--arcoblue-3);--primary-4: var(--arcoblue-4);--primary-5: var(--arcoblue-5);--primary-6: var(--arcoblue-6);--primary-7: var(--arcoblue-7);--primary-8: var(--arcoblue-8);--primary-9: var(--arcoblue-9);--primary-10: var(--arcoblue-10);--success-1: var(--green-1);--success-2: var(--green-2);--success-3: var(--green-3);--success-4: var(--green-4);--success-5: var(--green-5);--success-6: var(--green-6);--success-7: var(--green-7);--success-8: var(--green-8);--success-9: var(--green-9);--success-10: var(--green-10);--danger-1: var(--red-1);--danger-2: var(--red-2);--danger-3: var(--red-3);--danger-4: var(--red-4);--danger-5: var(--red-5);--danger-6: var(--red-6);--danger-7: var(--red-7);--danger-8: var(--red-8);--danger-9: var(--red-9);--danger-10: var(--red-10);--warning-1: var(--orange-1);--warning-2: var(--orange-2);--warning-3: var(--orange-3);--warning-4: var(--orange-4);--warning-5: var(--orange-5);--warning-6: var(--orange-6);--warning-7: var(--orange-7);--warning-8: var(--orange-8);--warning-9: var(--orange-9);--warning-10: var(--orange-10);--link-1: var(--arcoblue-1);--link-2: var(--arcoblue-2);--link-3: var(--arcoblue-3);--link-4: var(--arcoblue-4);--link-5: var(--arcoblue-5);--link-6: var(--arcoblue-6);--link-7: var(--arcoblue-7);--link-8: var(--arcoblue-8);--link-9: var(--arcoblue-9);--link-10: 
var(--arcoblue-10)}body{--color-white: #ffffff;--color-black: #000000;--color-border: rgb(var(--gray-3));--color-bg-popup: var(--color-bg-5);--color-bg-1: #fff;--color-bg-2: #fff;--color-bg-3: #fff;--color-bg-4: #fff;--color-bg-5: #fff;--color-bg-white: #fff;--color-neutral-1: rgb(var(--gray-1));--color-neutral-2: rgb(var(--gray-2));--color-neutral-3: rgb(var(--gray-3));--color-neutral-4: rgb(var(--gray-4));--color-neutral-5: rgb(var(--gray-5));--color-neutral-6: rgb(var(--gray-6));--color-neutral-7: rgb(var(--gray-7));--color-neutral-8: rgb(var(--gray-8));--color-neutral-9: rgb(var(--gray-9));--color-neutral-10: rgb(var(--gray-10));--color-text-1: var(--color-neutral-10);--color-text-2: var(--color-neutral-8);--color-text-3: var(--color-neutral-6);--color-text-4: var(--color-neutral-4);--color-border-1: var(--color-neutral-2);--color-border-2: var(--color-neutral-3);--color-border-3: var(--color-neutral-4);--color-border-4: var(--color-neutral-6);--color-fill-1: var(--color-neutral-1);--color-fill-2: var(--color-neutral-2);--color-fill-3: var(--color-neutral-3);--color-fill-4: var(--color-neutral-4);--color-primary-light-1: rgb(var(--primary-1));--color-primary-light-2: rgb(var(--primary-2));--color-primary-light-3: rgb(var(--primary-3));--color-primary-light-4: rgb(var(--primary-4));--color-link-light-1: rgb(var(--link-1));--color-link-light-2: rgb(var(--link-2));--color-link-light-3: rgb(var(--link-3));--color-link-light-4: rgb(var(--link-4));--color-secondary: var(--color-neutral-2);--color-secondary-hover: var(--color-neutral-3);--color-secondary-active: var(--color-neutral-4);--color-secondary-disabled: var(--color-neutral-1);--color-danger-light-1: rgb(var(--danger-1));--color-danger-light-2: rgb(var(--danger-2));--color-danger-light-3: rgb(var(--danger-3));--color-danger-light-4: rgb(var(--danger-4));--color-success-light-1: rgb(var(--success-1));--color-success-light-2: rgb(var(--success-2));--color-success-light-3: rgb(var(--success-3));--color-success-light-4: rgb(var(--success-4));--color-warning-light-1: rgb(var(--warning-1));--color-warning-light-2: rgb(var(--warning-2));--color-warning-light-3: rgb(var(--warning-3));--color-warning-light-4: rgb(var(--warning-4));--border-radius-none: 0;--border-radius-small: 2px;--border-radius-medium: 4px;--border-radius-large: 8px;--border-radius-circle: 50%;--color-tooltip-bg: rgb(var(--gray-10));--color-spin-layer-bg: rgba(255, 255, 255, .6);--color-menu-dark-bg: #232324;--color-menu-light-bg: #ffffff;--color-menu-dark-hover: rgba(255, 255, 255, .04);--color-mask-bg: rgba(29, 33, 41, .6)}body[arco-theme=dark]{--color-white: rgba(255, 255, 255, .9);--color-black: #000000;--color-border: #333335;--color-bg-1: #17171a;--color-bg-2: #232324;--color-bg-3: #2a2a2b;--color-bg-4: #313132;--color-bg-5: #373739;--color-bg-white: #f6f6f6;--color-text-1: rgba(255, 255, 255, .9);--color-text-2: rgba(255, 255, 255, .7);--color-text-3: rgba(255, 255, 255, .5);--color-text-4: rgba(255, 255, 255, .3);--color-fill-1: rgba(255, 255, 255, .04);--color-fill-2: rgba(255, 255, 255, .08);--color-fill-3: rgba(255, 255, 255, .12);--color-fill-4: rgba(255, 255, 255, .16);--color-primary-light-1: rgba(var(--primary-6), .2);--color-primary-light-2: rgba(var(--primary-6), .35);--color-primary-light-3: rgba(var(--primary-6), .5);--color-primary-light-4: rgba(var(--primary-6), .65);--color-secondary: rgba(var(--gray-9), .08);--color-secondary-hover: rgba(var(--gray-8), .16);--color-secondary-active: rgba(var(--gray-7), .24);--color-secondary-disabled: 
rgba(var(--gray-9), .08);--color-danger-light-1: rgba(var(--danger-6), .2);--color-danger-light-2: rgba(var(--danger-6), .35);--color-danger-light-3: rgba(var(--danger-6), .5);--color-danger-light-4: rgba(var(--danger-6), .65);--color-success-light-1: rgb(var(--success-6), .2);--color-success-light-2: rgb(var(--success-6), .35);--color-success-light-3: rgb(var(--success-6), .5);--color-success-light-4: rgb(var(--success-6), .65);--color-warning-light-1: rgb(var(--warning-6), .2);--color-warning-light-2: rgb(var(--warning-6), .35);--color-warning-light-3: rgb(var(--warning-6), .5);--color-warning-light-4: rgb(var(--warning-6), .65);--color-link-light-1: rgb(var(--link-6), .2);--color-link-light-2: rgb(var(--link-6), .35);--color-link-light-3: rgb(var(--link-6), .5);--color-link-light-4: rgb(var(--link-6), .65);--color-tooltip-bg: #373739;--color-spin-layer-bg: rgba(51, 51, 51, .6);--color-menu-dark-bg: #232324;--color-menu-light-bg: #232324;--color-menu-dark-hover: var(--color-fill-2);--color-mask-bg: rgba(23, 23, 26, .6)}body{font-size:14px;font-family:Inter,-apple-system,BlinkMacSystemFont,PingFang SC,Hiragino Sans GB,noto sans,Microsoft YaHei,Helvetica Neue,Helvetica,Arial,sans-serif}.arco-trigger-wrapper{display:inline-block}.arco-trigger-popup{position:absolute;z-index:1000}.arco-trigger-arrow{position:absolute;z-index:-1;display:block;box-sizing:border-box;width:8px;height:8px;background-color:var(--color-bg-5);content:""}.arco-trigger-popup[trigger-placement=top] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=tl] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=tr] .arco-trigger-arrow{border-top:none;border-left:none;border-bottom-right-radius:var(--border-radius-small)}.arco-trigger-popup[trigger-placement=bottom] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=bl] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=br] .arco-trigger-arrow{border-right:none;border-bottom:none;border-top-left-radius:var(--border-radius-small)}.arco-trigger-popup[trigger-placement=left] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=lt] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=lb] .arco-trigger-arrow{border-bottom:none;border-left:none;border-top-right-radius:var(--border-radius-small)}.arco-trigger-popup[trigger-placement=right] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=rt] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=rb] .arco-trigger-arrow{border-top:none;border-right:none;border-bottom-left-radius:var(--border-radius-small)}.arco-auto-tooltip{display:block;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-input-label{display:inline-flex;box-sizing:border-box;width:100%;padding-right:12px;padding-left:12px;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s cubic-bezier(0,0,1,1);cursor:pointer}.arco-input-label.arco-input-label-search{cursor:text}.arco-input-label.arco-input-label-search .arco-input-label-input,.arco-input-label.arco-input-label-search .arco-input-label-value{pointer-events:none}.arco-input-label:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-input-label:focus-within,.arco-input-label.arco-input-label-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 
var(--color-primary-light-2)}.arco-input-label.arco-input-label-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-input-label.arco-input-label-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-input-label.arco-input-label-disabled .arco-input-label-prefix,.arco-input-label.arco-input-label-disabled .arco-input-label-suffix{color:inherit}.arco-input-label.arco-input-label-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-input-label.arco-input-label-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-input-label.arco-input-label-error:focus-within,.arco-input-label.arco-input-label-error.arco-input-label-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-input-label .arco-input-label-prefix,.arco-input-label .arco-input-label-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-input-label .arco-input-label-prefix>svg,.arco-input-label .arco-input-label-suffix>svg{font-size:14px}.arco-input-label .arco-input-label-prefix{padding-right:12px;color:var(--color-text-2)}.arco-input-label .arco-input-label-suffix{padding-left:12px;color:var(--color-text-2)}.arco-input-label .arco-input-label-suffix .arco-feedback-icon{display:inline-flex}.arco-input-label .arco-input-label-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-input-label .arco-input-label-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-input-label .arco-input-label-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-input-label .arco-input-label-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-input-label .arco-input-label-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-input-label .arco-input-label-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-input-label:hover .arco-input-label-clear-btn{visibility:visible}.arco-input-label:not(.arco-input-label-focus) .arco-input-label-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-input-label .arco-input-label-input{width:100%;padding-right:0;padding-left:0;color:inherit;line-height:1.5715;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0)}.arco-input-label .arco-input-label-input::placeholder{color:var(--color-text-3)}.arco-input-label .arco-input-label-input[disabled]::placeholder{color:var(--color-text-4)}.arco-input-label .arco-input-label-input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-input-label .arco-input-label-input-hidden{position:absolute;width:0!important}.arco-input-label .arco-input-label-value{display:flex;align-items:center;box-sizing:border-box;width:100%;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-input-label .arco-input-label-value:after{font-size:0;line-height:0;visibility:hidden;content:"."}.arco-input-label .arco-input-label-value-hidden{display:none}.arco-input-label.arco-input-label-size-mini .arco-input-label-input,.arco-input-label.arco-input-label-size-mini .arco-input-label-value{padding-top:1px;padding-bottom:1px;font-size:12px;line-height:1.667}.arco-input-label.arco-input-label-size-mini 
.arco-input-label-value{min-height:22px}.arco-input-label.arco-input-label-size-medium .arco-input-label-input,.arco-input-label.arco-input-label-size-medium .arco-input-label-value{padding-top:4px;padding-bottom:4px;font-size:14px;line-height:1.5715}.arco-input-label.arco-input-label-size-medium .arco-input-label-value{min-height:30px}.arco-input-label.arco-input-label-size-small .arco-input-label-input,.arco-input-label.arco-input-label-size-small .arco-input-label-value{padding-top:2px;padding-bottom:2px;font-size:14px;line-height:1.5715}.arco-input-label.arco-input-label-size-small .arco-input-label-value{min-height:26px}.arco-input-label.arco-input-label-size-large .arco-input-label-input,.arco-input-label.arco-input-label-size-large .arco-input-label-value{padding-top:6px;padding-bottom:6px;font-size:14px;line-height:1.5715}.arco-input-label.arco-input-label-size-large .arco-input-label-value{min-height:34px}.arco-picker{position:relative;display:inline-flex;align-items:center;box-sizing:border-box;padding:4px 11px 4px 4px;line-height:1.5715;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);transition:all .1s cubic-bezier(0,0,1,1)}.arco-picker-input{display:inline-flex;flex:1}.arco-picker input{width:100%;padding:0 0 0 8px;color:var(--color-text-2);line-height:1.5715;text-align:left;background-color:transparent;border:none;outline:none;transition:all .1s cubic-bezier(0,0,1,1)}.arco-picker input::placeholder{color:var(--color-text-3)}.arco-picker input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-picker-has-prefix{padding-left:12px}.arco-picker-prefix{padding-right:4px;color:var(--color-text-2);font-size:14px}.arco-picker-suffix{display:inline-flex;align-items:center;margin-left:4px}.arco-picker-suffix .arco-feedback-icon{display:inline-flex}.arco-picker-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-picker-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-picker-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-picker-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-picker-suffix .arco-feedback-icon{margin-left:4px}.arco-picker-suffix-icon{color:var(--color-text-2)}.arco-picker .arco-picker-clear-icon{display:none;color:var(--color-text-2);font-size:12px}.arco-picker:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-picker:not(.arco-picker-disabled):hover .arco-picker-clear-icon{display:inline-block}.arco-picker:not(.arco-picker-disabled):hover .arco-picker-suffix .arco-picker-clear-icon+span{display:none}.arco-picker input[disabled]{color:var(--color-text-4);cursor:not-allowed}.arco-picker input[disabled]::placeholder{color:var(--color-text-4)}.arco-picker-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-picker-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-picker-focused{box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-picker-focused,.arco-picker-focused:hover{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6))}.arco-picker-focused.arco-picker-error{border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-picker-focused .arco-picker-input-active input,.arco-picker-focused:hover .arco-picker-input-active 
input{background:var(--color-fill-2)}.arco-picker-disabled,.arco-picker-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-picker-disabled input[disabled],.arco-picker-disabled:hover input[disabled]{color:var(--color-text-4);cursor:not-allowed}.arco-picker-disabled input[disabled]::placeholder,.arco-picker-disabled:hover input[disabled]::placeholder{color:var(--color-text-4)}.arco-picker-separator{min-width:10px;padding:0 8px;color:var(--color-text-3)}.arco-picker-disabled .arco-picker-separator,.arco-picker-disabled .arco-picker-suffix-icon{color:var(--color-text-4)}.arco-picker-size-mini{height:24px}.arco-picker-size-mini input{font-size:12px}.arco-picker-size-small{height:28px}.arco-picker-size-small input{font-size:14px}.arco-picker-size-medium{height:32px}.arco-picker-size-medium input{font-size:14px}.arco-picker-size-large{height:36px}.arco-picker-size-large input{font-size:14px}.arco-select-view-single{display:inline-flex;box-sizing:border-box;width:100%;padding-right:12px;padding-left:12px;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s cubic-bezier(0,0,1,1);cursor:pointer}.arco-select-view-single.arco-select-view-search{cursor:text}.arco-select-view-single.arco-select-view-search .arco-select-view-input,.arco-select-view-single.arco-select-view-search .arco-select-view-value{pointer-events:none}.arco-select-view-single:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-select-view-single:focus-within,.arco-select-view-single.arco-select-view-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-select-view-single.arco-select-view-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-select-view-single.arco-select-view-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-select-view-single.arco-select-view-disabled .arco-select-view-prefix,.arco-select-view-single.arco-select-view-disabled .arco-select-view-suffix{color:inherit}.arco-select-view-single.arco-select-view-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-select-view-single.arco-select-view-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-select-view-single.arco-select-view-error:focus-within,.arco-select-view-single.arco-select-view-error.arco-select-view-single-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-select-view-single .arco-select-view-prefix,.arco-select-view-single .arco-select-view-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-select-view-single .arco-select-view-prefix>svg,.arco-select-view-single .arco-select-view-suffix>svg{font-size:14px}.arco-select-view-single .arco-select-view-prefix{padding-right:12px;color:var(--color-text-2)}.arco-select-view-single .arco-select-view-suffix{padding-left:12px;color:var(--color-text-2)}.arco-select-view-single .arco-select-view-suffix .arco-feedback-icon{display:inline-flex}.arco-select-view-single .arco-select-view-suffix 
.arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-select-view-single .arco-select-view-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-select-view-single .arco-select-view-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-select-view-single .arco-select-view-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-select-view-single .arco-select-view-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-select-view-single .arco-select-view-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-select-view-single:hover .arco-select-view-clear-btn{visibility:visible}.arco-select-view-single:not(.arco-select-view-focus) .arco-select-view-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-select-view-single .arco-select-view-input{width:100%;padding-right:0;padding-left:0;color:inherit;line-height:1.5715;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0)}.arco-select-view-single .arco-select-view-input::placeholder{color:var(--color-text-3)}.arco-select-view-single .arco-select-view-input[disabled]::placeholder{color:var(--color-text-4)}.arco-select-view-single .arco-select-view-input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-select-view-single .arco-select-view-input-hidden{position:absolute;width:0!important}.arco-select-view-single .arco-select-view-value{display:flex;align-items:center;box-sizing:border-box;width:100%;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-select-view-single .arco-select-view-value:after{font-size:0;line-height:0;visibility:hidden;content:"."}.arco-select-view-single .arco-select-view-value-hidden{display:none}.arco-select-view-single.arco-select-view-size-mini .arco-select-view-input,.arco-select-view-single.arco-select-view-size-mini .arco-select-view-value{padding-top:1px;padding-bottom:1px;font-size:12px;line-height:1.667}.arco-select-view-single.arco-select-view-size-mini .arco-select-view-value{min-height:22px}.arco-select-view-single.arco-select-view-size-medium .arco-select-view-input,.arco-select-view-single.arco-select-view-size-medium .arco-select-view-value{padding-top:4px;padding-bottom:4px;font-size:14px;line-height:1.5715}.arco-select-view-single.arco-select-view-size-medium .arco-select-view-value{min-height:30px}.arco-select-view-single.arco-select-view-size-small .arco-select-view-input,.arco-select-view-single.arco-select-view-size-small .arco-select-view-value{padding-top:2px;padding-bottom:2px;font-size:14px;line-height:1.5715}.arco-select-view-single.arco-select-view-size-small .arco-select-view-value{min-height:26px}.arco-select-view-single.arco-select-view-size-large .arco-select-view-input,.arco-select-view-single.arco-select-view-size-large .arco-select-view-value{padding-top:6px;padding-bottom:6px;font-size:14px;line-height:1.5715}.arco-select-view-single.arco-select-view-size-large .arco-select-view-value{min-height:34px}.arco-select-view-multiple{display:inline-flex;box-sizing:border-box;width:100%;padding-right:12px;padding-left:12px;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s 
cubic-bezier(0,0,1,1)}.arco-select-view-multiple:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-select-view-multiple:focus-within,.arco-select-view-multiple.arco-select-view-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-select-view-multiple.arco-select-view-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-select-view-multiple.arco-select-view-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-select-view-multiple.arco-select-view-disabled .arco-select-view-prefix,.arco-select-view-multiple.arco-select-view-disabled .arco-select-view-suffix{color:inherit}.arco-select-view-multiple.arco-select-view-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-select-view-multiple.arco-select-view-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-select-view-multiple.arco-select-view-error:focus-within,.arco-select-view-multiple.arco-select-view-error.arco-select-view-multiple-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-select-view-multiple .arco-select-view-prefix,.arco-select-view-multiple .arco-select-view-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-select-view-multiple .arco-select-view-prefix>svg,.arco-select-view-multiple .arco-select-view-suffix>svg{font-size:14px}.arco-select-view-multiple .arco-select-view-prefix{padding-right:12px;color:var(--color-text-2)}.arco-select-view-multiple .arco-select-view-suffix{padding-left:12px;color:var(--color-text-2)}.arco-select-view-multiple .arco-select-view-suffix .arco-feedback-icon{display:inline-flex}.arco-select-view-multiple .arco-select-view-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-select-view-multiple .arco-select-view-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-select-view-multiple .arco-select-view-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-select-view-multiple .arco-select-view-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-select-view-multiple .arco-select-view-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-select-view-multiple .arco-select-view-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-select-view-multiple:hover .arco-select-view-clear-btn{visibility:visible}.arco-select-view-multiple:not(.arco-select-view-focus) .arco-select-view-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-select-view-multiple.arco-select-view-has-tag{padding-right:4px;padding-left:4px}.arco-select-view-multiple.arco-select-view-has-prefix{padding-left:12px}.arco-select-view-multiple.arco-select-view-has-suffix{padding-right:12px}.arco-select-view-multiple .arco-select-view-inner{flex:1;overflow:hidden;line-height:0}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-tag{display:inline-flex;align-items:center;margin-right:4px;color:var(--color-text-1);font-size:12px;white-space:pre-wrap;word-break:break-word;background-color:var(--color-bg-2);border-color:var(--color-fill-3)}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-tag 
.arco-icon-hover:hover:before{background-color:var(--color-fill-2)}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-tag.arco-tag-custom-color{color:var(--color-white)}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-tag.arco-tag-custom-color .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:#fff3}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-input{width:100%;padding-right:0;padding-left:0;color:inherit;line-height:1.5715;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0);box-sizing:border-box}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-input::placeholder{color:var(--color-text-3)}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-input[disabled]::placeholder{color:var(--color-text-4)}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-select-view-multiple .arco-select-view-mirror{position:absolute;top:0;left:0;white-space:pre;visibility:hidden;pointer-events:none}.arco-select-view-multiple.arco-select-view-focus .arco-select-view-tag{background-color:var(--color-fill-2);border-color:var(--color-fill-2)}.arco-select-view-multiple.arco-select-view-focus .arco-select-view-tag .arco-icon-hover:hover:before{background-color:var(--color-fill-3)}.arco-select-view-multiple.arco-select-view-disabled .arco-select-view-tag{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:var(--color-fill-3)}.arco-select-view-multiple.arco-select-view-readonly,.arco-select-view-multiple.arco-select-view-disabled-input{cursor:default}.arco-select-view-multiple.arco-select-view-size-mini{font-size:12px}.arco-select-view-multiple.arco-select-view-size-mini .arco-select-view-inner{padding-top:0;padding-bottom:0}.arco-select-view-multiple.arco-select-view-size-mini .arco-select-view-tag,.arco-select-view-multiple.arco-select-view-size-mini .arco-select-view-input{margin-top:1px;margin-bottom:1px;line-height:18px;vertical-align:middle}.arco-select-view-multiple.arco-select-view-size-mini .arco-select-view-tag{height:auto;min-height:20px}.arco-select-view-multiple.arco-select-view-size-mini .arco-select-view-input{height:20px}.arco-select-view-multiple.arco-select-view-size-medium{font-size:14px}.arco-select-view-multiple.arco-select-view-size-medium .arco-select-view-inner{padding-top:2px;padding-bottom:2px}.arco-select-view-multiple.arco-select-view-size-medium .arco-select-view-tag,.arco-select-view-multiple.arco-select-view-size-medium .arco-select-view-input{margin-top:1px;margin-bottom:1px;line-height:22px;vertical-align:middle}.arco-select-view-multiple.arco-select-view-size-medium .arco-select-view-tag{height:auto;min-height:24px}.arco-select-view-multiple.arco-select-view-size-medium .arco-select-view-input{height:24px}.arco-select-view-multiple.arco-select-view-size-small{font-size:14px}.arco-select-view-multiple.arco-select-view-size-small .arco-select-view-inner{padding-top:2px;padding-bottom:2px}.arco-select-view-multiple.arco-select-view-size-small .arco-select-view-tag,.arco-select-view-multiple.arco-select-view-size-small .arco-select-view-input{margin-top:1px;margin-bottom:1px;line-height:18px;vertical-align:middle}.arco-select-view-multiple.arco-select-view-size-small .arco-select-view-tag{height:auto;min-height:20px}.arco-select-view-multiple.arco-select-view-size-small 
.arco-select-view-input{height:20px}.arco-select-view-multiple.arco-select-view-size-large{font-size:14px}.arco-select-view-multiple.arco-select-view-size-large .arco-select-view-inner{padding-top:2px;padding-bottom:2px}.arco-select-view-multiple.arco-select-view-size-large .arco-select-view-tag,.arco-select-view-multiple.arco-select-view-size-large .arco-select-view-input{margin-top:1px;margin-bottom:1px;line-height:26px;vertical-align:middle}.arco-select-view-multiple.arco-select-view-size-large .arco-select-view-tag{height:auto;min-height:28px}.arco-select-view-multiple.arco-select-view-size-large .arco-select-view-input{height:28px}.arco-select-view-multiple.arco-select-view-disabled-input{cursor:pointer}.arco-select-view.arco-select-view-borderless{background:none!important;border:none!important;box-shadow:none!important}.arco-select-view-suffix .arco-feedback-icon{margin-left:4px}.arco-select-view-clear-btn svg,.arco-select-view-icon svg{display:block;font-size:12px}.arco-select-view-opened .arco-select-view-arrow-icon{transform:rotate(180deg)}.arco-select-view-expand-icon{transform:rotate(-45deg)}.arco-select-view-clear-btn{display:none;cursor:pointer}.arco-select-view:hover .arco-select-view-clear-btn{display:block}.arco-select-view:hover .arco-select-view-clear-btn~*{display:none}.arco-affix{position:fixed;z-index:999}.arco-alert{display:flex;align-items:center;box-sizing:border-box;width:100%;padding:8px 15px;overflow:hidden;font-size:14px;line-height:1.5715;text-align:left;border-radius:var(--border-radius-small)}.arco-alert-with-title{align-items:flex-start;padding:15px}.arco-alert-normal{background-color:var(--color-neutral-2);border:1px solid transparent}.arco-alert-info{background-color:var(--color-primary-light-1);border:1px solid transparent}.arco-alert-success{background-color:var(--color-success-light-1);border:1px solid transparent}.arco-alert-warning{background-color:var(--color-warning-light-1);border:1px solid transparent}.arco-alert-error{background-color:var(--color-danger-light-1);border:1px solid transparent}.arco-alert-banner{border:none;border-radius:0}.arco-alert-body{position:relative;flex:1}.arco-alert-title{margin-bottom:4px;font-weight:500;font-size:16px;line-height:1.5}.arco-alert-normal .arco-alert-title,.arco-alert-normal .arco-alert-content{color:var(--color-text-1)}.arco-alert-normal.arco-alert-with-title .arco-alert-content{color:var(--color-text-2)}.arco-alert-info .arco-alert-title,.arco-alert-info .arco-alert-content{color:var(--color-text-1)}.arco-alert-info.arco-alert-with-title .arco-alert-content{color:var(--color-text-2)}.arco-alert-success .arco-alert-title,.arco-alert-success .arco-alert-content{color:var(--color-text-1)}.arco-alert-success.arco-alert-with-title .arco-alert-content{color:var(--color-text-2)}.arco-alert-warning .arco-alert-title,.arco-alert-warning .arco-alert-content{color:var(--color-text-1)}.arco-alert-warning.arco-alert-with-title .arco-alert-content{color:var(--color-text-2)}.arco-alert-error .arco-alert-title,.arco-alert-error .arco-alert-content{color:var(--color-text-1)}.arco-alert-error.arco-alert-with-title .arco-alert-content{color:var(--color-text-2)}.arco-alert-icon{margin-right:8px}.arco-alert-icon svg{font-size:16px;vertical-align:-3px}.arco-alert-with-title .arco-alert-icon svg{font-size:18px;vertical-align:-5px}.arco-alert-normal .arco-alert-icon svg{color:var(--color-neutral-4)}.arco-alert-info .arco-alert-icon svg{color:rgb(var(--primary-6))}.arco-alert-success .arco-alert-icon 
svg{color:rgb(var(--success-6))}.arco-alert-warning .arco-alert-icon svg{color:rgb(var(--warning-6))}.arco-alert-error .arco-alert-icon svg{color:rgb(var(--danger-6))}.arco-alert-close-btn{top:4px;right:0;box-sizing:border-box;margin-left:8px;padding:0;color:var(--color-text-2);font-size:12px;background-color:transparent;border:none;outline:none;cursor:pointer;transition:color .1s cubic-bezier(0,0,1,1)}.arco-alert-close-btn:hover{color:var(--color-text-1)}.arco-alert-action+.arco-alert-close-btn{margin-left:8px}.arco-alert-action{margin-left:8px}.arco-alert-with-title .arco-alert-close-btn{margin-top:0;margin-right:0}.arco-anchor{position:relative;width:150px;overflow:auto}.arco-anchor-line-slider{position:absolute;top:0;left:0;z-index:1;width:2px;height:12px;margin-top:9.0005px;background-color:rgb(var(--primary-6));transition:top .2s cubic-bezier(.34,.69,.1,1)}.arco-anchor-list{position:relative;margin-top:0;margin-bottom:0;margin-left:4px;padding-left:0;list-style:none}.arco-anchor-list:before{position:absolute;left:-4px;width:2px;height:100%;background-color:var(--color-fill-3);content:""}.arco-anchor-sublist{margin-top:0;margin-bottom:0;padding-left:0;list-style:none}.arco-anchor-link-item{margin-bottom:2px}.arco-anchor-link-item .arco-anchor-link{display:block;margin-bottom:2px;padding:4px 8px;overflow:hidden;color:var(--color-text-2);font-size:14px;line-height:1.5715;white-space:nowrap;text-decoration:none;text-overflow:ellipsis;border-radius:var(--border-radius-small);cursor:pointer}.arco-anchor-link-item .arco-anchor-link:hover{color:var(--color-text-1);font-weight:500;background-color:var(--color-fill-2)}.arco-anchor-link-active>.arco-anchor-link{color:var(--color-text-1);font-weight:500;transition:all .1s cubic-bezier(0,0,1,1)}.arco-anchor-link-item .arco-anchor-link-item{margin-left:16px}.arco-anchor-line-less .arco-anchor-list{margin-left:0}.arco-anchor-line-less .arco-anchor-list:before{display:none}.arco-anchor-line-less .arco-anchor-link-active>.arco-anchor-link{color:rgb(var(--primary-6));font-weight:500;background-color:var(--color-fill-2)}.arco-autocomplete-popup .arco-select-popup{background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-autocomplete-popup .arco-select-popup .arco-select-popup-inner{max-height:200px;padding:4px 0}.arco-autocomplete-popup .arco-select-popup .arco-select-option{height:36px;padding:0 12px;font-size:14px;line-height:36px;color:var(--color-text-1);background-color:var(--color-bg-popup)}.arco-autocomplete-popup .arco-select-popup .arco-select-option-selected{color:var(--color-text-1);background-color:var(--color-bg-popup)}.arco-autocomplete-popup .arco-select-popup .arco-select-option-hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-autocomplete-popup .arco-select-popup .arco-select-option-disabled{color:var(--color-text-4);background-color:var(--color-bg-popup)}.arco-autocomplete-popup .arco-select-popup .arco-select-option-selected{font-weight:500}.arco-avatar{position:relative;display:inline-flex;align-items:center;box-sizing:border-box;width:40px;height:40px;color:var(--color-white);font-size:20px;white-space:nowrap;vertical-align:middle;background-color:var(--color-fill-4)}.arco-avatar-circle{border-radius:var(--border-radius-circle)}.arco-avatar-circle .arco-avatar-image{overflow:hidden;border-radius:var(--border-radius-circle)}.arco-avatar-square{border-radius:var(--border-radius-medium)}.arco-avatar-square 
.arco-avatar-image{overflow:hidden;border-radius:var(--border-radius-medium)}.arco-avatar-text{position:absolute;left:50%;font-weight:500;line-height:1;transform:translate(-50%);transform-origin:0 center}.arco-avatar-image{display:inline-block;width:100%;height:100%}.arco-avatar-image-icon{display:flex;align-items:center;justify-content:center;width:100%;height:100%}.arco-avatar-image img,.arco-avatar-image picture{width:100%;height:100%}.arco-avatar-trigger-icon-button{position:absolute;right:-4px;bottom:-4px;z-index:1;width:20px;height:20px;color:var(--color-fill-4);font-size:12px;line-height:20px;text-align:center;background-color:var(--color-neutral-2);border-radius:var(--border-radius-circle);transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-avatar-trigger-icon-mask{position:absolute;top:0;left:0;z-index:0;display:flex;align-items:center;justify-content:center;width:100%;height:100%;color:var(--color-white);font-size:16px;background-color:#1d212999;border-radius:var(--border-radius-medium);opacity:0;transition:all .1s cubic-bezier(0,0,1,1)}.arco-avatar-circle .arco-avatar-trigger-icon-mask{border-radius:var(--border-radius-circle)}.arco-avatar-with-trigger-icon{cursor:pointer}.arco-avatar-with-trigger-icon:hover .arco-avatar-trigger-icon-mask{z-index:2;opacity:1}.arco-avatar-with-trigger-icon:hover .arco-avatar-trigger-icon-button{background-color:var(--color-neutral-3)}.arco-avatar-group{display:inline-block;line-height:0}.arco-avatar-group-max-count-avatar{color:var(--color-white);font-size:20px;cursor:default}.arco-avatar-group .arco-avatar{border:2px solid var(--color-bg-2)}.arco-avatar-group .arco-avatar:not(:first-child){margin-left:-10px}.arco-avatar-group-popover .arco-avatar:not(:first-child){margin-left:4px}.arco-back-top{position:fixed;right:24px;bottom:24px;z-index:100}.arco-back-top-btn{width:40px;height:40px;color:var(--color-white);font-size:12px;text-align:center;background-color:rgb(var(--primary-6));border:none;border-radius:var(--border-radius-circle);outline:none;cursor:pointer;transition:all .2s cubic-bezier(0,0,1,1)}.arco-back-top-btn:hover{background-color:rgb(var(--primary-5))}.arco-back-top-btn svg{font-size:14px}.arco-badge{position:relative;display:inline-block;line-height:1}.arco-badge-number,.arco-badge-dot,.arco-badge-text,.arco-badge-custom-dot{position:absolute;top:2px;right:2px;z-index:2;box-sizing:border-box;overflow:hidden;text-align:center;border-radius:20px;transform:translate(50%,-50%);transform-origin:100% 0%}.arco-badge-custom-dot{background-color:var(--color-bg-2)}.arco-badge-number,.arco-badge-text{min-width:20px;height:20px;padding:0 6px;color:var(--color-white);font-weight:500;font-size:12px;line-height:20px;background-color:rgb(var(--danger-6));box-shadow:0 0 0 2px var(--color-bg-2)}.arco-badge-dot{width:6px;height:6px;background-color:rgb(var(--danger-6));border-radius:var(--border-radius-circle);box-shadow:0 0 0 2px var(--color-bg-2)}.arco-badge-no-children .arco-badge-dot,.arco-badge-no-children .arco-badge-number,.arco-badge-no-children 
.arco-badge-text{position:relative;top:unset;right:unset;display:inline-block;transform:none}.arco-badge-status-wrapper{display:inline-flex;align-items:center}.arco-badge-status-dot{display:inline-block;width:6px;height:6px;border-radius:var(--border-radius-circle)}.arco-badge-status-normal{background-color:var(--color-fill-4)}.arco-badge-status-processing{background-color:rgb(var(--primary-6))}.arco-badge-status-success{background-color:rgb(var(--success-6))}.arco-badge-status-warning{background-color:rgb(var(--warning-6))}.arco-badge-status-danger,.arco-badge-color-red{background-color:rgb(var(--danger-6))}.arco-badge-color-orangered{background-color:#f77234}.arco-badge-color-orange{background-color:rgb(var(--orange-6))}.arco-badge-color-gold{background-color:rgb(var(--gold-6))}.arco-badge-color-lime{background-color:rgb(var(--lime-6))}.arco-badge-color-green{background-color:rgb(var(--success-6))}.arco-badge-color-cyan{background-color:rgb(var(--cyan-6))}.arco-badge-color-arcoblue{background-color:rgb(var(--primary-6))}.arco-badge-color-purple{background-color:rgb(var(--purple-6))}.arco-badge-color-pinkpurple{background-color:rgb(var(--pinkpurple-6))}.arco-badge-color-magenta{background-color:rgb(var(--magenta-6))}.arco-badge-color-gray{background-color:rgb(var(--gray-4))}.arco-badge .arco-badge-status-text{margin-left:8px;color:var(--color-text-1);font-size:12px;line-height:1.5715}.arco-badge-number-text{display:inline-block;animation:arco-badge-scale .5s cubic-bezier(.3,1.3,.3,1)}@keyframes arco-badge-scale{0%{transform:scale(0)}to{transform:scale(1)}}.badge-zoom-enter,.badge-zoom-appear{transform:translate(50%,-50%) scale(.2);transform-origin:center}.badge-zoom-enter-active,.badge-zoom-appear-active{transform:translate(50%,-50%) scale(1);transform-origin:center;opacity:1;transition:opacity .3s cubic-bezier(.3,1.3,.3,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.badge-zoom-exit{transform:translate(50%,-50%) scale(1);transform-origin:center;opacity:1}.badge-zoom-exit-active{transform:translate(50%,-50%) scale(.2);transform-origin:center;opacity:0;transition:opacity .3s cubic-bezier(.3,1.3,.3,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.arco-breadcrumb{display:inline-flex;align-items:center;color:var(--color-text-2);font-size:14px}.arco-breadcrumb-icon{color:var(--color-text-2)}.arco-breadcrumb-item{display:inline-block;padding:0 4px;color:var(--color-text-2);line-height:24px;vertical-align:middle}.arco-breadcrumb-item>.arco-icon{color:var(--color-text-3)}.arco-breadcrumb-item a{display:inline-block;margin:0 -4px;padding:0 4px;color:var(--color-text-2);text-decoration:none;border-radius:var(--border-radius-small);background-color:transparent}.arco-breadcrumb-item a:hover{color:rgb(var(--link-6));background-color:var(--color-fill-2)}.arco-breadcrumb-item:last-child{color:var(--color-text-1);font-weight:500}.arco-breadcrumb-item-ellipses{position:relative;top:-3px;display:inline-block;padding:0 4px;color:var(--color-text-2)}.arco-breadcrumb-item-separator{display:inline-block;margin:0 4px;color:var(--color-text-4);line-height:24px;vertical-align:middle}.arco-breadcrumb-item-with-dropdown{cursor:pointer}.arco-breadcrumb-item-dropdown-icon{margin-left:4px;color:var(--color-text-2);font-size:12px}.arco-breadcrumb-item-dropdown-icon-active svg{transform:rotate(180deg)}.arco-btn{position:relative;display:inline-flex;align-items:center;justify-content:center;box-sizing:border-box;font-weight:400;line-height:1.5715;white-space:nowrap;outline:none;cursor:pointer;transition:all .1s 
cubic-bezier(0,0,1,1);-webkit-appearance:none;user-select:none}.arco-btn>a:only-child{color:currentColor}.arco-btn:active{transition:none}.arco-btn-long{display:flex;width:100%}.arco-btn-link{display:inline-flex;align-items:center;justify-content:center;text-decoration:none}.arco-btn-link:not([href]){color:var(--color-text-4)}.arco-btn-link:hover{text-decoration:none}.arco-btn-link.arco-btn-only-icon{display:inline-flex;align-items:center;justify-content:center;vertical-align:top}.arco-btn.arco-btn-only-icon .arco-btn-icon{display:flex;justify-content:center}.arco-btn-loading{position:relative;cursor:default}.arco-btn-loading:before{position:absolute;top:-1px;right:-1px;bottom:-1px;left:-1px;z-index:1;display:block;background:#fff;border-radius:inherit;opacity:.4;transition:opacity .1s cubic-bezier(0,0,1,1);content:"";pointer-events:none}.arco-btn-loading-fixed-width{transition:none}.arco-btn-two-chinese-chars>*:not(svg){margin-right:-.3em;letter-spacing:.3em}.arco-btn-outline,.arco-btn-outline[type=button],.arco-btn-outline[type=submit]{color:rgb(var(--primary-6));background-color:transparent;border:1px solid rgb(var(--primary-6))}.arco-btn-outline:hover,.arco-btn-outline[type=button]:hover,.arco-btn-outline[type=submit]:hover{color:rgb(var(--primary-5));background-color:transparent;border-color:rgb(var(--primary-5))}.arco-btn-outline:focus-visible,.arco-btn-outline[type=button]:focus-visible,.arco-btn-outline[type=submit]:focus-visible{box-shadow:0 0 0 .25em rgb(var(--primary-3))}.arco-btn-outline:active,.arco-btn-outline[type=button]:active,.arco-btn-outline[type=submit]:active{color:rgb(var(--primary-7));background-color:transparent;border-color:rgb(var(--primary-7))}.arco-btn-outline.arco-btn-loading,.arco-btn-outline[type=button].arco-btn-loading,.arco-btn-outline[type=submit].arco-btn-loading{color:rgb(var(--primary-6));background-color:transparent;border:1px solid rgb(var(--primary-6))}.arco-btn-outline.arco-btn-disabled,.arco-btn-outline[type=button].arco-btn-disabled,.arco-btn-outline[type=submit].arco-btn-disabled{color:var(--color-primary-light-3);background-color:transparent;border:1px solid var(--color-primary-light-3);cursor:not-allowed}.arco-btn-outline.arco-btn-status-warning{color:rgb(var(--warning-6));background-color:transparent;border-color:rgb(var(--warning-6))}.arco-btn-outline.arco-btn-status-warning:hover{color:rgb(var(--warning-5));background-color:transparent;border-color:rgb(var(--warning-5))}.arco-btn-outline.arco-btn-status-warning:focus-visible{box-shadow:0 0 0 .25em rgb(var(--warning-3))}.arco-btn-outline.arco-btn-status-warning:active{color:rgb(var(--warning-7));background-color:transparent;border-color:rgb(var(--warning-7))}.arco-btn-outline.arco-btn-status-warning.arco-btn-loading{color:rgb(var(--warning-6));background-color:transparent;border-color:rgb(var(--warning-6))}.arco-btn-outline.arco-btn-status-warning.arco-btn-disabled{color:var(--color-warning-light-3);background-color:transparent;border:1px solid var(--color-warning-light-3)}.arco-btn-outline.arco-btn-status-danger{color:rgb(var(--danger-6));background-color:transparent;border-color:rgb(var(--danger-6))}.arco-btn-outline.arco-btn-status-danger:hover{color:rgb(var(--danger-5));background-color:transparent;border-color:rgb(var(--danger-5))}.arco-btn-outline.arco-btn-status-danger:focus-visible{box-shadow:0 0 0 .25em 
rgb(var(--danger-3))}.arco-btn-outline.arco-btn-status-danger:active{color:rgb(var(--danger-7));background-color:transparent;border-color:rgb(var(--danger-7))}.arco-btn-outline.arco-btn-status-danger.arco-btn-loading{color:rgb(var(--danger-6));background-color:transparent;border-color:rgb(var(--danger-6))}.arco-btn-outline.arco-btn-status-danger.arco-btn-disabled{color:var(--color-danger-light-3);background-color:transparent;border:1px solid var(--color-danger-light-3)}.arco-btn-outline.arco-btn-status-success{color:rgb(var(--success-6));background-color:transparent;border-color:rgb(var(--success-6))}.arco-btn-outline.arco-btn-status-success:hover{color:rgb(var(--success-5));background-color:transparent;border-color:rgb(var(--success-5))}.arco-btn-outline.arco-btn-status-success:focus-visible{box-shadow:0 0 0 .25em rgb(var(--success-3))}.arco-btn-outline.arco-btn-status-success:active{color:rgb(var(--success-7));background-color:transparent;border-color:rgb(var(--success-7))}.arco-btn-outline.arco-btn-status-success.arco-btn-loading{color:rgb(var(--success-6));background-color:transparent;border-color:rgb(var(--success-6))}.arco-btn-outline.arco-btn-status-success.arco-btn-disabled{color:var(--color-success-light-3);background-color:transparent;border:1px solid var(--color-success-light-3)}.arco-btn-primary,.arco-btn-primary[type=button],.arco-btn-primary[type=submit]{color:#fff;background-color:rgb(var(--primary-6));border:1px solid transparent}.arco-btn-primary:hover,.arco-btn-primary[type=button]:hover,.arco-btn-primary[type=submit]:hover{color:#fff;background-color:rgb(var(--primary-5));border-color:transparent}.arco-btn-primary:focus-visible,.arco-btn-primary[type=button]:focus-visible,.arco-btn-primary[type=submit]:focus-visible{box-shadow:0 0 0 .25em rgb(var(--primary-3))}.arco-btn-primary:active,.arco-btn-primary[type=button]:active,.arco-btn-primary[type=submit]:active{color:#fff;background-color:rgb(var(--primary-7));border-color:transparent}.arco-btn-primary.arco-btn-loading,.arco-btn-primary[type=button].arco-btn-loading,.arco-btn-primary[type=submit].arco-btn-loading{color:#fff;background-color:rgb(var(--primary-6));border:1px solid transparent}.arco-btn-primary.arco-btn-disabled,.arco-btn-primary[type=button].arco-btn-disabled,.arco-btn-primary[type=submit].arco-btn-disabled{color:#fff;background-color:var(--color-primary-light-3);border:1px solid transparent;cursor:not-allowed}.arco-btn-primary.arco-btn-status-warning{color:#fff;background-color:rgb(var(--warning-6));border-color:transparent}.arco-btn-primary.arco-btn-status-warning:hover{color:#fff;background-color:rgb(var(--warning-5));border-color:transparent}.arco-btn-primary.arco-btn-status-warning:focus-visible{box-shadow:0 0 0 .25em rgb(var(--warning-3))}.arco-btn-primary.arco-btn-status-warning:active{color:#fff;background-color:rgb(var(--warning-7));border-color:transparent}.arco-btn-primary.arco-btn-status-warning.arco-btn-loading{color:#fff;background-color:rgb(var(--warning-6));border-color:transparent}.arco-btn-primary.arco-btn-status-warning.arco-btn-disabled{color:#fff;background-color:var(--color-warning-light-3);border:1px solid transparent}.arco-btn-primary.arco-btn-status-danger{color:#fff;background-color:rgb(var(--danger-6));border-color:transparent}.arco-btn-primary.arco-btn-status-danger:hover{color:#fff;background-color:rgb(var(--danger-5));border-color:transparent}.arco-btn-primary.arco-btn-status-danger:focus-visible{box-shadow:0 0 0 .25em 
rgb(var(--danger-3))}.arco-btn-primary.arco-btn-status-danger:active{color:#fff;background-color:rgb(var(--danger-7));border-color:transparent}.arco-btn-primary.arco-btn-status-danger.arco-btn-loading{color:#fff;background-color:rgb(var(--danger-6));border-color:transparent}.arco-btn-primary.arco-btn-status-danger.arco-btn-disabled{color:#fff;background-color:var(--color-danger-light-3);border:1px solid transparent}.arco-btn-primary.arco-btn-status-success{color:#fff;background-color:rgb(var(--success-6));border-color:transparent}.arco-btn-primary.arco-btn-status-success:hover{color:#fff;background-color:rgb(var(--success-5));border-color:transparent}.arco-btn-primary.arco-btn-status-success:focus-visible{box-shadow:0 0 0 .25em rgb(var(--success-3))}.arco-btn-primary.arco-btn-status-success:active{color:#fff;background-color:rgb(var(--success-7));border-color:transparent}.arco-btn-primary.arco-btn-status-success.arco-btn-loading{color:#fff;background-color:rgb(var(--success-6));border-color:transparent}.arco-btn-primary.arco-btn-status-success.arco-btn-disabled{color:#fff;background-color:var(--color-success-light-3);border:1px solid transparent}.arco-btn-secondary,.arco-btn-secondary[type=button],.arco-btn-secondary[type=submit]{color:var(--color-text-2);background-color:var(--color-secondary);border:1px solid transparent}.arco-btn-secondary:hover,.arco-btn-secondary[type=button]:hover,.arco-btn-secondary[type=submit]:hover{color:var(--color-text-2);background-color:var(--color-secondary-hover);border-color:transparent}.arco-btn-secondary:focus-visible,.arco-btn-secondary[type=button]:focus-visible,.arco-btn-secondary[type=submit]:focus-visible{box-shadow:0 0 0 .25em var(--color-neutral-4)}.arco-btn-secondary:active,.arco-btn-secondary[type=button]:active,.arco-btn-secondary[type=submit]:active{color:var(--color-text-2);background-color:var(--color-secondary-active);border-color:transparent}.arco-btn-secondary.arco-btn-loading,.arco-btn-secondary[type=button].arco-btn-loading,.arco-btn-secondary[type=submit].arco-btn-loading{color:var(--color-text-2);background-color:var(--color-secondary);border:1px solid transparent}.arco-btn-secondary.arco-btn-disabled,.arco-btn-secondary[type=button].arco-btn-disabled,.arco-btn-secondary[type=submit].arco-btn-disabled{color:var(--color-text-4);background-color:var(--color-secondary-disabled);border:1px solid transparent;cursor:not-allowed}.arco-btn-secondary.arco-btn-status-warning{color:rgb(var(--warning-6));background-color:var(--color-warning-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-warning:hover{color:rgb(var(--warning-6));background-color:var(--color-warning-light-2);border-color:transparent}.arco-btn-secondary.arco-btn-status-warning:focus-visible{box-shadow:0 0 0 .25em rgb(var(--warning-3))}.arco-btn-secondary.arco-btn-status-warning:active{color:rgb(var(--warning-6));background-color:var(--color-warning-light-3);border-color:transparent}.arco-btn-secondary.arco-btn-status-warning.arco-btn-loading{color:rgb(var(--warning-6));background-color:var(--color-warning-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-warning.arco-btn-disabled{color:var(--color-warning-light-3);background-color:var(--color-warning-light-1);border:1px solid 
transparent}.arco-btn-secondary.arco-btn-status-danger{color:rgb(var(--danger-6));background-color:var(--color-danger-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-danger:hover{color:rgb(var(--danger-6));background-color:var(--color-danger-light-2);border-color:transparent}.arco-btn-secondary.arco-btn-status-danger:focus-visible{box-shadow:0 0 0 .25em rgb(var(--danger-3))}.arco-btn-secondary.arco-btn-status-danger:active{color:rgb(var(--danger-6));background-color:var(--color-danger-light-3);border-color:transparent}.arco-btn-secondary.arco-btn-status-danger.arco-btn-loading{color:rgb(var(--danger-6));background-color:var(--color-danger-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-danger.arco-btn-disabled{color:var(--color-danger-light-3);background-color:var(--color-danger-light-1);border:1px solid transparent}.arco-btn-secondary.arco-btn-status-success{color:rgb(var(--success-6));background-color:var(--color-success-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-success:hover{color:rgb(var(--success-6));background-color:var(--color-success-light-2);border-color:transparent}.arco-btn-secondary.arco-btn-status-success:focus-visible{box-shadow:0 0 0 .25em rgb(var(--success-3))}.arco-btn-secondary.arco-btn-status-success:active{color:rgb(var(--success-6));background-color:var(--color-success-light-3);border-color:transparent}.arco-btn-secondary.arco-btn-status-success.arco-btn-loading{color:rgb(var(--success-6));background-color:var(--color-success-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-success.arco-btn-disabled{color:var(--color-success-light-3);background-color:var(--color-success-light-1);border:1px solid transparent}.arco-btn-dashed,.arco-btn-dashed[type=button],.arco-btn-dashed[type=submit]{color:var(--color-text-2);background-color:var(--color-fill-2);border:1px dashed var(--color-neutral-3)}.arco-btn-dashed:hover,.arco-btn-dashed[type=button]:hover,.arco-btn-dashed[type=submit]:hover{color:var(--color-text-2);background-color:var(--color-fill-3);border-color:var(--color-neutral-4)}.arco-btn-dashed:focus-visible,.arco-btn-dashed[type=button]:focus-visible,.arco-btn-dashed[type=submit]:focus-visible{box-shadow:0 0 0 .25em var(--color-neutral-4)}.arco-btn-dashed:active,.arco-btn-dashed[type=button]:active,.arco-btn-dashed[type=submit]:active{color:var(--color-text-2);background-color:var(--color-fill-4);border-color:var(--color-neutral-5)}.arco-btn-dashed.arco-btn-loading,.arco-btn-dashed[type=button].arco-btn-loading,.arco-btn-dashed[type=submit].arco-btn-loading{color:var(--color-text-2);background-color:var(--color-fill-2);border:1px dashed var(--color-neutral-3)}.arco-btn-dashed.arco-btn-disabled,.arco-btn-dashed[type=button].arco-btn-disabled,.arco-btn-dashed[type=submit].arco-btn-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border:1px dashed var(--color-neutral-3);cursor:not-allowed}.arco-btn-dashed.arco-btn-status-warning{color:rgb(var(--warning-6));background-color:var(--color-warning-light-1);border-color:var(--color-warning-light-2)}.arco-btn-dashed.arco-btn-status-warning:hover{color:rgb(var(--warning-6));background-color:var(--color-warning-light-2);border-color:var(--color-warning-light-3)}.arco-btn-dashed.arco-btn-status-warning:focus-visible{box-shadow:0 0 0 .25em 
rgb(var(--warning-3))}.arco-btn-dashed.arco-btn-status-warning:active{color:rgb(var(--warning-6));background-color:var(--color-warning-light-3);border-color:var(--color-warning-light-4)}.arco-btn-dashed.arco-btn-status-warning.arco-btn-loading{color:rgb(var(--warning-6));background-color:var(--color-warning-light-1);border-color:var(--color-warning-light-2)}.arco-btn-dashed.arco-btn-status-warning.arco-btn-disabled{color:var(--color-warning-light-3);background-color:var(--color-warning-light-1);border:1px dashed var(--color-warning-light-2)}.arco-btn-dashed.arco-btn-status-danger{color:rgb(var(--danger-6));background-color:var(--color-danger-light-1);border-color:var(--color-danger-light-2)}.arco-btn-dashed.arco-btn-status-danger:hover{color:rgb(var(--danger-6));background-color:var(--color-danger-light-2);border-color:var(--color-danger-light-3)}.arco-btn-dashed.arco-btn-status-danger:focus-visible{box-shadow:0 0 0 .25em rgb(var(--danger-3))}.arco-btn-dashed.arco-btn-status-danger:active{color:rgb(var(--danger-6));background-color:var(--color-danger-light-3);border-color:var(--color-danger-light-4)}.arco-btn-dashed.arco-btn-status-danger.arco-btn-loading{color:rgb(var(--danger-6));background-color:var(--color-danger-light-1);border-color:var(--color-danger-light-2)}.arco-btn-dashed.arco-btn-status-danger.arco-btn-disabled{color:var(--color-danger-light-3);background-color:var(--color-danger-light-1);border:1px dashed var(--color-danger-light-2)}.arco-btn-dashed.arco-btn-status-success{color:rgb(var(--success-6));background-color:var(--color-success-light-1);border-color:var(--color-success-light-2)}.arco-btn-dashed.arco-btn-status-success:hover{color:rgb(var(--success-6));background-color:var(--color-success-light-2);border-color:var(--color-success-light-3)}.arco-btn-dashed.arco-btn-status-success:focus-visible{box-shadow:0 0 0 .25em rgb(var(--success-3))}.arco-btn-dashed.arco-btn-status-success:active{color:rgb(var(--success-6));background-color:var(--color-success-light-3);border-color:var(--color-success-light-4)}.arco-btn-dashed.arco-btn-status-success.arco-btn-loading{color:rgb(var(--success-6));background-color:var(--color-success-light-1);border-color:var(--color-success-light-2)}.arco-btn-dashed.arco-btn-status-success.arco-btn-disabled{color:var(--color-success-light-3);background-color:var(--color-success-light-1);border:1px dashed var(--color-success-light-2)}.arco-btn-text,.arco-btn-text[type=button],.arco-btn-text[type=submit]{color:rgb(var(--primary-6));background-color:transparent;border:1px solid transparent}.arco-btn-text:hover,.arco-btn-text[type=button]:hover,.arco-btn-text[type=submit]:hover{color:rgb(var(--primary-6));background-color:var(--color-fill-2);border-color:transparent}.arco-btn-text:focus-visible,.arco-btn-text[type=button]:focus-visible,.arco-btn-text[type=submit]:focus-visible{box-shadow:0 0 0 .25em var(--color-neutral-4)}.arco-btn-text:active,.arco-btn-text[type=button]:active,.arco-btn-text[type=submit]:active{color:rgb(var(--primary-6));background-color:var(--color-fill-3);border-color:transparent}.arco-btn-text.arco-btn-loading,.arco-btn-text[type=button].arco-btn-loading,.arco-btn-text[type=submit].arco-btn-loading{color:rgb(var(--primary-6));background-color:transparent;border:1px solid transparent}.arco-btn-text.arco-btn-disabled,.arco-btn-text[type=button].arco-btn-disabled,.arco-btn-text[type=submit].arco-btn-disabled{color:var(--color-primary-light-3);background-color:transparent;border:1px solid 
transparent;cursor:not-allowed}.arco-btn-text.arco-btn-status-warning{color:rgb(var(--warning-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-warning:hover{color:rgb(var(--warning-6));background-color:var(--color-fill-2);border-color:transparent}.arco-btn-text.arco-btn-status-warning:focus-visible{box-shadow:0 0 0 .25em rgb(var(--warning-3))}.arco-btn-text.arco-btn-status-warning:active{color:rgb(var(--warning-6));background-color:var(--color-fill-3);border-color:transparent}.arco-btn-text.arco-btn-status-warning.arco-btn-loading{color:rgb(var(--warning-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-warning.arco-btn-disabled{color:var(--color-warning-light-3);background-color:transparent;border:1px solid transparent}.arco-btn-text.arco-btn-status-danger{color:rgb(var(--danger-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-danger:hover{color:rgb(var(--danger-6));background-color:var(--color-fill-2);border-color:transparent}.arco-btn-text.arco-btn-status-danger:focus-visible{box-shadow:0 0 0 .25em rgb(var(--danger-3))}.arco-btn-text.arco-btn-status-danger:active{color:rgb(var(--danger-6));background-color:var(--color-fill-3);border-color:transparent}.arco-btn-text.arco-btn-status-danger.arco-btn-loading{color:rgb(var(--danger-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-danger.arco-btn-disabled{color:var(--color-danger-light-3);background-color:transparent;border:1px solid transparent}.arco-btn-text.arco-btn-status-success{color:rgb(var(--success-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-success:hover{color:rgb(var(--success-6));background-color:var(--color-fill-2);border-color:transparent}.arco-btn-text.arco-btn-status-success:focus-visible{box-shadow:0 0 0 .25em rgb(var(--success-3))}.arco-btn-text.arco-btn-status-success:active{color:rgb(var(--success-6));background-color:var(--color-fill-3);border-color:transparent}.arco-btn-text.arco-btn-status-success.arco-btn-loading{color:rgb(var(--success-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-success.arco-btn-disabled{color:var(--color-success-light-3);background-color:transparent;border:1px solid transparent}.arco-btn-size-mini{height:24px;padding:0 11px;font-size:12px;border-radius:var(--border-radius-small)}.arco-btn-size-mini:not(.arco-btn-only-icon) .arco-btn-icon{margin-right:4px}.arco-btn-size-mini svg{vertical-align:-1px}.arco-btn-size-mini.arco-btn-loading-fixed-width.arco-btn-loading{padding-right:3px;padding-left:3px}.arco-btn-size-mini.arco-btn-only-icon{width:24px;height:24px;padding:0}.arco-btn-size-mini.arco-btn-shape-circle{width:24px;height:24px;padding:0;text-align:center;border-radius:var(--border-radius-circle)}.arco-btn-size-mini.arco-btn-shape-round{border-radius:12px}.arco-btn-size-small{height:28px;padding:0 15px;font-size:14px;border-radius:var(--border-radius-small)}.arco-btn-size-small:not(.arco-btn-only-icon) .arco-btn-icon{margin-right:6px}.arco-btn-size-small 
svg{vertical-align:-2px}.arco-btn-size-small.arco-btn-loading-fixed-width.arco-btn-loading{padding-right:5px;padding-left:5px}.arco-btn-size-small.arco-btn-only-icon{width:28px;height:28px;padding:0}.arco-btn-size-small.arco-btn-shape-circle{width:28px;height:28px;padding:0;text-align:center;border-radius:var(--border-radius-circle)}.arco-btn-size-small.arco-btn-shape-round{border-radius:14px}.arco-btn-size-medium{height:32px;padding:0 15px;font-size:14px;border-radius:var(--border-radius-small)}.arco-btn-size-medium:not(.arco-btn-only-icon) .arco-btn-icon{margin-right:8px}.arco-btn-size-medium svg{vertical-align:-2px}.arco-btn-size-medium.arco-btn-loading-fixed-width.arco-btn-loading{padding-right:4px;padding-left:4px}.arco-btn-size-medium.arco-btn-only-icon{width:32px;height:32px;padding:0}.arco-btn-size-medium.arco-btn-shape-circle{width:32px;height:32px;padding:0;text-align:center;border-radius:var(--border-radius-circle)}.arco-btn-size-medium.arco-btn-shape-round{border-radius:16px}.arco-btn-size-large{height:36px;padding:0 19px;font-size:14px;border-radius:var(--border-radius-small)}.arco-btn-size-large:not(.arco-btn-only-icon) .arco-btn-icon{margin-right:8px}.arco-btn-size-large svg{vertical-align:-2px}.arco-btn-size-large.arco-btn-loading-fixed-width.arco-btn-loading{padding-right:8px;padding-left:8px}.arco-btn-size-large.arco-btn-only-icon{width:36px;height:36px;padding:0}.arco-btn-size-large.arco-btn-shape-circle{width:36px;height:36px;padding:0;text-align:center;border-radius:var(--border-radius-circle)}.arco-btn-size-large.arco-btn-shape-round{border-radius:18px}.arco-btn-group{display:inline-flex;align-items:center}.arco-btn-group .arco-btn-outline:not(:first-child),.arco-btn-group .arco-btn-dashed:not(:first-child){margin-left:-1px}.arco-btn-group .arco-btn-primary:not(:last-child){border-right:1px solid rgb(var(--primary-5))}.arco-btn-group .arco-btn-secondary:not(:last-child){border-right:1px solid var(--color-secondary-hover)}.arco-btn-group .arco-btn-status-warning:not(:last-child){border-right:1px solid rgb(var(--warning-5))}.arco-btn-group .arco-btn-status-danger:not(:last-child){border-right:1px solid rgb(var(--danger-5))}.arco-btn-group .arco-btn-status-success:not(:last-child){border-right:1px solid rgb(var(--success-5))}.arco-btn-group .arco-btn-outline:hover,.arco-btn-group .arco-btn-dashed:hover,.arco-btn-group .arco-btn-outline:active,.arco-btn-group .arco-btn-dashed:active{z-index:2}.arco-btn-group .arco-btn:first-child{border-top-right-radius:0;border-bottom-right-radius:0}.arco-btn-group .arco-btn:last-child{border-top-left-radius:0;border-bottom-left-radius:0}.arco-btn-group .arco-btn:not(:first-child):not(:last-child){border-radius:0}body[arco-theme=dark] .arco-btn-primary.arco-btn-disabled{color:#ffffff4d}.arco-calendar{box-sizing:border-box;border:1px solid var(--color-neutral-3)}.arco-calendar-header{display:flex;padding:24px}.arco-calendar-header-left{position:relative;display:flex;flex:1;align-items:center;height:28px;line-height:28px}.arco-calendar-header-right{position:relative;height:28px}.arco-calendar-header-value{color:var(--color-text-1);font-weight:500;font-size:20px}.arco-calendar-header-icon{width:28px;height:28px;margin-right:12px;color:var(--color-text-2);font-size:12px;line-height:28px;text-align:center;background-color:var(--color-bg-5);border-radius:50%;transition:all .1s cubic-bezier(0,0,1,1);user-select:none}.arco-calendar-header-icon:not(:first-child){margin:0 12px}.arco-calendar-header-icon:focus-visible{box-shadow:0 0 0 2px 
var(--color-primary-light-3)}.arco-calendar-header-icon:not(.arco-calendar-header-icon-hidden){cursor:pointer}.arco-calendar-header-icon:not(.arco-calendar-header-icon-hidden):hover{background-color:var(--color-fill-3)}.arco-calendar .arco-calendar-header-value-year{width:100px;margin-right:8px}.arco-calendar .arco-calendar-header-value-month{width:76px;margin-right:32px}.arco-calendar-month{width:100%}.arco-calendar-month-row{display:flex;height:100px}.arco-calendar-month-row .arco-calendar-cell{flex:1;overflow:hidden;border-bottom:1px solid var(--color-neutral-3)}.arco-calendar-month-row:last-child .arco-calendar-cell{border-bottom:unset}.arco-calendar-month-cell-body{box-sizing:border-box}.arco-calendar-mode-month:not(.arco-calendar-panel) .arco-calendar-cell:not(:last-child){border-right:1px solid var(--color-neutral-3)}.arco-calendar-week-list{display:flex;box-sizing:border-box;width:100%;padding:0;border-bottom:1px solid var(--color-neutral-3)}.arco-calendar-week-list-item{flex:1;padding:20px 16px;color:#7d7d7f;text-align:left}.arco-calendar-cell .arco-calendar-date{box-sizing:border-box;width:100%;height:100%;padding:10px;cursor:pointer}.arco-calendar-cell .arco-calendar-date-circle{width:28px;height:28px;line-height:28px;text-align:center;border-radius:50%}.arco-calendar-date-content{height:70px;overflow-y:auto}.arco-calendar-cell-today .arco-calendar-date-circle{box-sizing:border-box;border:1px solid rgb(var(--primary-6))}.arco-calendar-date-value{color:var(--color-text-4);font-weight:500;font-size:16px}.arco-calendar-cell-in-view .arco-calendar-date-value{color:var(--color-text-1)}.arco-calendar-mode-month .arco-calendar-cell-selected .arco-calendar-date-circle,.arco-calendar-mode-year .arco-calendar-cell-selected .arco-calendar-cell-selected .arco-calendar-date-circle{color:#fff;background-color:rgb(var(--primary-6));border:1px solid rgb(var(--primary-6))}.arco-calendar-mode-year:not(.arco-calendar-panel){min-width:820px}.arco-calendar-mode-year .arco-calendar-header{border-bottom:1px solid var(--color-neutral-3)}.arco-calendar-mode-year .arco-calendar-body{padding:12px}.arco-calendar-mode-year .arco-calendar-year-row{display:flex}.arco-calendar-year-row>.arco-calendar-cell{flex:1;padding:20px 8px}.arco-calendar-year-row>.arco-calendar-cell:not(:last-child){border-right:1px solid var(--color-neutral-3)}.arco-calendar-year-row:not(:last-child)>.arco-calendar-cell{border-bottom:1px solid var(--color-neutral-3)}.arco-calendar-month-with-days .arco-calendar-month-row{height:26px}.arco-calendar-month-with-days .arco-calendar-cell{border-bottom:0}.arco-calendar-month-with-days .arco-calendar-month-cell-body{padding:0}.arco-calendar-month-with-days .arco-calendar-month-title{padding:10px 6px;color:var(--color-text-1);font-weight:500;font-size:16px}.arco-calendar-month-cell{width:100%;font-size:12px}.arco-calendar-month-cell .arco-calendar-week-list{padding:0;border-bottom:unset}.arco-calendar-month-cell .arco-calendar-week-list-item{padding:6px;color:#7d7d7f;text-align:center}.arco-calendar-month-cell .arco-calendar-cell{text-align:center}.arco-calendar-month-cell .arco-calendar-date{padding:2px}.arco-calendar-month-cell .arco-calendar-date-value{font-size:14px}.arco-calendar-month-cell .arco-calendar-date-circle{display:inline-block;width:22px;height:22px;line-height:22px;text-align:center;border-radius:50%}.arco-calendar-panel{background-color:var(--color-bg-5);border:1px solid var(--color-neutral-3)}.arco-calendar-panel .arco-calendar-header{padding:8px 16px;border-bottom:1px solid 
var(--color-neutral-3)}.arco-calendar-panel .arco-calendar-header-value{flex:1;font-size:14px;line-height:24px;text-align:center}.arco-calendar-panel .arco-calendar-header-icon{width:24px;height:24px;margin-right:2px;margin-left:2px;line-height:24px}.arco-calendar-panel .arco-calendar-body{padding:14px 16px}.arco-calendar-panel .arco-calendar-month-cell-body{padding:0}.arco-calendar-panel .arco-calendar-month-row{height:unset}.arco-calendar-panel .arco-calendar-week-list{padding:0;border-bottom:unset}.arco-calendar-panel .arco-calendar-week-list-item{height:32px;padding:0;font-weight:400;line-height:32px;text-align:center}.arco-calendar-panel .arco-calendar-cell,.arco-calendar-panel .arco-calendar-year-row .arco-calendar-cell{box-sizing:border-box;padding:2px 0;text-align:center;border-right:0;border-bottom:0}.arco-calendar-panel .arco-calendar-cell .arco-calendar-date{display:flex;justify-content:center;padding:4px 0}.arco-calendar-panel .arco-calendar-cell .arco-calendar-date-value{min-width:24px;height:24px;font-size:14px;line-height:24px;cursor:pointer}.arco-calendar-panel.arco-calendar-mode-year .arco-calendar-cell{padding:4px 0}.arco-calendar-panel.arco-calendar-mode-year .arco-calendar-cell .arco-calendar-date{padding:4px}.arco-calendar-panel.arco-calendar-mode-year .arco-calendar-cell .arco-calendar-date-value{width:100%;border-radius:12px}.arco-calendar-panel .arco-calendar-cell-selected .arco-calendar-date-value{color:var(--color-white);background-color:rgb(var(--primary-6));border-radius:50%}.arco-calendar-panel .arco-calendar-cell:not(.arco-calendar-cell-selected):not(.arco-calendar-cell-range-start):not(.arco-calendar-cell-range-end):not(.arco-calendar-cell-hover-range-start):not(.arco-calendar-cell-hover-range-end):not(.arco-calendar-cell-disabled):not(.arco-calendar-cell-week) .arco-calendar-date-value:hover{color:rgb(var(--primary-6));background-color:var(--color-primary-light-1);border-radius:50%}.arco-calendar-panel.arco-calendar-mode-year .arco-calendar-cell:not(.arco-calendar-cell-selected):not(.arco-calendar-cell-range-start):not(.arco-calendar-cell-range-end):not(.arco-calendar-cell-hover-range-start):not(.arco-calendar-cell-hover-range-end):not(.arco-calendar-cell-disabled) .arco-calendar-date-value:hover{border-radius:12px}.arco-calendar-panel .arco-calendar-cell-today{position:relative}.arco-calendar-panel .arco-calendar-cell-today:after{position:absolute;bottom:0;left:50%;display:block;width:4px;height:4px;margin-left:-2px;background-color:rgb(var(--primary-6));border-radius:50%;content:""}.arco-calendar-cell-in-range .arco-calendar-date{background-color:var(--color-primary-light-1)}.arco-calendar-cell-range-start .arco-calendar-date{border-radius:16px 0 0 16px}.arco-calendar-cell-range-end .arco-calendar-date{border-radius:0 16px 16px 0}.arco-calendar-cell-in-range-near-hover .arco-calendar-date{border-radius:0}.arco-calendar-cell-range-start .arco-calendar-date-value,.arco-calendar-cell-range-end .arco-calendar-date-value{color:var(--color-white);background-color:rgb(var(--primary-6));border-radius:50%}.arco-calendar-cell-hover-in-range .arco-calendar-date{background-color:var(--color-primary-light-1)}.arco-calendar-cell-hover-range-start .arco-calendar-date{border-radius:16px 0 0 16px}.arco-calendar-cell-hover-range-end .arco-calendar-date{border-radius:0 16px 16px 0}.arco-calendar-cell-hover-range-start .arco-calendar-date-value,.arco-calendar-cell-hover-range-end 
.arco-calendar-date-value{color:var(--color-text-1);background-color:var(--color-primary-light-2);border-radius:50%}.arco-calendar-panel .arco-calendar-cell-disabled>.arco-calendar-date{background-color:var(--color-fill-1);cursor:not-allowed}.arco-calendar-panel .arco-calendar-cell-disabled>.arco-calendar-date>.arco-calendar-date-value{color:var(--color-text-4);background-color:var(--color-fill-1);cursor:not-allowed}.arco-calendar-panel .arco-calendar-footer-btn-wrapper{height:38px;color:var(--color-text-1);line-height:38px;text-align:center;border-top:1px solid var(--color-neutral-3);cursor:pointer}.arco-calendar-rtl{direction:rtl}.arco-calendar-rtl .arco-calendar-header-icon{margin-right:0;margin-left:12px;transform:scaleX(-1)}.arco-calendar-rtl .arco-calendar-week-list-item{text-align:right}.arco-calendar-rtl.arco-calendar-mode-month:not(.arco-calendar-panel) .arco-calendar-cell:not(:last-child){border-right:0;border-left:1px solid var(--color-neutral-3)}.arco-calendar-rtl .arco-calendar-header-value-year{margin-right:0;margin-left:8px}.arco-calendar-rtl .arco-calendar-header-value-month{margin-right:0;margin-left:32px}.arco-card{position:relative;background:var(--color-bg-2);border-radius:var(--border-radius-none);transition:box-shadow .2s cubic-bezier(0,0,1,1)}.arco-card-header{position:relative;display:flex;align-items:center;justify-content:space-between;box-sizing:border-box;overflow:hidden;border-bottom:1px solid var(--color-neutral-3)}.arco-card-header-no-title:before{display:block;content:" "}.arco-card-header-title{flex:1;color:var(--color-text-1);font-weight:500;line-height:1.5715;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-card-header-extra{color:rgb(var(--primary-6));overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-card-body{color:var(--color-text-2)}.arco-card-cover{overflow:hidden}.arco-card-cover>*{display:block;width:100%}.arco-card-actions{display:flex;align-items:center;justify-content:space-between;margin-top:20px}.arco-card-actions:before{visibility:hidden;content:""}.arco-card-actions-right{display:flex;align-items:center}.arco-card-actions-item{display:flex;align-items:center;justify-content:center;color:var(--color-text-2);cursor:pointer;transition:color .2s cubic-bezier(0,0,1,1);overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-card-actions-item:hover{color:rgb(var(--primary-6))}.arco-card-actions-item:not(:last-child){margin-right:12px}.arco-card-meta-footer{display:flex;align-items:center;justify-content:space-between}.arco-card-meta-footer:last-child{margin-top:20px}.arco-card-meta-footer-only-actions:before{visibility:hidden;content:""}.arco-card-meta-footer .arco-card-actions{margin-top:0}.arco-card-meta-title{color:var(--color-text-1);font-weight:500;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-card-meta-description:not(:first-child){margin-top:4px}.arco-card-grid{position:relative;box-sizing:border-box;width:33.33%;box-shadow:1px 0 0 0 var(--color-neutral-3),0 1px 0 0 var(--color-neutral-3),1px 1px 0 0 var(--color-neutral-3),1px 0 0 0 var(--color-neutral-3) inset,0 1px 0 0 var(--color-neutral-3) inset}.arco-card-grid:before{position:absolute;top:0;right:0;bottom:0;left:0;transition:box-shadow .2s cubic-bezier(0,0,1,1);content:"";pointer-events:none}.arco-card-grid-hoverable:hover{z-index:1}.arco-card-grid-hoverable:hover:before{box-shadow:0 4px 10px rgb(var(--gray-2))}.arco-card-grid 
.arco-card{background:none;box-shadow:none}.arco-card-contain-grid:not(.arco-card-loading)>.arco-card-body{display:flex;flex-wrap:wrap;margin:0 -1px;padding:0}.arco-card-hoverable:hover{box-shadow:0 4px 10px rgb(var(--gray-2))}.arco-card-bordered{border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-small)}.arco-card-bordered .arco-card-cover{border-radius:var(--border-radius-small) var(--border-radius-small) 0 0}.arco-card-loading .arco-card-body{overflow:hidden;text-align:center}.arco-card-size-medium{font-size:14px}.arco-card-size-medium .arco-card-header{height:46px;padding:10px 16px}.arco-card-size-medium .arco-card-header-title,.arco-card-size-medium .arco-card-meta-title{font-size:16px}.arco-card-size-medium .arco-card-header-extra{font-size:14px}.arco-card-size-medium .arco-card-body{padding:16px}.arco-card-size-small{font-size:14px}.arco-card-size-small .arco-card-header{height:40px;padding:8px 16px}.arco-card-size-small .arco-card-header-title,.arco-card-size-small .arco-card-meta-title{font-size:16px}.arco-card-size-small .arco-card-header-extra{font-size:14px}.arco-card-size-small .arco-card-body{padding:12px 16px}body[arco-theme=dark] .arco-card-grid-hoverable:hover:before,body[arco-theme=dark] .arco-card-hoverable:hover{box-shadow:0 4px 10px rgba(var(--gray-1),40%)}@keyframes arco-carousel-slide-x-in{0%{transform:translate(100%)}to{transform:translate(0)}}@keyframes arco-carousel-slide-x-out{0%{transform:translate(0)}to{transform:translate(-100%)}}@keyframes arco-carousel-slide-x-in-reverse{0%{transform:translate(-100%)}to{transform:translate(0)}}@keyframes arco-carousel-slide-x-out-reverse{0%{transform:translate(0)}to{transform:translate(100%)}}@keyframes arco-carousel-slide-y-in{0%{transform:translateY(100%)}to{transform:translateY(0)}}@keyframes arco-carousel-slide-y-out{0%{transform:translateY(0)}to{transform:translateY(-100%)}}@keyframes arco-carousel-slide-y-in-reverse{0%{transform:translateY(-100%)}to{transform:translateY(0)}}@keyframes arco-carousel-slide-y-out-reverse{0%{transform:translateY(0)}to{transform:translateY(100%)}}@keyframes arco-carousel-card-bottom-to-middle{0%{transform:translate(0) translateZ(-400px);opacity:0}to{transform:translate(0) translateZ(-200px);opacity:.4}}@keyframes arco-carousel-card-middle-to-bottom{0%{transform:translate(-100%) translateZ(-200px);opacity:.4}to{transform:translate(-100%) translateZ(-400px);opacity:0}}@keyframes arco-carousel-card-top-to-middle{0%{transform:translate(-50%) translateZ(0);opacity:1}to{transform:translate(-100%) translateZ(-200px);opacity:.4}}@keyframes arco-carousel-card-middle-to-top{0%{transform:translate(0) translateZ(-200px);opacity:.4}to{transform:translate(-50%) translateZ(0);opacity:1}}@keyframes arco-carousel-card-bottom-to-middle-reverse{0%{transform:translate(-100%) translateZ(-400px);opacity:0}to{transform:translate(-100%) translateZ(-200px);opacity:.4}}@keyframes arco-carousel-card-middle-to-bottom-reverse{0%{transform:translate(0) translateZ(-200px);opacity:.4}to{transform:translate(0) translateZ(-400px);opacity:0}}@keyframes arco-carousel-card-top-to-middle-reverse{0%{transform:translate(-50%) translateZ(0);opacity:1}to{transform:translate(0) translateZ(-200px);opacity:.4}}@keyframes arco-carousel-card-middle-to-top-reverse{0%{transform:translate(-100%) translateZ(-200px);opacity:.4}to{transform:translate(-50%) 
translateZ(0);opacity:1}}.arco-carousel{position:relative}.arco-carousel-indicator-position-outer{margin-bottom:30px}.arco-carousel-slide,.arco-carousel-card,.arco-carousel-fade{position:relative;width:100%;height:100%;overflow:hidden}.arco-carousel-slide>*,.arco-carousel-card>*,.arco-carousel-fade>*{position:absolute;top:0;left:0;width:100%;height:100%;overflow:hidden}.arco-carousel-item-current{z-index:1}.arco-carousel-slide>*:not(.arco-carousel-item-current){display:none;visibility:hidden}.arco-carousel-slide.arco-carousel-horizontal .arco-carousel-item-slide-out{display:block;animation:arco-carousel-slide-x-out}.arco-carousel-slide.arco-carousel-horizontal .arco-carousel-item-slide-in{display:block;animation:arco-carousel-slide-x-in}.arco-carousel-slide.arco-carousel-horizontal.arco-carousel-negative .arco-carousel-item-slide-out{animation:arco-carousel-slide-x-out-reverse}.arco-carousel-slide.arco-carousel-horizontal.arco-carousel-negative .arco-carousel-item-slide-in{animation:arco-carousel-slide-x-in-reverse}.arco-carousel-slide.arco-carousel-vertical .arco-carousel-item-slide-out{display:block;animation:arco-carousel-slide-y-out}.arco-carousel-slide.arco-carousel-vertical .arco-carousel-item-slide-in{display:block;animation:arco-carousel-slide-y-in}.arco-carousel-slide.arco-carousel-vertical.arco-carousel-negative .arco-carousel-item-slide-out{animation:arco-carousel-slide-y-out-reverse}.arco-carousel-slide.arco-carousel-vertical.arco-carousel-negative .arco-carousel-item-slide-in{animation:arco-carousel-slide-y-in-reverse}.arco-carousel-card{perspective:800px}.arco-carousel-card>*{left:50%;transform:translate(-50%) translateZ(-400px);opacity:0;animation:arco-carousel-card-middle-to-bottom}.arco-carousel-card .arco-carousel-item-prev{transform:translate(-100%) translateZ(-200px);opacity:.4;animation:arco-carousel-card-top-to-middle}.arco-carousel-card .arco-carousel-item-next{transform:translate(0) translateZ(-200px);opacity:.4;animation:arco-carousel-card-bottom-to-middle}.arco-carousel-card .arco-carousel-item-current{transform:translate(-50%) translateZ(0);opacity:1;animation:arco-carousel-card-middle-to-top}.arco-carousel-card.arco-carousel-negative>*{animation:arco-carousel-card-middle-to-bottom-reverse}.arco-carousel-card.arco-carousel-negative .arco-carousel-item-prev{animation:arco-carousel-card-bottom-to-middle-reverse}.arco-carousel-card.arco-carousel-negative .arco-carousel-item-next{animation:arco-carousel-card-top-to-middle-reverse}.arco-carousel-card.arco-carousel-negative .arco-carousel-item-current{animation:arco-carousel-card-middle-to-top-reverse}.arco-carousel-fade>*{left:50%;transform:translate(-50%);opacity:0}.arco-carousel-fade .arco-carousel-item-current{opacity:1}.arco-carousel-indicator{position:absolute;display:flex;margin:0;padding:0}.arco-carousel-indicator-wrapper{position:absolute;z-index:2}.arco-carousel-indicator-wrapper-top{top:0;right:0;left:0;height:48px;background:linear-gradient(180deg,rgba(0,0,0,.15) 0%,rgba(0,0,0,0) 87%)}.arco-carousel-indicator-wrapper-bottom{right:0;bottom:0;left:0;height:48px;background:linear-gradient(180deg,rgba(0,0,0,0) 13%,rgba(0,0,0,.15) 100%)}.arco-carousel-indicator-wrapper-left{top:0;left:0;width:48px;height:100%;background:linear-gradient(90deg,rgba(0,0,0,.15) 0%,rgba(0,0,0,0) 87%)}.arco-carousel-indicator-wrapper-right{top:0;right:0;width:48px;height:100%;background:linear-gradient(90deg,rgba(0,0,0,0) 13%,rgba(0,0,0,.15) 
100%)}.arco-carousel-indicator-wrapper-outer{right:0;left:0;background:none}.arco-carousel-indicator-bottom{bottom:12px;left:50%;transform:translate(-50%)}.arco-carousel-indicator-top{top:12px;left:50%;transform:translate(-50%)}.arco-carousel-indicator-left{top:50%;left:12px;transform:translate(-50%,-50%) rotate(90deg)}.arco-carousel-indicator-right{top:50%;right:12px;transform:translate(50%,-50%) rotate(90deg)}.arco-carousel-indicator-outer{left:50%;padding:4px;background-color:transparent;border-radius:20px;transform:translate(-50%)}.arco-carousel-indicator-outer.arco-carousel-indicator-dot{bottom:-22px}.arco-carousel-indicator-outer.arco-carousel-indicator-line{bottom:-20px}.arco-carousel-indicator-outer.arco-carousel-indicator-slider{bottom:-16px;padding:0;background-color:rgba(var(--gray-4),.5)}.arco-carousel-indicator-outer .arco-carousel-indicator-item{background-color:rgba(var(--gray-4),.5)}.arco-carousel-indicator-outer .arco-carousel-indicator-item:hover,.arco-carousel-indicator-outer .arco-carousel-indicator-item-active{background-color:var(--color-fill-4)}.arco-carousel-indicator-item{display:inline-block;background-color:#ffffff4d;border-radius:var(--border-radius-medium);cursor:pointer}.arco-carousel-indicator-item:hover,.arco-carousel-indicator-item-active{background-color:var(--color-white)}.arco-carousel-indicator-dot .arco-carousel-indicator-item{width:6px;height:6px;border-radius:50%}.arco-carousel-indicator-dot .arco-carousel-indicator-item:not(:last-child){margin-right:8px}.arco-carousel-indicator-line .arco-carousel-indicator-item{width:12px;height:4px}.arco-carousel-indicator-line .arco-carousel-indicator-item:not(:last-child){margin-right:8px}.arco-carousel-indicator-slider{width:48px;height:4px;background-color:#ffffff4d;border-radius:var(--border-radius-medium);cursor:pointer}.arco-carousel-indicator-slider .arco-carousel-indicator-item{position:absolute;top:0;height:100%;transition:left .3s}.arco-carousel-arrow>div{position:absolute;z-index:2;display:flex;align-items:center;justify-content:center;width:24px;height:24px;color:var(--color-white);background-color:#ffffff4d;border-radius:50%;cursor:pointer}.arco-carousel-arrow>div>svg{color:var(--color-white);font-size:14px}.arco-carousel-arrow>div:hover{background-color:#ffffff80}.arco-carousel-arrow-left{top:50%;left:12px;transform:translateY(-50%)}.arco-carousel-arrow-right{top:50%;right:12px;transform:translateY(-50%)}.arco-carousel-arrow-top{top:12px;left:50%;transform:translate(-50%)}.arco-carousel-arrow-bottom{bottom:12px;left:50%;transform:translate(-50%)}.arco-carousel-arrow-hover div{opacity:0;transition:all .3s}.arco-carousel:hover .arco-carousel-arrow-hover div{opacity:1}body[arco-theme=dark] .arco-carousel-arrow>div{background-color:#17171a4d}body[arco-theme=dark] .arco-carousel-arrow>div:hover{background-color:#17171a80}body[arco-theme=dark] .arco-carousel-indicator-item,body[arco-theme=dark] .arco-carousel-indicator-slider{background-color:#17171a4d}body[arco-theme=dark] .arco-carousel-indicator-item-active,body[arco-theme=dark] .arco-carousel-indicator-item:hover{background-color:var(--color-white)}body[arco-theme=dark] .arco-carousel-indicator-outer.arco-carousel-indicator-slider{background-color:rgba(var(--gray-4),.5)}body[arco-theme=dark] .arco-carousel-indicator-outer .arco-carousel-indicator-item:hover,body[arco-theme=dark] .arco-carousel-indicator-outer 
.arco-carousel-indicator-item-active{background-color:var(--color-fill-4)}.arco-cascader-panel{display:inline-flex;box-sizing:border-box;height:200px;overflow:hidden;white-space:nowrap;list-style:none;background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-cascader-search-panel{justify-content:center;width:100%;overflow:auto}.arco-cascader-popup-trigger-hover .arco-cascader-list-item{transition:fontweight 0s}.arco-cascader-highlight{font-weight:500}.arco-cascader-panel-column{position:relative;display:inline-flex;flex-direction:column;min-width:120px;height:100%;max-height:200px;background-color:var(--color-bg-popup)}.arco-cascader-panel-column-loading{display:inline-flex;align-items:center;justify-content:center}.arco-cascader-panel-column:not(:last-of-type){border-right:1px solid var(--color-fill-3)}.arco-cascader-column-content{flex:1;max-height:200px;overflow-y:auto}.arco-cascader-list-wrapper{position:relative;display:flex;flex-direction:column;box-sizing:border-box;height:100%;padding:4px 0}.arco-cascader-list-wrapper-with-footer{padding-bottom:0}.arco-cascader-list-empty{display:flex;align-items:center;height:100%}.arco-cascader-list{flex:1;box-sizing:border-box;margin:0;padding:0;list-style:none}.arco-cascader-list-multiple .arco-cascader-option-label,.arco-cascader-list-strictly .arco-cascader-option-label{padding-left:0}.arco-cascader-list-multiple .arco-cascader-option,.arco-cascader-list-strictly .arco-cascader-option{padding-left:12px}.arco-cascader-list-multiple .arco-cascader-option .arco-checkbox,.arco-cascader-list-strictly .arco-cascader-option .arco-checkbox,.arco-cascader-list-multiple .arco-cascader-option .arco-radio,.arco-cascader-list-strictly .arco-cascader-option .arco-radio{margin-right:8px;padding-left:0}.arco-cascader-search-list.arco-cascader-list-multiple .arco-cascader-option-label{padding-right:12px}.arco-cascader-list-footer{box-sizing:border-box;height:36px;padding-left:12px;line-height:36px;border-top:1px solid var(--color-fill-3)}.arco-cascader-option,.arco-cascader-search-option{position:relative;display:flex;box-sizing:border-box;min-width:100px;height:36px;color:var(--color-text-1);font-size:14px;line-height:36px;background-color:transparent;cursor:pointer}.arco-cascader-option-label,.arco-cascader-search-option-label{flex-grow:1;padding-right:34px;padding-left:12px}.arco-cascader-option .arco-icon-right,.arco-cascader-search-option .arco-icon-right,.arco-cascader-option .arco-icon-check,.arco-cascader-search-option .arco-icon-check{position:absolute;top:50%;right:10px;color:var(--color-text-2);font-size:12px;transform:translateY(-50%)}.arco-cascader-option .arco-icon-check,.arco-cascader-search-option .arco-icon-check{color:rgb(var(--primary-6))}.arco-cascader-option .arco-icon-loading,.arco-cascader-search-option .arco-icon-loading{position:absolute;top:50%;right:10px;margin-top:-6px;color:rgb(var(--primary-6));font-size:12px}.arco-cascader-option:hover,.arco-cascader-search-option-hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-cascader-option:hover .arco-checkbox:not(.arco-checkbox-disabled):not(.arco-checkbox-checked):hover .arco-checkbox-icon-hover:before,.arco-cascader-search-option-hover .arco-checkbox:not(.arco-checkbox-disabled):not(.arco-checkbox-checked):hover .arco-checkbox-icon-hover:before{background-color:var(--color-fill-3)}.arco-cascader-option:hover 
.arco-radio:not(.arco-radio-disabled):not(.arco-radio-checked):hover .arco-radio-icon-hover:before,.arco-cascader-search-option-hover .arco-radio:not(.arco-radio-disabled):not(.arco-radio-checked):hover .arco-radio-icon-hover:before{background-color:var(--color-fill-3)}.arco-cascader-option-disabled,.arco-cascader-search-option-disabled,.arco-cascader-option-disabled:hover,.arco-cascader-search-option-disabled:hover{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-cascader-option-disabled .arco-icon-right,.arco-cascader-search-option-disabled .arco-icon-right,.arco-cascader-option-disabled:hover .arco-icon-right,.arco-cascader-search-option-disabled:hover .arco-icon-right{color:inherit}.arco-cascader-option-disabled .arco-icon-check,.arco-cascader-search-option-disabled .arco-icon-check,.arco-cascader-option-disabled:hover .arco-icon-check,.arco-cascader-search-option-disabled:hover .arco-icon-check{color:var(--color-primary-light-3)}.arco-cascader-option-active{color:var(--color-text-1);background-color:var(--color-fill-2);transition:all .2s cubic-bezier(0,0,1,1)}.arco-cascader-option-active:hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-cascader-option-active.arco-cascader-option-disabled,.arco-cascader-option-active.arco-cascader-option-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2)}.cascader-slide-enter-active,.cascader-slide-leave-active{transition:margin .3s cubic-bezier(.34,.69,.1,1)}.cascader-slide-enter-from,.cascader-slide-leave-to{margin-left:-120px}.cascader-slide-enter-to,.cascader-slide-leave-from{margin-left:0}.arco-icon-hover.arco-checkbox-icon-hover:before{width:24px;height:24px}.arco-checkbox{position:relative;display:inline-flex;align-items:center;box-sizing:border-box;padding-left:5px;font-size:14px;line-height:unset;cursor:pointer}.arco-checkbox>input[type=checkbox]{position:absolute;top:0;left:0;width:0;height:0;opacity:0}.arco-checkbox>input[type=checkbox]:focus-visible+.arco-checkbox-icon-hover:before{background-color:var(--color-fill-2)}.arco-checkbox:hover .arco-checkbox-icon-hover:before{background-color:var(--color-fill-2)}.arco-checkbox-label{margin-left:8px;color:var(--color-text-1)}.arco-checkbox-icon{position:relative;box-sizing:border-box;width:14px;height:14px;background-color:var(--color-bg-2);border:2px solid var(--color-fill-3);border-radius:var(--border-radius-small);user-select:none}.arco-checkbox-icon:after{position:absolute;top:50%;left:50%;display:block;width:6px;height:2px;background:var(--color-white);border-radius:.5px;transform:translate(-50%) translateY(-50%) scale(0);content:""}.arco-checkbox-icon-check{position:relative;display:block;width:8px;height:100%;margin:0 auto;color:var(--color-white);transform:scale(0);transform-origin:center 75%}.arco-checkbox:hover .arco-checkbox-icon{border-color:var(--color-fill-4);transition:border-color .1s cubic-bezier(0,0,1,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.arco-checkbox-checked:hover .arco-checkbox-icon,.arco-checkbox-indeterminate:hover .arco-checkbox-icon{transition:transform .3s cubic-bezier(.3,1.3,.3,1)}.arco-checkbox-checked .arco-checkbox-icon{background-color:rgb(var(--primary-6));border-color:transparent}.arco-checkbox-checked .arco-checkbox-icon-check{transform:scale(1);transition:transform .3s cubic-bezier(.3,1.3,.3,1)}.arco-checkbox-indeterminate .arco-checkbox-icon{background-color:rgb(var(--primary-6));border-color:transparent}.arco-checkbox-indeterminate .arco-checkbox-icon 
svg{transform:scale(0)}.arco-checkbox-indeterminate .arco-checkbox-icon:after{transform:translate(-50%) translateY(-50%) scale(1);transition:transform .3s cubic-bezier(.3,1.3,.3,1)}.arco-checkbox.arco-checkbox-disabled,.arco-checkbox.arco-checkbox-disabled .arco-checkbox-icon-hover{cursor:not-allowed}.arco-checkbox.arco-checkbox-disabled:hover .arco-checkbox-mask{border-color:var(--color-fill-3)}.arco-checkbox-checked:hover .arco-checkbox-icon,.arco-checkbox-indeterminate:hover .arco-checkbox-icon{border-color:transparent}.arco-checkbox-disabled .arco-checkbox-icon{background-color:var(--color-fill-2);border-color:var(--color-fill-3)}.arco-checkbox-disabled.arco-checkbox-checked .arco-checkbox-icon,.arco-checkbox-disabled.arco-checkbox-checked:hover .arco-checkbox-icon{background-color:var(--color-primary-light-3);border-color:transparent}.arco-checkbox-disabled:hover .arco-checkbox-icon-hover:before,.arco-checkbox-checked:hover .arco-checkbox-icon-hover:before,.arco-checkbox-indeterminate:hover .arco-checkbox-icon-hover:before{background-color:transparent}.arco-checkbox-disabled:hover .arco-checkbox-icon{border-color:var(--color-fill-3)}.arco-checkbox-disabled .arco-checkbox-label{color:var(--color-text-4)}.arco-checkbox-disabled .arco-checkbox-icon-check{color:var(--color-fill-3)}.arco-checkbox-group{display:inline-block}.arco-checkbox-group .arco-checkbox{margin-right:16px}.arco-checkbox-group-direction-vertical .arco-checkbox{display:flex;margin-right:0;line-height:32px}.arco-icon-hover.arco-collapse-item-icon-hover:before{width:16px;height:16px}.arco-icon-hover.arco-collapse-item-icon-hover:hover:before{background-color:var(--color-fill-2)}.arco-collapse{overflow:hidden;line-height:1.5715;border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium)}.arco-collapse-item{box-sizing:border-box;border-bottom:1px solid var(--color-border-2)}.arco-collapse-item-active>.arco-collapse-item-header{background-color:var(--color-bg-2);border-color:var(--color-neutral-3);transition:border-color 0s ease 0s}.arco-collapse-item-active>.arco-collapse-item-header .arco-collapse-item-header-title{font-weight:500}.arco-collapse-item-active>.arco-collapse-item-header .arco-collapse-item-expand-icon{transform:rotate(90deg)}.arco-collapse-item-active>.arco-collapse-item-header .arco-collapse-item-icon-right .arco-collapse-item-expand-icon{transform:rotate(-90deg)}.arco-collapse-item-header{position:relative;display:flex;align-items:center;justify-content:space-between;box-sizing:border-box;padding-top:8px;padding-bottom:8px;overflow:hidden;color:var(--color-text-1);font-size:14px;line-height:24px;background-color:var(--color-bg-2);border-bottom:1px solid transparent;cursor:pointer;transition:border-color 0s ease .19s}.arco-collapse-item-header-left{padding-right:13px;padding-left:34px}.arco-collapse-item-header-right{padding-right:34px;padding-left:13px}.arco-collapse-item-header-right+.arco-collapse-item-content{padding-left:13px}.arco-collapse-item-header-disabled{color:var(--color-text-4);background-color:var(--color-bg-2);cursor:not-allowed}.arco-collapse-item-header-disabled .arco-collapse-item-header-icon{color:var(--color-text-4)}.arco-collapse-item-header-title{display:inline}.arco-collapse-item-header-extra{float:right}.arco-collapse-item .arco-collapse-item-icon-hover{position:absolute;top:50%;left:13px;text-align:center;transform:translateY(-50%)}.arco-collapse-item .arco-collapse-item-icon-right{right:13px;left:unset}.arco-collapse-item 
.arco-collapse-item-icon-right>.arco-collapse-item-header-icon-down{transform:rotate(-90deg)}.arco-collapse-item .arco-collapse-item-expand-icon{position:relative;display:block;color:var(--color-neutral-7);font-size:14px;vertical-align:middle;transition:transform .2s cubic-bezier(.34,.69,.1,1)}.arco-collapse-item-content{position:relative;padding-right:13px;padding-left:34px;overflow:hidden;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-1)}.arco-collapse-item-content-expanded{display:block;height:auto}.arco-collapse-item-content-box{padding:8px 0}.arco-collapse-item.arco-collapse-item-disabled>.arco-collapse-item-content{color:var(--color-text-4)}.arco-collapse-item-no-icon>.arco-collapse-item-header{padding-right:13px;padding-left:13px}.arco-collapse-item:last-of-type{border-bottom:none}.arco-collapse.arco-collapse-borderless{border:none}.arco-collapse:after{display:table;clear:both;content:""}.collapse-slider-enter-from,.collapse-slider-leave-to{height:0}.collapse-slider-enter-active,.collapse-slider-leave-active{transition:height .2s cubic-bezier(.34,.69,.1,1)}.arco-comment{display:flex;flex-wrap:nowrap;font-size:14px;line-height:1.5715}.arco-comment:not(:first-of-type),.arco-comment-inner-comment{margin-top:20px}.arco-comment-inner{flex:1}.arco-comment-avatar{flex-shrink:0;margin-right:12px;cursor:pointer}.arco-comment-avatar>img{width:32px;height:32px;border-radius:var(--border-radius-circle)}.arco-comment-author{margin-right:8px;color:var(--color-text-2);font-size:14px}.arco-comment-datetime{color:var(--color-text-3);font-size:12px}.arco-comment-content{color:var(--color-text-1)}.arco-comment-title-align-right{display:flex;justify-content:space-between}.arco-comment-actions{margin-top:8px;color:var(--color-text-2);font-size:14px}.arco-comment-actions>*:not(:last-child){margin-right:8px}.arco-comment-actions-align-right{display:flex;justify-content:flex-end}.arco-picker-container,.arco-picker-range-container{box-sizing:border-box;min-height:60px;overflow:hidden;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 2px 5px #0000001a}.arco-picker-container-shortcuts-placement-left,.arco-picker-range-container-shortcuts-placement-left,.arco-picker-container-shortcuts-placement-right,.arco-picker-range-container-shortcuts-placement-right{display:flex;align-items:flex-start}.arco-picker-container-shortcuts-placement-left>.arco-picker-shortcuts,.arco-picker-range-container-shortcuts-placement-left>.arco-picker-shortcuts,.arco-picker-container-shortcuts-placement-right>.arco-picker-shortcuts,.arco-picker-range-container-shortcuts-placement-right>.arco-picker-shortcuts{display:flex;flex-direction:column;box-sizing:border-box;padding:5px 8px;overflow-x:hidden;overflow-y:auto}.arco-picker-container-shortcuts-placement-left>.arco-picker-shortcuts>*,.arco-picker-range-container-shortcuts-placement-left>.arco-picker-shortcuts>*,.arco-picker-container-shortcuts-placement-right>.arco-picker-shortcuts>*,.arco-picker-range-container-shortcuts-placement-right>.arco-picker-shortcuts>*{margin:5px 0}.arco-picker-container-shortcuts-placement-left .arco-picker-panel-wrapper,.arco-picker-range-container-shortcuts-placement-left .arco-picker-panel-wrapper,.arco-picker-container-shortcuts-placement-left .arco-picker-range-panel-wrapper,.arco-picker-range-container-shortcuts-placement-left .arco-picker-range-panel-wrapper{border-left:1px solid 
var(--color-neutral-3)}.arco-picker-container-shortcuts-placement-right .arco-picker-panel-wrapper,.arco-picker-range-container-shortcuts-placement-right .arco-picker-panel-wrapper,.arco-picker-container-shortcuts-placement-right .arco-picker-range-panel-wrapper,.arco-picker-range-container-shortcuts-placement-right .arco-picker-range-panel-wrapper{border-right:1px solid var(--color-neutral-3)}.arco-picker-panel-only,.arco-picker-range-panel-only{box-shadow:none}.arco-picker-panel-only .arco-panel-date-inner,.arco-picker-range-panel-only .arco-panel-date-inner,.arco-picker-range-panel-only .arco-panel-date{width:100%}.arco-picker-header{display:flex;padding:8px 16px;border-bottom:1px solid var(--color-neutral-3)}.arco-picker-header-title{flex:1;color:var(--color-text-1);font-size:14px;line-height:24px;text-align:center}.arco-picker-header-icon{width:24px;height:24px;margin-right:2px;margin-left:2px;color:var(--color-text-2);font-size:12px;line-height:24px;text-align:center;background-color:var(--color-bg-popup);border-radius:50%;transition:all .1s cubic-bezier(0,0,1,1);user-select:none}.arco-picker-header-icon:not(.arco-picker-header-icon-hidden){cursor:pointer}.arco-picker-header-icon:not(.arco-picker-header-icon-hidden):hover{background-color:var(--color-fill-3)}.arco-picker-header-label{padding:2px;border-radius:2px;cursor:pointer;transition:all .1s}.arco-picker-header-label:hover{background-color:var(--color-fill-3)}.arco-picker-body{padding:14px 16px}.arco-picker-week-list{display:flex;box-sizing:border-box;width:100%;padding:14px 16px 0}.arco-picker-week-list-item{flex:1;height:32px;padding:0;color:#7d7d7f;font-weight:400;line-height:32px;text-align:center}.arco-picker-row{display:flex;padding:2px 0}.arco-picker-cell{flex:1}.arco-picker-cell .arco-picker-date{display:flex;justify-content:center;box-sizing:border-box;width:100%;height:100%;padding:4px 0;cursor:pointer}.arco-picker-date-value{min-width:24px;height:24px;color:var(--color-text-4);font-size:14px;line-height:24px;text-align:center;border-radius:var(--border-radius-circle);cursor:pointer}.arco-picker-cell-in-view .arco-picker-date-value{color:var(--color-text-1);font-weight:500}.arco-picker-cell-selected .arco-picker-date-value{color:var(--color-white);background-color:rgb(var(--primary-6));transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-picker-cell-in-view:not(.arco-picker-cell-selected):not(.arco-picker-cell-range-start):not(.arco-picker-cell-range-end):not(.arco-picker-cell-disabled):not(.arco-picker-cell-week) .arco-picker-date-value:hover{color:var(--color-text-1);background-color:var(--color-fill-3)}.arco-picker-cell-today{position:relative}.arco-picker-cell-today:after{position:absolute;bottom:-2px;left:50%;display:block;width:4px;height:4px;margin-left:-2px;background-color:rgb(var(--primary-6));border-radius:50%;content:""}.arco-picker-cell-in-range .arco-picker-date{background-color:var(--color-primary-light-1)}.arco-picker-cell-range-start .arco-picker-date{border-top-left-radius:24px;border-bottom-left-radius:24px}.arco-picker-cell-range-end .arco-picker-date{border-top-right-radius:24px;border-bottom-right-radius:24px}.arco-picker-cell-in-range-near-hover .arco-picker-date{border-radius:0}.arco-picker-cell-range-start .arco-picker-date-value,.arco-picker-cell-range-end .arco-picker-date-value{color:var(--color-white);background-color:rgb(var(--primary-6));border-radius:var(--border-radius-circle)}.arco-picker-cell-hover-in-range 
.arco-picker-date{background-color:var(--color-primary-light-1)}.arco-picker-cell-hover-range-start .arco-picker-date{border-radius:24px 0 0 24px}.arco-picker-cell-hover-range-end .arco-picker-date{border-radius:0 24px 24px 0}.arco-picker-cell-hover-range-start .arco-picker-date-value,.arco-picker-cell-hover-range-end .arco-picker-date-value{color:var(--color-text-1);background-color:var(--color-primary-light-2);border-radius:50%}.arco-picker-cell-disabled .arco-picker-date{background-color:var(--color-fill-1);cursor:not-allowed}.arco-picker-cell-disabled .arco-picker-date-value{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-picker-footer{width:min-content;min-width:100%}.arco-picker-footer-btn-wrapper{display:flex;align-items:center;justify-content:space-between;box-sizing:border-box;padding:3px 8px;border-top:1px solid var(--color-neutral-3)}.arco-picker-footer-btn-wrapper :only-child{margin-left:auto}.arco-picker-footer-extra-wrapper{box-sizing:border-box;padding:8px 24px;color:var(--color-text-1);font-size:12px;border-top:1px solid var(--color-neutral-3)}.arco-picker-footer-now-wrapper{box-sizing:border-box;height:36px;line-height:36px;text-align:center;border-top:1px solid var(--color-neutral-3)}.arco-picker-btn-confirm{margin:5px 0}.arco-picker-shortcuts{flex:1}.arco-picker-shortcuts>*{margin:5px 10px 5px 0}.arco-panel-date{display:flex;box-sizing:border-box}.arco-panel-date-inner{width:265px}.arco-panel-date-inner .arco-picker-body{padding-top:0}.arco-panel-date-timepicker{display:flex;flex-direction:column;border-left:1px solid var(--color-neutral-3)}.arco-panel-date-timepicker-title{width:100%;height:40px;color:var(--color-text-1);font-weight:400;font-size:14px;line-height:40px;text-align:center;border-bottom:1px solid var(--color-neutral-3)}.arco-panel-date-timepicker .arco-timepicker{height:276px;padding:0 6px;overflow:hidden}.arco-panel-date-timepicker .arco-timepicker-column{box-sizing:border-box;width:auto;height:100%;padding:0 4px}.arco-panel-date-timepicker .arco-timepicker-column::-webkit-scrollbar{width:0}.arco-panel-date-timepicker .arco-timepicker-column:not(:last-child){border-right:0}.arco-panel-date-timepicker .arco-timepicker ul:after{height:244px}.arco-panel-date-timepicker .arco-timepicker-cell{width:36px}.arco-panel-date-timepicker .arco-timepicker-cell-inner{padding-left:10px}.arco-panel-date-footer{border-right:1px solid var(--color-neutral-3)}.arco-panel-date-with-view-tabs{flex-direction:column;min-width:265px}.arco-panel-date-with-view-tabs .arco-panel-date-timepicker .arco-timepicker-column{flex:1}.arco-panel-date-with-view-tabs .arco-panel-date-timepicker .arco-timepicker-column::-webkit-scrollbar{width:0}.arco-panel-date-with-view-tabs .arco-panel-date-timepicker .arco-timepicker-cell{width:100%;text-align:center}.arco-panel-date-with-view-tabs .arco-panel-date-timepicker .arco-timepicker-cell-inner{padding-left:0}.arco-panel-date-view-tabs{display:flex;border-top:1px solid var(--color-neutral-3)}.arco-panel-date-view-tab-pane{flex:1;height:50px;color:var(--color-text-4);font-size:14px;line-height:50px;text-align:center;border-right:1px solid var(--color-neutral-3);cursor:pointer}.arco-panel-date-view-tab-pane:last-child{border-right:none}.arco-panel-date-view-tab-pane-text{margin-left:8px}.arco-panel-date-view-tab-pane-active{color:var(--color-text-1)}.arco-panel-month,.arco-panel-quarter,.arco-panel-year{box-sizing:border-box;width:265px}.arco-panel-month .arco-picker-date,.arco-panel-quarter 
.arco-picker-date,.arco-panel-year .arco-picker-date{padding:4px}.arco-panel-month .arco-picker-date-value,.arco-panel-quarter .arco-picker-date-value,.arco-panel-year .arco-picker-date-value{width:100%;border-radius:24px}.arco-panel-month .arco-picker-cell:not(.arco-picker-cell-selected):not(.arco-picker-cell-range-start):not(.arco-picker-cell-range-end):not(.arco-picker-cell-disabled):not(.arco-picker-cell-week) .arco-picker-date-value:hover,.arco-panel-quarter .arco-picker-cell:not(.arco-picker-cell-selected):not(.arco-picker-cell-range-start):not(.arco-picker-cell-range-end):not(.arco-picker-cell-disabled):not(.arco-picker-cell-week) .arco-picker-date-value:hover,.arco-panel-year .arco-picker-cell:not(.arco-picker-cell-selected):not(.arco-picker-cell-range-start):not(.arco-picker-cell-range-end):not(.arco-picker-cell-disabled):not(.arco-picker-cell-week) .arco-picker-date-value:hover{border-radius:24px}.arco-panel-year{box-sizing:border-box;width:265px}.arco-panel-week{box-sizing:border-box}.arco-panel-week-wrapper{display:flex}.arco-panel-week-inner{width:298px}.arco-panel-week-inner .arco-picker-body{padding-top:0}.arco-panel-week .arco-picker-row-week{cursor:pointer}.arco-panel-week .arco-picker-row-week .arco-picker-date-value{width:100%;border-radius:0}.arco-panel-week .arco-picker-cell .arco-picker-date{border-radius:0}.arco-panel-week .arco-picker-cell:nth-child(2) .arco-picker-date{padding-left:4px;border-top-left-radius:24px;border-bottom-left-radius:24px}.arco-panel-week .arco-picker-cell:nth-child(2) .arco-picker-date .arco-picker-date-value{border-top-left-radius:24px;border-bottom-left-radius:24px}.arco-panel-week .arco-picker-cell:nth-child(8) .arco-picker-date{padding-right:4px;border-top-right-radius:24px;border-bottom-right-radius:24px}.arco-panel-week .arco-picker-cell:nth-child(8) .arco-picker-date .arco-picker-date-value{border-top-right-radius:24px;border-bottom-right-radius:24px}.arco-panel-week .arco-picker-row-week:hover .arco-picker-cell:not(.arco-picker-cell-week):not(.arco-picker-cell-selected):not(.arco-picker-cell-range-start):not(.arco-picker-cell-range-end) .arco-picker-date-value{background-color:var(--color-fill-3)}.arco-panel-quarter{box-sizing:border-box;width:265px}.arco-picker-range-wrapper{display:flex}.arco-datepicker-shortcuts-wrapper{box-sizing:border-box;width:106px;height:100%;max-height:300px;margin:10px 0 0;padding:0;overflow-y:auto;list-style:none}.arco-datepicker-shortcuts-wrapper>li{box-sizing:border-box;width:100%;padding:6px 16px;cursor:pointer}.arco-datepicker-shortcuts-wrapper>li:hover{color:rgb(var(--primary-6))}.arco-descriptions-table{width:100%;border-collapse:collapse}.arco-descriptions-table-layout-fixed table{table-layout:fixed}.arco-descriptions-title{margin-bottom:16px;color:var(--color-text-1);font-weight:500;font-size:16px;line-height:1.5715}.arco-descriptions-item,.arco-descriptions-item-label,.arco-descriptions-item-value{box-sizing:border-box;font-size:14px;line-height:1.5715;text-align:left}.arco-descriptions-table-layout-fixed .arco-descriptions-item-label{width:auto}.arco-descriptions-item-label-block{width:1px;padding:0 4px 12px 0;color:var(--color-text-3);font-weight:500;white-space:nowrap}.arco-descriptions-item-value-block{padding:0 4px 12px 
0;color:var(--color-text-1);font-weight:400;white-space:pre-wrap;word-break:break-word}.arco-descriptions-item-label-inline,.arco-descriptions-item-value-inline{box-sizing:border-box;font-size:14px;line-height:1.5715;text-align:left}.arco-descriptions-item-label-inline{margin-bottom:2px;color:var(--color-text-3);font-weight:500}.arco-descriptions-item-value-inline{color:var(--color-text-1);font-weight:400}.arco-descriptions-layout-inline-horizontal .arco-descriptions-item-label-inline{margin-right:4px}.arco-descriptions-layout-inline-horizontal .arco-descriptions-item-label-inline,.arco-descriptions-layout-inline-horizontal .arco-descriptions-item-value-inline{display:inline-block;margin-bottom:0}.arco-descriptions-border.arco-descriptions-layout-inline-vertical .arco-descriptions-item{padding:12px 20px}.arco-descriptions-border .arco-descriptions-body{overflow:hidden;border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium)}.arco-descriptions-border .arco-descriptions-row:not(:last-child){border-bottom:1px solid var(--color-neutral-3)}.arco-descriptions-border .arco-descriptions-item,.arco-descriptions-border .arco-descriptions-item-label-block,.arco-descriptions-border .arco-descriptions-item-value-block{padding:7px 20px;border-right:1px solid var(--color-neutral-3)}.arco-descriptions-border .arco-descriptions-item-label-block{background-color:var(--color-fill-1)}.arco-descriptions-border .arco-descriptions-item-value-block:last-child{border-right:none}.arco-descriptions-border .arco-descriptions-item:last-child{border-right:none}.arco-descriptions-border.arco-descriptions-layout-vertical .arco-descriptions-item-label-block:last-child{border-right:none}.arco-descriptions-layout-vertical:not(.arco-descriptions-border) .arco-descriptions-item-value-block:first-child{padding-left:0}.arco-descriptions-size-mini .arco-descriptions-title{margin-bottom:6px}.arco-descriptions-size-mini .arco-descriptions-item-label-block,.arco-descriptions-size-mini .arco-descriptions-item-value-block{padding-right:20px;padding-bottom:2px;font-size:12px}.arco-descriptions-size-mini.arco-descriptions-border .arco-descriptions-item-label-block,.arco-descriptions-size-mini.arco-descriptions-border .arco-descriptions-item-value-block{padding:3px 20px}.arco-descriptions-size-mini.arco-descriptions-border.arco-descriptions-layout-inline-vertical .arco-descriptions-item{padding:8px 20px}.arco-descriptions-size-small .arco-descriptions-title{margin-bottom:8px}.arco-descriptions-size-small .arco-descriptions-item-label-block,.arco-descriptions-size-small .arco-descriptions-item-value-block{padding-right:20px;padding-bottom:4px;font-size:14px}.arco-descriptions-size-small.arco-descriptions-border .arco-descriptions-item-label-block,.arco-descriptions-size-small.arco-descriptions-border .arco-descriptions-item-value-block{padding:3px 20px}.arco-descriptions-size-small.arco-descriptions-border.arco-descriptions-layout-inline-vertical .arco-descriptions-item{padding:8px 20px}.arco-descriptions-size-medium .arco-descriptions-title{margin-bottom:12px}.arco-descriptions-size-medium .arco-descriptions-item-label-block,.arco-descriptions-size-medium .arco-descriptions-item-value-block{padding-right:20px;padding-bottom:8px;font-size:14px}.arco-descriptions-size-medium.arco-descriptions-border .arco-descriptions-item-label-block,.arco-descriptions-size-medium.arco-descriptions-border .arco-descriptions-item-value-block{padding:5px 
20px}.arco-descriptions-size-medium.arco-descriptions-border.arco-descriptions-layout-inline-vertical .arco-descriptions-item{padding:10px 20px}.arco-descriptions-size-large .arco-descriptions-title{margin-bottom:20px}.arco-descriptions-size-large .arco-descriptions-item-label-block,.arco-descriptions-size-large .arco-descriptions-item-value-block{padding-right:20px;padding-bottom:16px;font-size:14px}.arco-descriptions-size-large.arco-descriptions-border .arco-descriptions-item-label-block,.arco-descriptions-size-large.arco-descriptions-border .arco-descriptions-item-value-block{padding:9px 20px}.arco-descriptions-size-large.arco-descriptions-border.arco-descriptions-layout-inline-vertical .arco-descriptions-item{padding:14px 20px}.arco-divider-horizontal{position:relative;clear:both;width:100%;min-width:100%;max-width:100%;margin:20px 0;border-bottom:1px solid var(--color-neutral-3)}.arco-divider-horizontal.arco-divider-with-text{margin:20px 0}.arco-divider-vertical{display:inline-block;min-width:1px;max-width:1px;height:1em;margin:0 12px;vertical-align:middle;border-left:1px solid var(--color-neutral-3)}.arco-divider-text{position:absolute;top:50%;box-sizing:border-box;padding:0 16px;color:var(--color-text-1);font-weight:500;font-size:14px;line-height:2;background:var(--color-bg-2);transform:translateY(-50%)}.arco-divider-text-center{left:50%;transform:translate(-50%,-50%)}.arco-divider-text-left{left:24px}.arco-divider-text-right{right:24px}.arco-drawer-container{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1001}.arco-drawer-mask{position:absolute;top:0;right:0;bottom:0;left:0;background-color:var(--color-mask-bg)}.arco-drawer{position:absolute;display:flex;flex-direction:column;width:100%;height:100%;overflow:auto;line-height:1.5715;background-color:var(--color-bg-3)}.arco-drawer-header{display:flex;flex-shrink:0;align-items:center;box-sizing:border-box;width:100%;height:48px;padding:0 16px;border-bottom:1px solid var(--color-neutral-3)}.arco-drawer-header .arco-drawer-title{margin-right:auto;color:var(--color-text-1);font-weight:500;font-size:16px;text-align:left}.arco-drawer-header .arco-drawer-close-btn{margin-left:8px;color:var(--color-text-1);font-size:12px;cursor:pointer}.arco-drawer-footer{flex-shrink:0;box-sizing:border-box;padding:16px;text-align:right;border-top:1px solid var(--color-neutral-3)}.arco-drawer-footer>.arco-btn{margin-left:12px}.arco-drawer-body{position:relative;flex:1;box-sizing:border-box;height:100%;padding:12px 16px;overflow:auto;color:var(--color-text-1)}.fade-drawer-enter-from,.fade-drawer-appear-from{opacity:0}.fade-drawer-enter-to,.fade-drawer-appear-to{opacity:1}.fade-drawer-enter-active,.fade-drawer-appear-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1)}.fade-drawer-leave-from{opacity:1}.fade-drawer-leave-to{opacity:0}.fade-drawer-leave-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1)}.slide-left-drawer-enter-from,.slide-left-drawer-appear-from{transform:translate(-100%)}.slide-left-drawer-enter-to,.slide-left-drawer-appear-to{transform:translate(0)}.slide-left-drawer-enter-active,.slide-left-drawer-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-left-drawer-leave-from{transform:translate(0)}.slide-left-drawer-leave-to{transform:translate(-100%)}.slide-left-drawer-leave-active{transition:transform .3s 
cubic-bezier(.34,.69,.1,1)}.slide-right-drawer-enter-from,.slide-right-drawer-appear-from{transform:translate(100%)}.slide-right-drawer-enter-to,.slide-right-drawer-appear-to{transform:translate(0)}.slide-right-drawer-enter-active,.slide-right-drawer-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-right-drawer-leave-from{transform:translate(0)}.slide-right-drawer-leave-to{transform:translate(100%)}.slide-right-drawer-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-top-drawer-enter,.slide-top-drawer-appear,.slide-top-drawer-enter-from,.slide-top-drawer-appear-from{transform:translateY(-100%)}.slide-top-drawer-enter-to,.slide-top-drawer-appear-to{transform:translateY(0)}.slide-top-drawer-enter-active,.slide-top-drawer-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-top-drawer-leave-from{transform:translateY(0)}.slide-top-drawer-leave-to{transform:translateY(-100%)}.slide-top-drawer-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-bottom-drawer-enter-from,.slide-bottom-drawer-appear-from{transform:translateY(100%)}.slide-bottom-drawer-enter-to,.slide-bottom-drawer-appear-to{transform:translateY(0)}.slide-bottom-drawer-enter-active,.slide-bottom-drawer-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-bottom-drawer-leave-from{transform:translateY(0)}.slide-bottom-drawer-leave-to{transform:translateY(100%)}.slide-bottom-drawer-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.arco-dropdown{box-sizing:border-box;padding:4px 0;background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-dropdown-list{margin-top:0;margin-bottom:0;padding-left:0;list-style:none}.arco-dropdown-list-wrapper{max-height:200px;overflow-y:auto}.arco-dropdown-option{position:relative;z-index:1;display:flex;align-items:center;box-sizing:border-box;width:100%;padding:0 12px;color:var(--color-text-1);font-size:14px;line-height:36px;text-align:left;background-color:transparent;cursor:pointer}.arco-dropdown-option-content{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-dropdown-option-has-suffix{justify-content:space-between}.arco-dropdown-option-active,.arco-dropdown-option:not(.arco-dropdown-option-disabled):hover{color:var(--color-text-1);background-color:var(--color-fill-2);transition:all .1s cubic-bezier(0,0,1,1)}.arco-dropdown-option-disabled{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-dropdown-option-icon{display:inline-flex;margin-right:8px}.arco-dropdown-option-suffix{margin-left:12px}.arco-dropdown-group:first-child .arco-dropdown-group-title{margin-top:8px}.arco-dropdown-group-title{box-sizing:border-box;width:100%;margin-top:8px;padding:0 12px;color:var(--color-text-3);font-size:12px;line-height:20px;cursor:default;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-dropdown-submenu{margin-top:-4px}.arco-dropdown.arco-dropdown-has-footer{padding-bottom:0}.arco-dropdown-footer{border-top:1px solid var(--color-fill-3)}.arco-empty{box-sizing:border-box;width:100%;padding:10px 0;text-align:center}.arco-empty-image{margin-bottom:4px;color:rgb(var(--gray-5));font-size:48px;line-height:1}.arco-empty-image img{height:80px}.arco-empty .arco-empty-description{color:rgb(var(--gray-5));font-size:14px}.arco-form-item-status-validating .arco-input-wrapper:not(.arco-input-disabled),.arco-form-item-status-validating 
.arco-textarea-wrapper:not(.arco-textarea-disabled){background-color:var(--color-fill-2);border-color:transparent}.arco-form-item-status-validating .arco-input-wrapper:not(.arco-input-disabled):hover,.arco-form-item-status-validating .arco-textarea-wrapper:not(.arco-textarea-disabled):hover{background-color:var(--color-fill-3);border-color:transparent}.arco-form-item-status-validating .arco-input-wrapper:not(.arco-input-disabled).arco-input-focus,.arco-form-item-status-validating .arco-textarea-wrapper:not(.arco-textarea-disabled).arco-textarea-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-form-item-status-validating .arco-select-view:not(.arco-select-view-disabled),.arco-form-item-status-validating .arco-input-tag:not(.arco-input-tag-disabled){background-color:var(--color-fill-2);border-color:transparent}.arco-form-item-status-validating .arco-select-view:not(.arco-select-view-disabled):hover,.arco-form-item-status-validating .arco-input-tag:not(.arco-input-tag-disabled):hover{background-color:var(--color-fill-3);border-color:transparent}.arco-form-item-status-validating .arco-select-view:not(.arco-select-view-disabled).arco-select-view-focus,.arco-form-item-status-validating .arco-input-tag:not(.arco-input-tag-disabled).arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-form-item-status-validating .arco-picker:not(.arco-picker-disabled){border-color:transparent;background-color:var(--color-fill-2)}.arco-form-item-status-validating .arco-picker:not(.arco-picker-disabled):hover{border-color:transparent;background-color:var(--color-fill-3)}.arco-form-item-status-validating .arco-picker-focused:not(.arco-picker-disabled),.arco-form-item-status-validating .arco-picker-focused:not(.arco-picker-disabled):hover{border-color:rgb(var(--primary-6));background-color:var(--color-bg-2);box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-form-item-status-validating .arco-form-item-message-help,.arco-form-item-status-validating .arco-form-item-feedback{color:rgb(var(--primary-6))}.arco-form-item-status-success .arco-input-wrapper:not(.arco-input-disabled),.arco-form-item-status-success .arco-textarea-wrapper:not(.arco-textarea-disabled){background-color:var(--color-fill-2);border-color:transparent}.arco-form-item-status-success .arco-input-wrapper:not(.arco-input-disabled):hover,.arco-form-item-status-success .arco-textarea-wrapper:not(.arco-textarea-disabled):hover{background-color:var(--color-fill-3);border-color:transparent}.arco-form-item-status-success .arco-input-wrapper:not(.arco-input-disabled).arco-input-focus,.arco-form-item-status-success .arco-textarea-wrapper:not(.arco-textarea-disabled).arco-textarea-focus{background-color:var(--color-bg-2);border-color:rgb(var(--success-6));box-shadow:0 0 0 0 var(--color-success-light-2)}.arco-form-item-status-success .arco-select-view:not(.arco-select-view-disabled),.arco-form-item-status-success .arco-input-tag:not(.arco-input-tag-disabled){background-color:var(--color-fill-2);border-color:transparent}.arco-form-item-status-success .arco-select-view:not(.arco-select-view-disabled):hover,.arco-form-item-status-success .arco-input-tag:not(.arco-input-tag-disabled):hover{background-color:var(--color-fill-3);border-color:transparent}.arco-form-item-status-success .arco-select-view:not(.arco-select-view-disabled).arco-select-view-focus,.arco-form-item-status-success 
.arco-input-tag:not(.arco-input-tag-disabled).arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--success-6));box-shadow:0 0 0 0 var(--color-success-light-2)}.arco-form-item-status-success .arco-picker:not(.arco-picker-disabled){border-color:transparent;background-color:var(--color-fill-2)}.arco-form-item-status-success .arco-picker:not(.arco-picker-disabled):hover{border-color:transparent;background-color:var(--color-fill-3)}.arco-form-item-status-success .arco-picker-focused:not(.arco-picker-disabled),.arco-form-item-status-success .arco-picker-focused:not(.arco-picker-disabled):hover{border-color:rgb(var(--success-6));background-color:var(--color-bg-2);box-shadow:0 0 0 0 var(--color-success-light-2)}.arco-form-item-status-success .arco-form-item-message-help,.arco-form-item-status-success .arco-form-item-feedback{color:rgb(var(--success-6))}.arco-form-item-status-warning .arco-input-wrapper:not(.arco-input-disabled),.arco-form-item-status-warning .arco-textarea-wrapper:not(.arco-textarea-disabled){background-color:var(--color-warning-light-1);border-color:transparent}.arco-form-item-status-warning .arco-input-wrapper:not(.arco-input-disabled):hover,.arco-form-item-status-warning .arco-textarea-wrapper:not(.arco-textarea-disabled):hover{background-color:var(--color-warning-light-2);border-color:transparent}.arco-form-item-status-warning .arco-input-wrapper:not(.arco-input-disabled).arco-input-focus,.arco-form-item-status-warning .arco-textarea-wrapper:not(.arco-textarea-disabled).arco-textarea-focus{background-color:var(--color-bg-2);border-color:rgb(var(--warning-6));box-shadow:0 0 0 0 var(--color-warning-light-2)}.arco-form-item-status-warning .arco-select-view:not(.arco-select-view-disabled),.arco-form-item-status-warning .arco-input-tag:not(.arco-input-tag-disabled){background-color:var(--color-warning-light-1);border-color:transparent}.arco-form-item-status-warning .arco-select-view:not(.arco-select-view-disabled):hover,.arco-form-item-status-warning .arco-input-tag:not(.arco-input-tag-disabled):hover{background-color:var(--color-warning-light-2);border-color:transparent}.arco-form-item-status-warning .arco-select-view:not(.arco-select-view-disabled).arco-select-view-focus,.arco-form-item-status-warning .arco-input-tag:not(.arco-input-tag-disabled).arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--warning-6));box-shadow:0 0 0 0 var(--color-warning-light-2)}.arco-form-item-status-warning .arco-picker:not(.arco-picker-disabled){border-color:transparent;background-color:var(--color-warning-light-1)}.arco-form-item-status-warning .arco-picker:not(.arco-picker-disabled):hover{border-color:transparent;background-color:var(--color-warning-light-2)}.arco-form-item-status-warning .arco-picker-focused:not(.arco-picker-disabled),.arco-form-item-status-warning .arco-picker-focused:not(.arco-picker-disabled):hover{border-color:rgb(var(--warning-6));background-color:var(--color-bg-2);box-shadow:0 0 0 0 var(--color-warning-light-2)}.arco-form-item-status-warning .arco-form-item-message-help,.arco-form-item-status-warning .arco-form-item-feedback{color:rgb(var(--warning-6))}.arco-form-item-status-error .arco-input-wrapper:not(.arco-input-disabled),.arco-form-item-status-error .arco-textarea-wrapper:not(.arco-textarea-disabled){background-color:var(--color-danger-light-1);border-color:transparent}.arco-form-item-status-error .arco-input-wrapper:not(.arco-input-disabled):hover,.arco-form-item-status-error 
.arco-textarea-wrapper:not(.arco-textarea-disabled):hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-form-item-status-error .arco-input-wrapper:not(.arco-input-disabled).arco-input-focus,.arco-form-item-status-error .arco-textarea-wrapper:not(.arco-textarea-disabled).arco-textarea-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-form-item-status-error .arco-select-view:not(.arco-select-view-disabled),.arco-form-item-status-error .arco-input-tag:not(.arco-input-tag-disabled){background-color:var(--color-danger-light-1);border-color:transparent}.arco-form-item-status-error .arco-select-view:not(.arco-select-view-disabled):hover,.arco-form-item-status-error .arco-input-tag:not(.arco-input-tag-disabled):hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-form-item-status-error .arco-select-view:not(.arco-select-view-disabled).arco-select-view-focus,.arco-form-item-status-error .arco-input-tag:not(.arco-input-tag-disabled).arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-form-item-status-error .arco-picker:not(.arco-picker-disabled){border-color:transparent;background-color:var(--color-danger-light-1)}.arco-form-item-status-error .arco-picker:not(.arco-picker-disabled):hover{border-color:transparent;background-color:var(--color-danger-light-2)}.arco-form-item-status-error .arco-picker-focused:not(.arco-picker-disabled),.arco-form-item-status-error .arco-picker-focused:not(.arco-picker-disabled):hover{border-color:rgb(var(--danger-6));background-color:var(--color-bg-2);box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-form-item-status-error .arco-form-item-message-help,.arco-form-item-status-error .arco-form-item-feedback{color:rgb(var(--danger-6))}.arco-form-item-control-children{position:relative}.arco-form-item-feedback{position:absolute;top:50%;right:9px;font-size:14px;transform:translateY(-50%)}.arco-form-item-feedback .arco-icon-loading{font-size:12px}.arco-form-item-has-feedback .arco-input,.arco-form-item-has-feedback .arco-input-inner-wrapper,.arco-form-item-has-feedback .arco-textarea{padding-right:28px}.arco-form-item-has-feedback .arco-input-number-mode-embed .arco-input-number-step-layer{right:24px}.arco-form-item-has-feedback .arco-select.arco-select-multiple .arco-select-view,.arco-form-item-has-feedback .arco-select.arco-select-single .arco-select-view{padding-right:28px}.arco-form-item-has-feedback .arco-select.arco-select-multiple .arco-select-suffix{padding-right:0}.arco-form-item-has-feedback .arco-cascader.arco-cascader-multiple .arco-cascader-view,.arco-form-item-has-feedback .arco-cascader.arco-cascader-single .arco-cascader-view{padding-right:28px}.arco-form-item-has-feedback .arco-cascader.arco-cascader-multiple .arco-cascader-suffix{padding-right:0}.arco-form-item-has-feedback .arco-tree-select.arco-tree-select-multiple .arco-tree-select-view,.arco-form-item-has-feedback .arco-tree-select.arco-tree-select-single .arco-tree-select-view{padding-right:28px}.arco-form-item-has-feedback .arco-tree-select.arco-tree-select-multiple .arco-tree-select-suffix{padding-right:0}.arco-form-item-has-feedback .arco-picker{padding-right:28px}.arco-form-item-has-feedback .arco-picker-suffix .arco-picker-suffix-icon,.arco-form-item-has-feedback .arco-picker-suffix 
.arco-picker-clear-icon{margin-right:0;margin-left:0}.arco-form{display:flex;flex-direction:column;width:100%}.arco-form-layout-inline{flex-direction:row;flex-wrap:wrap}.arco-form-layout-inline .arco-form-item{width:auto;margin-bottom:8px}.arco-form-auto-label-width .arco-form-item-label-col>.arco-form-item-label{white-space:nowrap}.arco-form-item{display:flex;align-items:flex-start;justify-content:flex-start;width:100%;margin-bottom:20px}.arco-form-item-layout-vertical{display:block}.arco-form-item-layout-vertical>.arco-form-item-label-col{justify-content:flex-start;margin-bottom:8px;padding:0;line-height:1.5715;white-space:normal}.arco-form-item-layout-inline{margin-right:24px}.arco-form-item-label-col{padding-right:16px}.arco-form-item.arco-form-item-error,.arco-form-item.arco-form-item-has-help{margin-bottom:0}.arco-form-item-wrapper-flex.arco-col{flex:1}.arco-form-size-mini .arco-form-item-label-col{line-height:24px}.arco-form-size-mini .arco-form-item-label-col>.arco-form-item-label{font-size:12px}.arco-form-size-mini .arco-form-item-content,.arco-form-size-mini .arco-form-item-wrapper-col{min-height:24px}.arco-form-size-small .arco-form-item-label-col{line-height:28px}.arco-form-size-small .arco-form-item-label-col>.arco-form-item-label{font-size:14px}.arco-form-size-small .arco-form-item-content,.arco-form-size-small .arco-form-item-wrapper-col{min-height:28px}.arco-form-size-large .arco-form-item-label-col{line-height:36px}.arco-form-size-large .arco-form-item-label-col>.arco-form-item-label{font-size:14px}.arco-form-size-large .arco-form-item-content,.arco-form-size-large .arco-form-item-wrapper-col{min-height:36px}.arco-form-item-extra{margin-top:4px;color:var(--color-text-3);font-size:12px}.arco-form-item-message{min-height:20px;color:rgb(var(--danger-6));font-size:12px;line-height:20px}.arco-form-item-message-help{color:var(--color-text-3)}.arco-form-item-message+.arco-form-item-extra{margin-top:0;margin-bottom:4px}.arco-form-item-label-col{display:flex;flex-shrink:0;justify-content:flex-end;line-height:32px;white-space:nowrap}.arco-form-item-label-col-left{justify-content:flex-start}.arco-form-item-label-col>.arco-form-item-label{max-width:100%;color:var(--color-text-2);font-size:14px;white-space:normal}.arco-form-item-label-col.arco-form-item-label-col-flex{box-sizing:content-box}.arco-form-item-wrapper-col{display:flex;flex-direction:column;align-items:flex-start;width:100%;min-width:0;min-height:32px}.arco-form-item-content{flex:1;max-width:100%;min-height:32px}.arco-form-item-content-wrapper{display:flex;align-items:center;justify-content:flex-start;width:100%}.arco-form-item-content-flex{display:flex;align-items:center;justify-content:flex-start}.arco-form .arco-slider{display:block}.arco-form-item-label-required-symbol{color:rgb(var(--danger-6));font-size:12px;line-height:1}.arco-form-item-label-required-symbol svg{display:inline-block;transform:scale(.5)}.arco-form-item-label-tooltip{margin-left:4px;color:var(--color-text-4)}.form-blink-enter-from,.form-blink-appear-from{opacity:0}.form-blink-enter-to,.form-blink-appear-to{opacity:1}.form-blink-enter-active,.form-blink-appear-active{transition:opacity .3s cubic-bezier(0,0,1,1);animation:arco-form-blink .5s cubic-bezier(0,0,1,1)}@keyframes arco-form-blink{0%{opacity:1}50%{opacity:.2}to{opacity:1}}.arco-row{display:flex;flex-flow:row 
wrap}.arco-row-nowrap{flex-wrap:nowrap}.arco-row-align-start{align-items:flex-start}.arco-row-align-center{align-items:center}.arco-row-align-end{align-items:flex-end}.arco-row-justify-start{justify-content:flex-start}.arco-row-justify-center{justify-content:center}.arco-row-justify-end{justify-content:flex-end}.arco-row-justify-space-around{justify-content:space-around}.arco-row-justify-space-between{justify-content:space-between}.arco-col{box-sizing:border-box}.arco-col-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-3{flex:0 0 12.5%;width:12.5%}.arco-col-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-6{flex:0 0 25%;width:25%}.arco-col-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-9{flex:0 0 37.5%;width:37.5%}.arco-col-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-12{flex:0 0 50%;width:50%}.arco-col-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-15{flex:0 0 62.5%;width:62.5%}.arco-col-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-18{flex:0 0 75%;width:75%}.arco-col-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-21{flex:0 0 87.5%;width:87.5%}.arco-col-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-24{flex:0 0 100%;width:100%}.arco-col-offset-1{margin-left:4.16666667%}.arco-col-offset-2{margin-left:8.33333333%}.arco-col-offset-3{margin-left:12.5%}.arco-col-offset-4{margin-left:16.66666667%}.arco-col-offset-5{margin-left:20.83333333%}.arco-col-offset-6{margin-left:25%}.arco-col-offset-7{margin-left:29.16666667%}.arco-col-offset-8{margin-left:33.33333333%}.arco-col-offset-9{margin-left:37.5%}.arco-col-offset-10{margin-left:41.66666667%}.arco-col-offset-11{margin-left:45.83333333%}.arco-col-offset-12{margin-left:50%}.arco-col-offset-13{margin-left:54.16666667%}.arco-col-offset-14{margin-left:58.33333333%}.arco-col-offset-15{margin-left:62.5%}.arco-col-offset-16{margin-left:66.66666667%}.arco-col-offset-17{margin-left:70.83333333%}.arco-col-offset-18{margin-left:75%}.arco-col-offset-19{margin-left:79.16666667%}.arco-col-offset-20{margin-left:83.33333333%}.arco-col-offset-21{margin-left:87.5%}.arco-col-offset-22{margin-left:91.66666667%}.arco-col-offset-23{margin-left:95.83333333%}.arco-col-order-1{order:1}.arco-col-order-2{order:2}.arco-col-order-3{order:3}.arco-col-order-4{order:4}.arco-col-order-5{order:5}.arco-col-order-6{order:6}.arco-col-order-7{order:7}.arco-col-order-8{order:8}.arco-col-order-9{order:9}.arco-col-order-10{order:10}.arco-col-order-11{order:11}.arco-col-order-12{order:12}.arco-col-order-13{order:13}.arco-col-order-14{order:14}.arco-col-order-15{order:15}.arco-col-order-16{order:16}.arco-col-order-17{order:17}.arco-col-order-18{order:18}.arco-col-order-19{order:19}.arco-col-order-20{order:20}.arco-col-order-21{order:21}.arco-col-order-22{order:22}.arco-col-order-23{order:23}.arco-col-order-24{order:24}.arco-col-xs-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-xs-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-xs-3{flex:0 0 12.5%;width:12.5%}.arco-col-xs-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-xs-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-xs-6{flex:0 0 
25%;width:25%}.arco-col-xs-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-xs-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-xs-9{flex:0 0 37.5%;width:37.5%}.arco-col-xs-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-xs-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-xs-12{flex:0 0 50%;width:50%}.arco-col-xs-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-xs-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-xs-15{flex:0 0 62.5%;width:62.5%}.arco-col-xs-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-xs-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-xs-18{flex:0 0 75%;width:75%}.arco-col-xs-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-xs-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-xs-21{flex:0 0 87.5%;width:87.5%}.arco-col-xs-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-xs-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-xs-24{flex:0 0 100%;width:100%}.arco-col-xs-offset-1{margin-left:4.16666667%}.arco-col-xs-offset-2{margin-left:8.33333333%}.arco-col-xs-offset-3{margin-left:12.5%}.arco-col-xs-offset-4{margin-left:16.66666667%}.arco-col-xs-offset-5{margin-left:20.83333333%}.arco-col-xs-offset-6{margin-left:25%}.arco-col-xs-offset-7{margin-left:29.16666667%}.arco-col-xs-offset-8{margin-left:33.33333333%}.arco-col-xs-offset-9{margin-left:37.5%}.arco-col-xs-offset-10{margin-left:41.66666667%}.arco-col-xs-offset-11{margin-left:45.83333333%}.arco-col-xs-offset-12{margin-left:50%}.arco-col-xs-offset-13{margin-left:54.16666667%}.arco-col-xs-offset-14{margin-left:58.33333333%}.arco-col-xs-offset-15{margin-left:62.5%}.arco-col-xs-offset-16{margin-left:66.66666667%}.arco-col-xs-offset-17{margin-left:70.83333333%}.arco-col-xs-offset-18{margin-left:75%}.arco-col-xs-offset-19{margin-left:79.16666667%}.arco-col-xs-offset-20{margin-left:83.33333333%}.arco-col-xs-offset-21{margin-left:87.5%}.arco-col-xs-offset-22{margin-left:91.66666667%}.arco-col-xs-offset-23{margin-left:95.83333333%}.arco-col-xs-order-1{order:1}.arco-col-xs-order-2{order:2}.arco-col-xs-order-3{order:3}.arco-col-xs-order-4{order:4}.arco-col-xs-order-5{order:5}.arco-col-xs-order-6{order:6}.arco-col-xs-order-7{order:7}.arco-col-xs-order-8{order:8}.arco-col-xs-order-9{order:9}.arco-col-xs-order-10{order:10}.arco-col-xs-order-11{order:11}.arco-col-xs-order-12{order:12}.arco-col-xs-order-13{order:13}.arco-col-xs-order-14{order:14}.arco-col-xs-order-15{order:15}.arco-col-xs-order-16{order:16}.arco-col-xs-order-17{order:17}.arco-col-xs-order-18{order:18}.arco-col-xs-order-19{order:19}.arco-col-xs-order-20{order:20}.arco-col-xs-order-21{order:21}.arco-col-xs-order-22{order:22}.arco-col-xs-order-23{order:23}.arco-col-xs-order-24{order:24}@media (min-width: 576px){.arco-col-sm-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-sm-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-sm-3{flex:0 0 12.5%;width:12.5%}.arco-col-sm-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-sm-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-sm-6{flex:0 0 25%;width:25%}.arco-col-sm-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-sm-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-sm-9{flex:0 0 37.5%;width:37.5%}.arco-col-sm-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-sm-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-sm-12{flex:0 0 50%;width:50%}.arco-col-sm-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-sm-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-sm-15{flex:0 0 62.5%;width:62.5%}.arco-col-sm-16{flex:0 0 
66.66666667%;width:66.66666667%}.arco-col-sm-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-sm-18{flex:0 0 75%;width:75%}.arco-col-sm-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-sm-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-sm-21{flex:0 0 87.5%;width:87.5%}.arco-col-sm-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-sm-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-sm-24{flex:0 0 100%;width:100%}.arco-col-sm-offset-1{margin-left:4.16666667%}.arco-col-sm-offset-2{margin-left:8.33333333%}.arco-col-sm-offset-3{margin-left:12.5%}.arco-col-sm-offset-4{margin-left:16.66666667%}.arco-col-sm-offset-5{margin-left:20.83333333%}.arco-col-sm-offset-6{margin-left:25%}.arco-col-sm-offset-7{margin-left:29.16666667%}.arco-col-sm-offset-8{margin-left:33.33333333%}.arco-col-sm-offset-9{margin-left:37.5%}.arco-col-sm-offset-10{margin-left:41.66666667%}.arco-col-sm-offset-11{margin-left:45.83333333%}.arco-col-sm-offset-12{margin-left:50%}.arco-col-sm-offset-13{margin-left:54.16666667%}.arco-col-sm-offset-14{margin-left:58.33333333%}.arco-col-sm-offset-15{margin-left:62.5%}.arco-col-sm-offset-16{margin-left:66.66666667%}.arco-col-sm-offset-17{margin-left:70.83333333%}.arco-col-sm-offset-18{margin-left:75%}.arco-col-sm-offset-19{margin-left:79.16666667%}.arco-col-sm-offset-20{margin-left:83.33333333%}.arco-col-sm-offset-21{margin-left:87.5%}.arco-col-sm-offset-22{margin-left:91.66666667%}.arco-col-sm-offset-23{margin-left:95.83333333%}.arco-col-sm-order-1{order:1}.arco-col-sm-order-2{order:2}.arco-col-sm-order-3{order:3}.arco-col-sm-order-4{order:4}.arco-col-sm-order-5{order:5}.arco-col-sm-order-6{order:6}.arco-col-sm-order-7{order:7}.arco-col-sm-order-8{order:8}.arco-col-sm-order-9{order:9}.arco-col-sm-order-10{order:10}.arco-col-sm-order-11{order:11}.arco-col-sm-order-12{order:12}.arco-col-sm-order-13{order:13}.arco-col-sm-order-14{order:14}.arco-col-sm-order-15{order:15}.arco-col-sm-order-16{order:16}.arco-col-sm-order-17{order:17}.arco-col-sm-order-18{order:18}.arco-col-sm-order-19{order:19}.arco-col-sm-order-20{order:20}.arco-col-sm-order-21{order:21}.arco-col-sm-order-22{order:22}.arco-col-sm-order-23{order:23}.arco-col-sm-order-24{order:24}}@media (min-width: 768px){.arco-col-md-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-md-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-md-3{flex:0 0 12.5%;width:12.5%}.arco-col-md-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-md-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-md-6{flex:0 0 25%;width:25%}.arco-col-md-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-md-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-md-9{flex:0 0 37.5%;width:37.5%}.arco-col-md-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-md-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-md-12{flex:0 0 50%;width:50%}.arco-col-md-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-md-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-md-15{flex:0 0 62.5%;width:62.5%}.arco-col-md-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-md-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-md-18{flex:0 0 75%;width:75%}.arco-col-md-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-md-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-md-21{flex:0 0 87.5%;width:87.5%}.arco-col-md-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-md-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-md-24{flex:0 0 
100%;width:100%}.arco-col-md-offset-1{margin-left:4.16666667%}.arco-col-md-offset-2{margin-left:8.33333333%}.arco-col-md-offset-3{margin-left:12.5%}.arco-col-md-offset-4{margin-left:16.66666667%}.arco-col-md-offset-5{margin-left:20.83333333%}.arco-col-md-offset-6{margin-left:25%}.arco-col-md-offset-7{margin-left:29.16666667%}.arco-col-md-offset-8{margin-left:33.33333333%}.arco-col-md-offset-9{margin-left:37.5%}.arco-col-md-offset-10{margin-left:41.66666667%}.arco-col-md-offset-11{margin-left:45.83333333%}.arco-col-md-offset-12{margin-left:50%}.arco-col-md-offset-13{margin-left:54.16666667%}.arco-col-md-offset-14{margin-left:58.33333333%}.arco-col-md-offset-15{margin-left:62.5%}.arco-col-md-offset-16{margin-left:66.66666667%}.arco-col-md-offset-17{margin-left:70.83333333%}.arco-col-md-offset-18{margin-left:75%}.arco-col-md-offset-19{margin-left:79.16666667%}.arco-col-md-offset-20{margin-left:83.33333333%}.arco-col-md-offset-21{margin-left:87.5%}.arco-col-md-offset-22{margin-left:91.66666667%}.arco-col-md-offset-23{margin-left:95.83333333%}.arco-col-md-order-1{order:1}.arco-col-md-order-2{order:2}.arco-col-md-order-3{order:3}.arco-col-md-order-4{order:4}.arco-col-md-order-5{order:5}.arco-col-md-order-6{order:6}.arco-col-md-order-7{order:7}.arco-col-md-order-8{order:8}.arco-col-md-order-9{order:9}.arco-col-md-order-10{order:10}.arco-col-md-order-11{order:11}.arco-col-md-order-12{order:12}.arco-col-md-order-13{order:13}.arco-col-md-order-14{order:14}.arco-col-md-order-15{order:15}.arco-col-md-order-16{order:16}.arco-col-md-order-17{order:17}.arco-col-md-order-18{order:18}.arco-col-md-order-19{order:19}.arco-col-md-order-20{order:20}.arco-col-md-order-21{order:21}.arco-col-md-order-22{order:22}.arco-col-md-order-23{order:23}.arco-col-md-order-24{order:24}}@media (min-width: 992px){.arco-col-lg-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-lg-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-lg-3{flex:0 0 12.5%;width:12.5%}.arco-col-lg-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-lg-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-lg-6{flex:0 0 25%;width:25%}.arco-col-lg-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-lg-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-lg-9{flex:0 0 37.5%;width:37.5%}.arco-col-lg-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-lg-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-lg-12{flex:0 0 50%;width:50%}.arco-col-lg-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-lg-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-lg-15{flex:0 0 62.5%;width:62.5%}.arco-col-lg-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-lg-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-lg-18{flex:0 0 75%;width:75%}.arco-col-lg-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-lg-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-lg-21{flex:0 0 87.5%;width:87.5%}.arco-col-lg-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-lg-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-lg-24{flex:0 0 
100%;width:100%}.arco-col-lg-offset-1{margin-left:4.16666667%}.arco-col-lg-offset-2{margin-left:8.33333333%}.arco-col-lg-offset-3{margin-left:12.5%}.arco-col-lg-offset-4{margin-left:16.66666667%}.arco-col-lg-offset-5{margin-left:20.83333333%}.arco-col-lg-offset-6{margin-left:25%}.arco-col-lg-offset-7{margin-left:29.16666667%}.arco-col-lg-offset-8{margin-left:33.33333333%}.arco-col-lg-offset-9{margin-left:37.5%}.arco-col-lg-offset-10{margin-left:41.66666667%}.arco-col-lg-offset-11{margin-left:45.83333333%}.arco-col-lg-offset-12{margin-left:50%}.arco-col-lg-offset-13{margin-left:54.16666667%}.arco-col-lg-offset-14{margin-left:58.33333333%}.arco-col-lg-offset-15{margin-left:62.5%}.arco-col-lg-offset-16{margin-left:66.66666667%}.arco-col-lg-offset-17{margin-left:70.83333333%}.arco-col-lg-offset-18{margin-left:75%}.arco-col-lg-offset-19{margin-left:79.16666667%}.arco-col-lg-offset-20{margin-left:83.33333333%}.arco-col-lg-offset-21{margin-left:87.5%}.arco-col-lg-offset-22{margin-left:91.66666667%}.arco-col-lg-offset-23{margin-left:95.83333333%}.arco-col-lg-order-1{order:1}.arco-col-lg-order-2{order:2}.arco-col-lg-order-3{order:3}.arco-col-lg-order-4{order:4}.arco-col-lg-order-5{order:5}.arco-col-lg-order-6{order:6}.arco-col-lg-order-7{order:7}.arco-col-lg-order-8{order:8}.arco-col-lg-order-9{order:9}.arco-col-lg-order-10{order:10}.arco-col-lg-order-11{order:11}.arco-col-lg-order-12{order:12}.arco-col-lg-order-13{order:13}.arco-col-lg-order-14{order:14}.arco-col-lg-order-15{order:15}.arco-col-lg-order-16{order:16}.arco-col-lg-order-17{order:17}.arco-col-lg-order-18{order:18}.arco-col-lg-order-19{order:19}.arco-col-lg-order-20{order:20}.arco-col-lg-order-21{order:21}.arco-col-lg-order-22{order:22}.arco-col-lg-order-23{order:23}.arco-col-lg-order-24{order:24}}@media (min-width: 1200px){.arco-col-xl-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-xl-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-xl-3{flex:0 0 12.5%;width:12.5%}.arco-col-xl-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-xl-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-xl-6{flex:0 0 25%;width:25%}.arco-col-xl-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-xl-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-xl-9{flex:0 0 37.5%;width:37.5%}.arco-col-xl-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-xl-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-xl-12{flex:0 0 50%;width:50%}.arco-col-xl-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-xl-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-xl-15{flex:0 0 62.5%;width:62.5%}.arco-col-xl-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-xl-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-xl-18{flex:0 0 75%;width:75%}.arco-col-xl-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-xl-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-xl-21{flex:0 0 87.5%;width:87.5%}.arco-col-xl-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-xl-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-xl-24{flex:0 0 
100%;width:100%}.arco-col-xl-offset-1{margin-left:4.16666667%}.arco-col-xl-offset-2{margin-left:8.33333333%}.arco-col-xl-offset-3{margin-left:12.5%}.arco-col-xl-offset-4{margin-left:16.66666667%}.arco-col-xl-offset-5{margin-left:20.83333333%}.arco-col-xl-offset-6{margin-left:25%}.arco-col-xl-offset-7{margin-left:29.16666667%}.arco-col-xl-offset-8{margin-left:33.33333333%}.arco-col-xl-offset-9{margin-left:37.5%}.arco-col-xl-offset-10{margin-left:41.66666667%}.arco-col-xl-offset-11{margin-left:45.83333333%}.arco-col-xl-offset-12{margin-left:50%}.arco-col-xl-offset-13{margin-left:54.16666667%}.arco-col-xl-offset-14{margin-left:58.33333333%}.arco-col-xl-offset-15{margin-left:62.5%}.arco-col-xl-offset-16{margin-left:66.66666667%}.arco-col-xl-offset-17{margin-left:70.83333333%}.arco-col-xl-offset-18{margin-left:75%}.arco-col-xl-offset-19{margin-left:79.16666667%}.arco-col-xl-offset-20{margin-left:83.33333333%}.arco-col-xl-offset-21{margin-left:87.5%}.arco-col-xl-offset-22{margin-left:91.66666667%}.arco-col-xl-offset-23{margin-left:95.83333333%}.arco-col-xl-order-1{order:1}.arco-col-xl-order-2{order:2}.arco-col-xl-order-3{order:3}.arco-col-xl-order-4{order:4}.arco-col-xl-order-5{order:5}.arco-col-xl-order-6{order:6}.arco-col-xl-order-7{order:7}.arco-col-xl-order-8{order:8}.arco-col-xl-order-9{order:9}.arco-col-xl-order-10{order:10}.arco-col-xl-order-11{order:11}.arco-col-xl-order-12{order:12}.arco-col-xl-order-13{order:13}.arco-col-xl-order-14{order:14}.arco-col-xl-order-15{order:15}.arco-col-xl-order-16{order:16}.arco-col-xl-order-17{order:17}.arco-col-xl-order-18{order:18}.arco-col-xl-order-19{order:19}.arco-col-xl-order-20{order:20}.arco-col-xl-order-21{order:21}.arco-col-xl-order-22{order:22}.arco-col-xl-order-23{order:23}.arco-col-xl-order-24{order:24}}@media (min-width: 1600px){.arco-col-xxl-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-xxl-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-xxl-3{flex:0 0 12.5%;width:12.5%}.arco-col-xxl-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-xxl-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-xxl-6{flex:0 0 25%;width:25%}.arco-col-xxl-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-xxl-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-xxl-9{flex:0 0 37.5%;width:37.5%}.arco-col-xxl-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-xxl-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-xxl-12{flex:0 0 50%;width:50%}.arco-col-xxl-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-xxl-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-xxl-15{flex:0 0 62.5%;width:62.5%}.arco-col-xxl-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-xxl-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-xxl-18{flex:0 0 75%;width:75%}.arco-col-xxl-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-xxl-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-xxl-21{flex:0 0 87.5%;width:87.5%}.arco-col-xxl-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-xxl-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-xxl-24{flex:0 0 
100%;width:100%}.arco-col-xxl-offset-1{margin-left:4.16666667%}.arco-col-xxl-offset-2{margin-left:8.33333333%}.arco-col-xxl-offset-3{margin-left:12.5%}.arco-col-xxl-offset-4{margin-left:16.66666667%}.arco-col-xxl-offset-5{margin-left:20.83333333%}.arco-col-xxl-offset-6{margin-left:25%}.arco-col-xxl-offset-7{margin-left:29.16666667%}.arco-col-xxl-offset-8{margin-left:33.33333333%}.arco-col-xxl-offset-9{margin-left:37.5%}.arco-col-xxl-offset-10{margin-left:41.66666667%}.arco-col-xxl-offset-11{margin-left:45.83333333%}.arco-col-xxl-offset-12{margin-left:50%}.arco-col-xxl-offset-13{margin-left:54.16666667%}.arco-col-xxl-offset-14{margin-left:58.33333333%}.arco-col-xxl-offset-15{margin-left:62.5%}.arco-col-xxl-offset-16{margin-left:66.66666667%}.arco-col-xxl-offset-17{margin-left:70.83333333%}.arco-col-xxl-offset-18{margin-left:75%}.arco-col-xxl-offset-19{margin-left:79.16666667%}.arco-col-xxl-offset-20{margin-left:83.33333333%}.arco-col-xxl-offset-21{margin-left:87.5%}.arco-col-xxl-offset-22{margin-left:91.66666667%}.arco-col-xxl-offset-23{margin-left:95.83333333%}.arco-col-xxl-order-1{order:1}.arco-col-xxl-order-2{order:2}.arco-col-xxl-order-3{order:3}.arco-col-xxl-order-4{order:4}.arco-col-xxl-order-5{order:5}.arco-col-xxl-order-6{order:6}.arco-col-xxl-order-7{order:7}.arco-col-xxl-order-8{order:8}.arco-col-xxl-order-9{order:9}.arco-col-xxl-order-10{order:10}.arco-col-xxl-order-11{order:11}.arco-col-xxl-order-12{order:12}.arco-col-xxl-order-13{order:13}.arco-col-xxl-order-14{order:14}.arco-col-xxl-order-15{order:15}.arco-col-xxl-order-16{order:16}.arco-col-xxl-order-17{order:17}.arco-col-xxl-order-18{order:18}.arco-col-xxl-order-19{order:19}.arco-col-xxl-order-20{order:20}.arco-col-xxl-order-21{order:21}.arco-col-xxl-order-22{order:22}.arco-col-xxl-order-23{order:23}.arco-col-xxl-order-24{order:24}}.arco-grid{display:grid}.arco-image-trigger{padding:6px 4px;background:var(--color-bg-5);border:1px solid var(--color-neutral-3);border-radius:4px}.arco-image-trigger .arco-trigger-arrow{background-color:var(--color-bg-5);border:1px solid var(--color-neutral-3)}.arco-image{position:relative;display:inline-block;border-radius:var(--border-radius-small)}.arco-image-img{vertical-align:middle;border-radius:inherit}.arco-image-overlay{position:absolute;top:0;left:0;width:100%;height:100%}.arco-image-footer{display:flex;width:100%;max-width:100%}.arco-image-footer-caption{flex:1 1 auto}.arco-image-footer-caption-title{font-weight:500;font-size:16px}.arco-image-footer-caption-description{font-size:14px}.arco-image-footer-extra{flex:0 0 auto;padding-left:12px}.arco-image-with-footer-inner .arco-image-footer{position:absolute;bottom:0;left:0;align-items:center;box-sizing:border-box;padding:9px 16px;color:var(--color-white);background:linear-gradient(360deg,rgba(0,0,0,.3) 0%,rgba(0,0,0,0) 100%);border-bottom-right-radius:var(--border-radius-small);border-bottom-left-radius:var(--border-radius-small)}.arco-image-with-footer-inner .arco-image-footer-caption-title,.arco-image-with-footer-inner .arco-image-footer-caption-description{color:var(--color-white)}.arco-image-with-footer-outer .arco-image-footer{margin-top:4px;color:var(--color-neutral-8)}.arco-image-with-footer-outer .arco-image-footer-caption-title{color:var(--color-text-1)}.arco-image-with-footer-outer 
.arco-image-footer-caption-description{color:var(--color-neutral-6)}.arco-image-error{display:flex;flex-direction:column;align-items:center;justify-content:center;box-sizing:border-box;width:100%;height:100%;color:var(--color-neutral-4);background-color:var(--color-neutral-1)}.arco-image-error-icon{width:60px;max-width:100%;height:60px;max-height:100%}.arco-image-error-icon>svg{width:100%;height:100%}.arco-image-error-alt{padding:8px 16px;font-size:12px;line-height:1.6667;text-align:center}.arco-image-loader{position:absolute;top:0;left:0;width:100%;height:100%;background-color:var(--color-neutral-1)}.arco-image-loader-spin{position:absolute;top:50%;left:50%;color:rgb(var(--primary-6));font-size:32px;text-align:center;transform:translate(-50%,-50%)}.arco-image-loader-spin-text{color:var(--color-neutral-6);font-size:16px}.arco-image-simple.arco-image-with-footer-inner .arco-image-footer{padding:12px 16px}.arco-image-loading .arco-image-img,.arco-image-loading-error .arco-image-img{visibility:hidden}.arco-image-preview{position:fixed;top:0;left:0;z-index:1001;width:100%;height:100%}.arco-image-preview-hide{display:none}.arco-image-preview-mask,.arco-image-preview-wrapper{position:absolute;top:0;left:0;width:100%;height:100%}.arco-image-preview-mask{background-color:var(--color-mask-bg)}.arco-image-preview-img-container{width:100%;height:100%;text-align:center}.arco-image-preview-img-container:before{display:inline-block;width:0;height:100%;vertical-align:middle;content:""}.arco-image-preview-img-container .arco-image-preview-img{display:inline-block;max-width:100%;max-height:100%;vertical-align:middle;cursor:grab;user-select:none}.arco-image-preview-img-container .arco-image-preview-img.arco-image-preview-img-moving{cursor:grabbing}.arco-image-preview-scale-value{box-sizing:border-box;padding:7px 10px;color:var(--color-white);font-size:12px;line-height:initial;background-color:#ffffff14;position:absolute;top:50%;left:50%;transform:translate(-50%,-50%)}.arco-image-preview-toolbar{position:absolute;bottom:46px;left:50%;display:flex;align-items:flex-start;padding:4px 
16px;background-color:var(--color-bg-2);border-radius:var(--border-radius-medium);transform:translate(-50%)}.arco-image-preview-toolbar-action{display:flex;align-items:center;color:var(--color-neutral-8);font-size:14px;background-color:transparent;border-radius:var(--border-radius-small);cursor:pointer}.arco-image-preview-toolbar-action:not(:last-of-type){margin-right:0}.arco-image-preview-toolbar-action:hover{color:rgb(var(--primary-6));background-color:var(--color-neutral-2)}.arco-image-preview-toolbar-action-disabled,.arco-image-preview-toolbar-action-disabled:hover{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-image-preview-toolbar-action-name{padding-right:12px;font-size:12px}.arco-image-preview-toolbar-action-content{padding:13px;line-height:1}.arco-image-preview-loading{display:flex;align-items:center;justify-content:center;box-sizing:border-box;width:48px;height:48px;padding:10px;color:rgb(var(--primary-6));font-size:18px;background-color:#232324;border-radius:var(--border-radius-medium);position:absolute;top:50%;left:50%;transform:translate(-50%,-50%)}.arco-image-preview-close-btn{position:absolute;top:36px;right:36px;display:flex;align-items:center;justify-content:center;width:32px;height:32px;color:var(--color-white);font-size:14px;line-height:32px;text-align:center;background:rgba(0,0,0,.5);border-radius:50%;cursor:pointer}.arco-image-preview-arrow-left,.arco-image-preview-arrow-right{position:absolute;z-index:2;display:flex;align-items:center;justify-content:center;width:32px;height:32px;color:var(--color-white);background-color:#ffffff4d;border-radius:50%;cursor:pointer}.arco-image-preview-arrow-left>svg,.arco-image-preview-arrow-right>svg{color:var(--color-white);font-size:16px}.arco-image-preview-arrow-left:hover,.arco-image-preview-arrow-right:hover{background-color:#ffffff80}.arco-image-preview-arrow-left{top:50%;left:20px;transform:translateY(-50%)}.arco-image-preview-arrow-right{top:50%;right:20px;transform:translateY(-50%)}.arco-image-preview-arrow-disabled{color:#ffffff4d;background-color:#fff3;cursor:not-allowed}.arco-image-preview-arrow-disabled>svg{color:#ffffff4d}.arco-image-preview-arrow-disabled:hover{background-color:#fff3}.image-fade-enter-from,.image-fade-leave-to{opacity:0}.image-fade-enter-to,.image-fade-leave-from{opacity:1}.image-fade-enter-active,.image-fade-leave-active{transition:opacity .4s cubic-bezier(.3,1.3,.3,1)}.arco-input-number{position:relative;box-sizing:border-box;width:100%;border-radius:var(--border-radius-small)}.arco-input-number-step-button{display:flex;align-items:center;justify-content:center;box-sizing:border-box;padding:0;color:var(--color-text-2);background-color:var(--color-fill-2);cursor:pointer;user-select:none;transition:all .1s cubic-bezier(0,0,1,1)}.arco-input-number-step-button:hover{background-color:var(--color-fill-3);border-color:var(--color-fill-3)}.arco-input-number-step-button:active{background-color:var(--color-fill-4);border-color:var(--color-fill-4)}.arco-input-number-step-button:disabled{color:var(--color-text-4);background-color:var(--color-fill-2);cursor:not-allowed}.arco-input-number-step-button:disabled:hover,.arco-input-number-step-button:disabled:active{background-color:var(--color-fill-2);border-color:var(--color-neutral-3)}.arco-input-number-prefix,.arco-input-number-suffix{transition:all .1s cubic-bezier(0,0,1,1)}.arco-input-number-mode-embed 
.arco-input-number-step{position:absolute;top:4px;right:4px;bottom:4px;width:18px;overflow:hidden;border-radius:1px;opacity:0;transition:all .1s cubic-bezier(0,0,1,1)}.arco-input-number-mode-embed .arco-input-number-step .arco-input-number-step-button{width:100%;height:50%;font-size:10px;border:none;border-color:var(--color-neutral-3)}.arco-input-number-mode-embed .arco-input-suffix{justify-content:flex-end;min-width:6px}.arco-input-number-mode-embed .arco-input-suffix-has-feedback{min-width:32px}.arco-input-number-mode-embed .arco-input-suffix-has-feedback .arco-input-number-step{right:30px}.arco-input-number-mode-embed:not(.arco-input-disabled):not(.arco-input-outer-disabled):hover .arco-input-number-step,.arco-input-number-mode-embed:not(.arco-input-disabled):not(.arco-input-outer-disabled):focus-within .arco-input-number-step{opacity:1}.arco-input-number-mode-embed:not(.arco-input-disabled):not(.arco-input-outer-disabled):hover .arco-input-number-step~.arco-input-suffix,.arco-input-number-mode-embed:not(.arco-input-disabled):not(.arco-input-outer-disabled):focus-within .arco-input-number-step~.arco-input-suffix{opacity:0;pointer-events:none}.arco-input-number-mode-embed.arco-input-wrapper:not(.arco-input-focus) .arco-input-number-step-button:not(.arco-input-number-step-button-disabled):hover{background-color:var(--color-fill-4)}.arco-input-number-mode-button .arco-input-prepend,.arco-input-number-mode-button .arco-input-append{padding:0;border:none}.arco-input-number-mode-button .arco-input-prepend .arco-input-number-step-button{border-right:1px solid transparent;border-top-right-radius:0;border-bottom-right-radius:0}.arco-input-number-mode-button .arco-input-prepend .arco-input-number-step-button:not(.arco-input-number-mode-button .arco-input-prepend .arco-input-number-step-button:active){border-right-color:var(--color-neutral-3)}.arco-input-number-mode-button .arco-input-append .arco-input-number-step-button{border-left:1px solid transparent;border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-number-mode-button .arco-input-append .arco-input-number-step-button:not(.arco-input-number-mode-button .arco-input-append .arco-input-number-step-button:active){border-left-color:var(--color-neutral-3)}.arco-input-tag{display:inline-flex;box-sizing:border-box;width:100%;padding-right:12px;padding-left:12px;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s cubic-bezier(0,0,1,1)}.arco-input-tag:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-input-tag:focus-within,.arco-input-tag.arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-input-tag.arco-input-tag-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-input-tag.arco-input-tag-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-input-tag.arco-input-tag-disabled .arco-input-tag-prefix,.arco-input-tag.arco-input-tag-disabled 
.arco-input-tag-suffix{color:inherit}.arco-input-tag.arco-input-tag-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-input-tag.arco-input-tag-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-input-tag.arco-input-tag-error:focus-within,.arco-input-tag.arco-input-tag-error.arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-input-tag .arco-input-tag-prefix,.arco-input-tag .arco-input-tag-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-input-tag .arco-input-tag-prefix>svg,.arco-input-tag .arco-input-tag-suffix>svg{font-size:14px}.arco-input-tag .arco-input-tag-prefix{padding-right:12px;color:var(--color-text-2)}.arco-input-tag .arco-input-tag-suffix{padding-left:12px;color:var(--color-text-2)}.arco-input-tag .arco-input-tag-suffix .arco-feedback-icon{display:inline-flex}.arco-input-tag .arco-input-tag-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-input-tag .arco-input-tag-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-input-tag .arco-input-tag-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-input-tag .arco-input-tag-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-input-tag .arco-input-tag-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-input-tag .arco-input-tag-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-input-tag:hover .arco-input-tag-clear-btn{visibility:visible}.arco-input-tag:not(.arco-input-tag-focus) .arco-input-tag-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-input-tag.arco-input-tag-has-tag{padding-right:4px;padding-left:4px}.arco-input-tag.arco-input-tag-has-prefix{padding-left:12px}.arco-input-tag.arco-input-tag-has-suffix{padding-right:12px}.arco-input-tag .arco-input-tag-inner{flex:1;overflow:hidden;line-height:0}.arco-input-tag .arco-input-tag-inner .arco-input-tag-tag{display:inline-flex;align-items:center;margin-right:4px;color:var(--color-text-1);font-size:12px;white-space:pre-wrap;word-break:break-word;background-color:var(--color-bg-2);border-color:var(--color-fill-3)}.arco-input-tag .arco-input-tag-inner .arco-input-tag-tag .arco-icon-hover:hover:before{background-color:var(--color-fill-2)}.arco-input-tag .arco-input-tag-inner .arco-input-tag-tag.arco-tag-custom-color{color:var(--color-white)}.arco-input-tag .arco-input-tag-inner .arco-input-tag-tag.arco-tag-custom-color .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:#fff3}.arco-input-tag .arco-input-tag-inner .arco-input-tag-input{width:100%;padding-right:0;padding-left:0;color:inherit;line-height:1.5715;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0);box-sizing:border-box}.arco-input-tag .arco-input-tag-inner .arco-input-tag-input::placeholder{color:var(--color-text-3)}.arco-input-tag .arco-input-tag-inner .arco-input-tag-input[disabled]::placeholder{color:var(--color-text-4)}.arco-input-tag .arco-input-tag-inner .arco-input-tag-input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-input-tag .arco-input-tag-mirror{position:absolute;top:0;left:0;white-space:pre;visibility:hidden;pointer-events:none}.arco-input-tag.arco-input-tag-focus 
.arco-input-tag-tag{background-color:var(--color-fill-2);border-color:var(--color-fill-2)}.arco-input-tag.arco-input-tag-focus .arco-input-tag-tag .arco-icon-hover:hover:before{background-color:var(--color-fill-3)}.arco-input-tag.arco-input-tag-disabled .arco-input-tag-tag{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:var(--color-fill-3)}.arco-input-tag.arco-input-tag-readonly,.arco-input-tag.arco-input-tag-disabled-input{cursor:default}.arco-input-tag.arco-input-tag-size-mini{font-size:12px}.arco-input-tag.arco-input-tag-size-mini .arco-input-tag-inner{padding-top:0;padding-bottom:0}.arco-input-tag.arco-input-tag-size-mini .arco-input-tag-tag,.arco-input-tag.arco-input-tag-size-mini .arco-input-tag-input{margin-top:1px;margin-bottom:1px;line-height:18px;vertical-align:middle}.arco-input-tag.arco-input-tag-size-mini .arco-input-tag-tag{height:auto;min-height:20px}.arco-input-tag.arco-input-tag-size-mini .arco-input-tag-input{height:20px}.arco-input-tag.arco-input-tag-size-medium{font-size:14px}.arco-input-tag.arco-input-tag-size-medium .arco-input-tag-inner{padding-top:2px;padding-bottom:2px}.arco-input-tag.arco-input-tag-size-medium .arco-input-tag-tag,.arco-input-tag.arco-input-tag-size-medium .arco-input-tag-input{margin-top:1px;margin-bottom:1px;line-height:22px;vertical-align:middle}.arco-input-tag.arco-input-tag-size-medium .arco-input-tag-tag{height:auto;min-height:24px}.arco-input-tag.arco-input-tag-size-medium .arco-input-tag-input{height:24px}.arco-input-tag.arco-input-tag-size-small{font-size:14px}.arco-input-tag.arco-input-tag-size-small .arco-input-tag-inner{padding-top:2px;padding-bottom:2px}.arco-input-tag.arco-input-tag-size-small .arco-input-tag-tag,.arco-input-tag.arco-input-tag-size-small .arco-input-tag-input{margin-top:1px;margin-bottom:1px;line-height:18px;vertical-align:middle}.arco-input-tag.arco-input-tag-size-small .arco-input-tag-tag{height:auto;min-height:20px}.arco-input-tag.arco-input-tag-size-small .arco-input-tag-input{height:20px}.arco-input-tag.arco-input-tag-size-large{font-size:14px}.arco-input-tag.arco-input-tag-size-large .arco-input-tag-inner{padding-top:2px;padding-bottom:2px}.arco-input-tag.arco-input-tag-size-large .arco-input-tag-tag,.arco-input-tag.arco-input-tag-size-large .arco-input-tag-input{margin-top:1px;margin-bottom:1px;line-height:26px;vertical-align:middle}.arco-input-tag.arco-input-tag-size-large .arco-input-tag-tag{height:auto;min-height:28px}.arco-input-tag.arco-input-tag-size-large .arco-input-tag-input{height:28px}.input-tag-zoom-enter-from{transform:scale(.5);opacity:0}.input-tag-zoom-enter-to{transform:scale(1);opacity:1}.input-tag-zoom-enter-active{transition:all .3s cubic-bezier(.34,.69,.1,1)}.input-tag-zoom-leave-from{transform:scale(1);opacity:1}.input-tag-zoom-leave-to{transform:scale(.5);opacity:0}.input-tag-zoom-leave-active{position:absolute;transition:all .3s cubic-bezier(.3,1.3,.3,1)}.input-tag-zoom-move{transition:all .3s cubic-bezier(.3,1.3,.3,1)}.arco-input-wrapper{display:inline-flex;box-sizing:border-box;width:100%;padding-right:12px;padding-left:12px;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s 
cubic-bezier(0,0,1,1)}.arco-input-wrapper:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-input-wrapper:focus-within,.arco-input-wrapper.arco-input-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-input-wrapper.arco-input-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-input-wrapper.arco-input-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-input-wrapper.arco-input-disabled .arco-input-prefix,.arco-input-wrapper.arco-input-disabled .arco-input-suffix{color:inherit}.arco-input-wrapper.arco-input-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-input-wrapper.arco-input-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-input-wrapper.arco-input-error:focus-within,.arco-input-wrapper.arco-input-error.arco-input-wrapper-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-input-wrapper .arco-input-prefix,.arco-input-wrapper .arco-input-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-input-wrapper .arco-input-prefix>svg,.arco-input-wrapper .arco-input-suffix>svg{font-size:14px}.arco-input-wrapper .arco-input-prefix{padding-right:12px;color:var(--color-text-2)}.arco-input-wrapper .arco-input-suffix{padding-left:12px;color:var(--color-text-2)}.arco-input-wrapper .arco-input-suffix .arco-feedback-icon{display:inline-flex}.arco-input-wrapper .arco-input-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-input-wrapper .arco-input-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-input-wrapper .arco-input-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-input-wrapper .arco-input-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-input-wrapper .arco-input-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-input-wrapper .arco-input-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-input-wrapper:hover .arco-input-clear-btn{visibility:visible}.arco-input-wrapper:not(.arco-input-focus) .arco-input-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-input-wrapper .arco-input{width:100%;padding-right:0;padding-left:0;color:inherit;line-height:1.5715;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0)}.arco-input-wrapper .arco-input::placeholder{color:var(--color-text-3)}.arco-input-wrapper .arco-input[disabled]::placeholder{color:var(--color-text-4)}.arco-input-wrapper .arco-input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-input-wrapper .arco-input.arco-input-size-mini{padding-top:1px;padding-bottom:1px;font-size:12px;line-height:1.667}.arco-input-wrapper .arco-input.arco-input-size-small{padding-top:2px;padding-bottom:2px;font-size:14px;line-height:1.5715}.arco-input-wrapper .arco-input.arco-input-size-medium{padding-top:4px;padding-bottom:4px;font-size:14px;line-height:1.5715}.arco-input-wrapper .arco-input.arco-input-size-large{padding-top:6px;padding-bottom:6px;font-size:14px;line-height:1.5715}.arco-input-wrapper 
.arco-input-word-limit{color:var(--color-text-3);font-size:12px}.arco-input-outer{display:inline-flex;width:100%}.arco-input-outer>.arco-input-wrapper{border-radius:0}.arco-input-outer>:first-child{border-top-left-radius:var(--border-radius-small);border-bottom-left-radius:var(--border-radius-small)}.arco-input-outer>:last-child{border-top-right-radius:var(--border-radius-small);border-bottom-right-radius:var(--border-radius-small)}.arco-input-outer.arco-input-outer-size-mini .arco-input-outer,.arco-input-outer.arco-input-outer-size-mini .arco-input-wrapper .arco-input-prefix,.arco-input-outer.arco-input-outer-size-mini .arco-input-wrapper .arco-input-suffix{font-size:12px}.arco-input-outer.arco-input-outer-size-mini .arco-input-wrapper .arco-input-prefix>svg,.arco-input-outer.arco-input-outer-size-mini .arco-input-wrapper .arco-input-suffix>svg{font-size:12px}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend,.arco-input-outer.arco-input-outer-size-mini .arco-input-append{font-size:12px}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend>svg,.arco-input-outer.arco-input-outer-size-mini .arco-input-append>svg{font-size:12px}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend .arco-input{width:auto;height:100%;margin:-1px -13px -1px -12px;border-color:transparent;border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend .arco-select{width:auto;height:100%;margin:-1px -13px -1px -12px}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer.arco-input-outer-size-mini .arco-input-append .arco-input{width:auto;height:100%;margin:-1px -12px -1px -13px;border-color:transparent;border-top-right-radius:0;border-bottom-right-radius:0}.arco-input-outer.arco-input-outer-size-mini .arco-input-append .arco-select{width:auto;height:100%;margin:-1px -12px -1px -13px}.arco-input-outer.arco-input-outer-size-mini .arco-input-append .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-mini .arco-input-append .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer.arco-input-outer-size-small .arco-input-outer,.arco-input-outer.arco-input-outer-size-small .arco-input-wrapper .arco-input-prefix,.arco-input-outer.arco-input-outer-size-small .arco-input-wrapper .arco-input-suffix{font-size:14px}.arco-input-outer.arco-input-outer-size-small .arco-input-wrapper .arco-input-prefix>svg,.arco-input-outer.arco-input-outer-size-small .arco-input-wrapper .arco-input-suffix>svg{font-size:14px}.arco-input-outer.arco-input-outer-size-small .arco-input-prepend,.arco-input-outer.arco-input-outer-size-small .arco-input-append{font-size:14px}.arco-input-outer.arco-input-outer-size-small .arco-input-prepend>svg,.arco-input-outer.arco-input-outer-size-small .arco-input-append>svg{font-size:14px}.arco-input-outer.arco-input-outer-size-small .arco-input-prepend .arco-input{width:auto;height:100%;margin:-1px -13px -1px -12px;border-color:transparent;border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-outer.arco-input-outer-size-small .arco-input-prepend .arco-select{width:auto;height:100%;margin:-1px -13px -1px -12px}.arco-input-outer.arco-input-outer-size-small 
.arco-input-prepend .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-small .arco-input-prepend .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer.arco-input-outer-size-small .arco-input-append .arco-input{width:auto;height:100%;margin:-1px -12px -1px -13px;border-color:transparent;border-top-right-radius:0;border-bottom-right-radius:0}.arco-input-outer.arco-input-outer-size-small .arco-input-append .arco-select{width:auto;height:100%;margin:-1px -12px -1px -13px}.arco-input-outer.arco-input-outer-size-small .arco-input-append .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-small .arco-input-append .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer.arco-input-outer-size-large .arco-input-outer,.arco-input-outer.arco-input-outer-size-large .arco-input-wrapper .arco-input-prefix,.arco-input-outer.arco-input-outer-size-large .arco-input-wrapper .arco-input-suffix{font-size:14px}.arco-input-outer.arco-input-outer-size-large .arco-input-wrapper .arco-input-prefix>svg,.arco-input-outer.arco-input-outer-size-large .arco-input-wrapper .arco-input-suffix>svg{font-size:14px}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend,.arco-input-outer.arco-input-outer-size-large .arco-input-append{font-size:14px}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend>svg,.arco-input-outer.arco-input-outer-size-large .arco-input-append>svg{font-size:14px}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend .arco-input{width:auto;height:100%;margin:-1px -13px -1px -12px;border-color:transparent;border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend .arco-select{width:auto;height:100%;margin:-1px -13px -1px -12px}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer.arco-input-outer-size-large .arco-input-append .arco-input{width:auto;height:100%;margin:-1px -12px -1px -13px;border-color:transparent;border-top-right-radius:0;border-bottom-right-radius:0}.arco-input-outer.arco-input-outer-size-large .arco-input-append .arco-select{width:auto;height:100%;margin:-1px -12px -1px -13px}.arco-input-outer.arco-input-outer-size-large .arco-input-append .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-large .arco-input-append .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer-disabled{cursor:not-allowed}.arco-input-prepend,.arco-input-append{display:inline-flex;flex-shrink:0;align-items:center;box-sizing:border-box;padding:0 12px;color:var(--color-text-1);white-space:nowrap;background-color:var(--color-fill-2);border:1px solid transparent}.arco-input-prepend>svg,.arco-input-append>svg{font-size:14px}.arco-input-prepend{border-right:1px solid var(--color-neutral-3)}.arco-input-prepend .arco-input{width:auto;height:100%;margin:-1px -12px -1px -13px;border-color:transparent;border-top-right-radius:0;border-bottom-right-radius:0}.arco-input-prepend .arco-select{width:auto;height:100%;margin:-1px -12px -1px 
-13px}.arco-input-prepend .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-prepend .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-append{border-left:1px solid var(--color-neutral-3)}.arco-input-append .arco-input{width:auto;height:100%;margin:-1px -13px -1px -12px;border-color:transparent;border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-append .arco-select{width:auto;height:100%;margin:-1px -13px -1px -12px}.arco-input-append .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-append .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-group{display:inline-flex;align-items:center}.arco-input-group>*{border-radius:0}.arco-input-group>*.arco-input-outer>:last-child,.arco-input-group>*.arco-input-outer>:first-child{border-radius:0}.arco-input-group>*:not(:last-child){position:relative;box-sizing:border-box}.arco-input-group>*:first-child,.arco-input-group>*:first-child .arco-input-group>*:first-child{border-top-left-radius:var(--border-radius-small);border-bottom-left-radius:var(--border-radius-small)}.arco-input-group>*:first-child .arco-select-view,.arco-input-group>*:first-child .arco-input-group>*:first-child .arco-select-view{border-top-left-radius:var(--border-radius-small);border-bottom-left-radius:var(--border-radius-small)}.arco-input-group>*:last-child,.arco-input-group>*:last-child .arco-input-outer>*:last-child{border-top-right-radius:var(--border-radius-small);border-bottom-right-radius:var(--border-radius-small)}.arco-input-group>*:last-child .arco-select-view,.arco-input-group>*:last-child .arco-input-outer>*:last-child .arco-select-view{border-top-right-radius:var(--border-radius-small);border-bottom-right-radius:var(--border-radius-small)}.arco-input-group>.arco-input-wrapper:not(:last-child),.arco-input-group>.arco-input-outer:not(:last-child),.arco-input-group>.arco-input-tag:not(:last-child),.arco-input-group>.arco-select-view:not(:last-child){border-right:1px solid var(--color-neutral-3)}.arco-input-group>.arco-input-wrapper:not(:last-child):focus-within,.arco-input-group>.arco-input-outer:not(:last-child):focus-within,.arco-input-group>.arco-input-tag:not(:last-child):focus-within,.arco-input-group>.arco-select-view:not(:last-child):focus-within{border-right-color:rgb(var(--primary-6))}.size-height-size-mini{padding-top:1px;padding-bottom:1px;font-size:12px;line-height:1.667}.size-height-size-small{padding-top:2px;padding-bottom:2px;font-size:14px}.size-height-size-large{padding-top:6px;padding-bottom:6px;font-size:14px}.arco-textarea-wrapper{position:relative;display:inline-block;width:100%}.arco-textarea-clear-wrapper:hover .arco-textarea-clear-icon{display:inline-block}.arco-textarea-clear-wrapper .arco-textarea{padding-right:20px}.arco-textarea-word-limit{position:absolute;right:10px;bottom:6px;color:var(--color-text-3);font-size:12px;user-select:none}.arco-textarea-clear-icon{position:absolute;top:10px;right:10px;display:none;font-size:12px}.arco-input-search .arco-input-append{padding:0;border:none}.arco-input-search .arco-input-suffix{color:var(--color-text-2);font-size:14px}.arco-input-search .arco-input-search-btn{border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-wrapper.arco-input-password:not(.arco-input-disabled) 
.arco-input-suffix{color:var(--color-text-2);font-size:12px;cursor:pointer}.arco-layout{display:flex;flex:1;flex-direction:column;margin:0;padding:0}.arco-layout-sider{position:relative;flex:none;width:auto;margin:0;padding:0;background:var(--color-menu-dark-bg);transition:width .2s cubic-bezier(.34,.69,.1,1)}.arco-layout-sider-children{height:100%;overflow:auto}.arco-layout-sider-collapsed .arco-layout-sider-children::-webkit-scrollbar{width:0}.arco-layout-sider-has-trigger{box-sizing:border-box;padding-bottom:48px}.arco-layout-sider-trigger{z-index:1;display:flex;align-items:center;justify-content:center;box-sizing:border-box;width:100%;height:48px;color:var(--color-white);background:rgba(255,255,255,.2);cursor:pointer;transition:width .2s cubic-bezier(.34,.69,.1,1)}.arco-layout-sider-trigger-light{color:var(--color-text-1);background:var(--color-menu-light-bg);border-top:1px solid var(--color-bg-5)}.arco-layout-sider-light{background:var(--color-menu-light-bg);box-shadow:0 2px 5px #00000014}.arco-layout-header{flex:0 0 auto;box-sizing:border-box;margin:0}.arco-layout-content{flex:1}.arco-layout-footer{flex:0 0 auto;margin:0}.arco-layout-has-sider{flex-direction:row}.arco-layout-has-sider>.arco-layout,.arco-layout-has-sider>.arco-layout-content{overflow-x:hidden}.arco-link{display:inline-flex;align-items:center;justify-content:center;padding:1px 4px;color:rgb(var(--link-6));font-size:14px;line-height:1.5715;text-decoration:none;background-color:transparent;border-radius:var(--border-radius-small);cursor:pointer;transition:all .1s cubic-bezier(0,0,1,1)}.arco-link:hover{color:rgb(var(--link-6));background-color:var(--color-fill-2)}.arco-link:active{color:rgb(var(--link-6));background-color:var(--color-fill-3);transition:none}.arco-link.arco-link-hoverless{display:inline;padding:0;background-color:unset}.arco-link.arco-link-hoverless:active,.arco-link.arco-link-hoverless:hover{background-color:unset}.arco-link.arco-link-disabled{color:var(--color-link-light-3);background:none;cursor:not-allowed}.arco-link.arco-link-loading{color:var(--color-link-light-3);background:none;cursor:default}.arco-link-status-success,.arco-link-status-success:hover,.arco-link-status-success:active{color:rgb(var(--success-6))}.arco-link-status-success.arco-link-disabled,.arco-link-status-success.arco-link-loading{color:var(--color-success-light-3)}.arco-link-status-danger,.arco-link-status-danger:hover,.arco-link-status-danger:active{color:rgb(var(--danger-6))}.arco-link-status-danger.arco-link-disabled,.arco-link-status-danger.arco-link-loading{color:var(--color-danger-light-3)}.arco-link-status-warning,.arco-link-status-warning:hover,.arco-link-status-warning:active{color:rgb(var(--warning-6))}.arco-link-status-warning.arco-link-disabled,.arco-link-status-warning.arco-link-loading{color:var(--color-warning-light-2)}.arco-link-icon{margin-right:6px;font-size:12px;vertical-align:middle}.arco-list{display:flex;flex-direction:column;box-sizing:border-box;width:100%;overflow-y:auto;color:var(--color-text-1);font-size:14px;line-height:1.5715;border-radius:var(--border-radius-medium)}.arco-list-wrapper{overflow:hidden}.arco-list-wrapper .arco-list-spin{display:block;height:100%;overflow:hidden}.arco-list-content{overflow:hidden}.arco-list-small .arco-list-content-wrapper .arco-list-header{padding:8px 20px}.arco-list-small .arco-list-content-wrapper .arco-list-footer,.arco-list-small .arco-list-content-wrapper .arco-list-content>.arco-list-item,.arco-list-small .arco-list-content-wrapper .arco-list-content 
.arco-list-col>.arco-list-item,.arco-list-small .arco-list-content-wrapper .arco-list-content.arco-list-virtual .arco-list-item{padding:9px 20px}.arco-list-medium .arco-list-content-wrapper .arco-list-header{padding:12px 20px}.arco-list-medium .arco-list-content-wrapper .arco-list-footer,.arco-list-medium .arco-list-content-wrapper .arco-list-content>.arco-list-item,.arco-list-medium .arco-list-content-wrapper .arco-list-content .arco-list-col>.arco-list-item,.arco-list-medium .arco-list-content-wrapper .arco-list-content.arco-list-virtual .arco-list-item{padding:13px 20px}.arco-list-large .arco-list-content-wrapper .arco-list-header{padding:16px 20px}.arco-list-large .arco-list-content-wrapper .arco-list-footer,.arco-list-large .arco-list-content-wrapper .arco-list-content>.arco-list-item,.arco-list-large .arco-list-content-wrapper .arco-list-content .arco-list-col>.arco-list-item,.arco-list-large .arco-list-content-wrapper .arco-list-content.arco-list-virtual .arco-list-item{padding:17px 20px}.arco-list-bordered{border:1px solid var(--color-neutral-3)}.arco-list-split .arco-list-header,.arco-list-split .arco-list-item:not(:last-child){border-bottom:1px solid var(--color-neutral-3)}.arco-list-split .arco-list-footer{border-top:1px solid var(--color-neutral-3)}.arco-list-header{color:var(--color-text-1);font-weight:500;font-size:16px;line-height:1.5}.arco-list-item{display:flex;justify-content:space-between;box-sizing:border-box;width:100%;overflow:hidden}.arco-list-item-main{flex:1}.arco-list-item-main .arco-list-item-action:not(:first-child){margin-top:4px}.arco-list-item-meta{display:flex;align-items:center;padding:4px 0}.arco-list-item-meta-avatar{display:flex}.arco-list-item-meta-avatar:not(:last-child){margin-right:16px}.arco-list-item-meta-title{color:var(--color-text-1);font-weight:500}.arco-list-item-meta-title:not(:last-child){margin-bottom:2px}.arco-list-item-meta-description{color:var(--color-text-2)}.arco-list-item-action{display:flex;flex-wrap:nowrap;align-self:center;margin:0;padding:0;list-style:none}.arco-list-item-action>li{display:inline-block;cursor:pointer}.arco-list-item-action>li:not(:last-child){margin-right:20px}.arco-list-hover .arco-list-item:hover{background-color:var(--color-fill-1)}.arco-list-pagination{float:right;margin-top:24px}.arco-list-pagination:after{display:block;clear:both;height:0;overflow:hidden;visibility:hidden;content:""}.arco-list-scroll-loading{display:flex;align-items:center;justify-content:center}.arco-list-content{flex:auto}.arco-list-content .arco-empty{display:flex;align-items:center;justify-content:center;height:100%}.arco-mention{position:relative;display:inline-block;box-sizing:border-box;width:100%}.arco-mention-measure{position:absolute;top:0;right:0;bottom:0;left:0;overflow:auto;visibility:hidden;pointer-events:none}.arco-menu{position:relative;box-sizing:border-box;width:100%;font-size:14px;line-height:1.5715;transition:width .2s cubic-bezier(.34,.69,.1,1)}.arco-menu:focus-visible{outline:3px solid var(--color-primary-light-2)}.arco-menu-indent{display:inline-block;width:20px}.arco-menu .arco-menu-item,.arco-menu .arco-menu-group-title,.arco-menu .arco-menu-pop-header,.arco-menu .arco-menu-inline-header{position:relative;box-sizing:border-box;border-radius:var(--border-radius-small);cursor:pointer}.arco-menu .arco-menu-item.arco-menu-disabled,.arco-menu .arco-menu-group-title.arco-menu-disabled,.arco-menu .arco-menu-pop-header.arco-menu-disabled,.arco-menu .arco-menu-inline-header.arco-menu-disabled{cursor:not-allowed}.arco-menu 
.arco-menu-item.arco-menu-selected,.arco-menu .arco-menu-group-title.arco-menu-selected,.arco-menu .arco-menu-pop-header.arco-menu-selected,.arco-menu .arco-menu-inline-header.arco-menu-selected{font-weight:500;transition:color .2s cubic-bezier(0,0,1,1)}.arco-menu .arco-menu-item .arco-icon,.arco-menu .arco-menu-group-title .arco-icon,.arco-menu .arco-menu-pop-header .arco-icon,.arco-menu .arco-menu-inline-header .arco-icon,.arco-menu .arco-menu-item .arco-menu-icon,.arco-menu .arco-menu-group-title .arco-menu-icon,.arco-menu .arco-menu-pop-header .arco-menu-icon,.arco-menu .arco-menu-inline-header .arco-menu-icon{margin-right:16px}.arco-menu .arco-menu-item .arco-menu-icon .arco-icon,.arco-menu .arco-menu-group-title .arco-menu-icon .arco-icon,.arco-menu .arco-menu-pop-header .arco-menu-icon .arco-icon,.arco-menu .arco-menu-inline-header .arco-menu-icon .arco-icon{margin-right:0}.arco-menu-light{background-color:var(--color-menu-light-bg)}.arco-menu-light .arco-menu-item,.arco-menu-light .arco-menu-group-title,.arco-menu-light .arco-menu-pop-header,.arco-menu-light .arco-menu-inline-header{color:var(--color-text-2);background-color:var(--color-menu-light-bg)}.arco-menu-light .arco-menu-item .arco-icon,.arco-menu-light .arco-menu-group-title .arco-icon,.arco-menu-light .arco-menu-pop-header .arco-icon,.arco-menu-light .arco-menu-inline-header .arco-icon,.arco-menu-light .arco-menu-item .arco-menu-icon,.arco-menu-light .arco-menu-group-title .arco-menu-icon,.arco-menu-light .arco-menu-pop-header .arco-menu-icon,.arco-menu-light .arco-menu-inline-header .arco-menu-icon{color:var(--color-text-3)}.arco-menu-light .arco-menu-item:hover,.arco-menu-light .arco-menu-group-title:hover,.arco-menu-light .arco-menu-pop-header:hover,.arco-menu-light .arco-menu-inline-header:hover{color:var(--color-text-2);background-color:var(--color-fill-2)}.arco-menu-light .arco-menu-item:hover .arco-icon,.arco-menu-light .arco-menu-group-title:hover .arco-icon,.arco-menu-light .arco-menu-pop-header:hover .arco-icon,.arco-menu-light .arco-menu-inline-header:hover .arco-icon,.arco-menu-light .arco-menu-item:hover .arco-menu-icon,.arco-menu-light .arco-menu-group-title:hover .arco-menu-icon,.arco-menu-light .arco-menu-pop-header:hover .arco-menu-icon,.arco-menu-light .arco-menu-inline-header:hover .arco-menu-icon{color:var(--color-text-3)}.arco-menu-light .arco-menu-item.arco-menu-selected,.arco-menu-light .arco-menu-group-title.arco-menu-selected,.arco-menu-light .arco-menu-pop-header.arco-menu-selected,.arco-menu-light .arco-menu-inline-header.arco-menu-selected,.arco-menu-light .arco-menu-item.arco-menu-selected .arco-icon,.arco-menu-light .arco-menu-group-title.arco-menu-selected .arco-icon,.arco-menu-light .arco-menu-pop-header.arco-menu-selected .arco-icon,.arco-menu-light .arco-menu-inline-header.arco-menu-selected .arco-icon,.arco-menu-light .arco-menu-item.arco-menu-selected .arco-menu-icon,.arco-menu-light .arco-menu-group-title.arco-menu-selected .arco-menu-icon,.arco-menu-light .arco-menu-pop-header.arco-menu-selected .arco-menu-icon,.arco-menu-light .arco-menu-inline-header.arco-menu-selected .arco-menu-icon{color:rgb(var(--primary-6))}.arco-menu-light .arco-menu-item.arco-menu-disabled,.arco-menu-light .arco-menu-group-title.arco-menu-disabled,.arco-menu-light .arco-menu-pop-header.arco-menu-disabled,.arco-menu-light .arco-menu-inline-header.arco-menu-disabled{color:var(--color-text-4);background-color:var(--color-menu-light-bg)}.arco-menu-light .arco-menu-item.arco-menu-disabled 
.arco-icon,.arco-menu-light .arco-menu-group-title.arco-menu-disabled .arco-icon,.arco-menu-light .arco-menu-pop-header.arco-menu-disabled .arco-icon,.arco-menu-light .arco-menu-inline-header.arco-menu-disabled .arco-icon,.arco-menu-light .arco-menu-item.arco-menu-disabled .arco-menu-icon,.arco-menu-light .arco-menu-group-title.arco-menu-disabled .arco-menu-icon,.arco-menu-light .arco-menu-pop-header.arco-menu-disabled .arco-menu-icon,.arco-menu-light .arco-menu-inline-header.arco-menu-disabled .arco-menu-icon{color:var(--color-text-4)}.arco-menu-light .arco-menu-item.arco-menu-selected{background-color:var(--color-fill-2)}.arco-menu-light .arco-menu-inline-header.arco-menu-selected,.arco-menu-light .arco-menu-inline-header.arco-menu-selected .arco-icon,.arco-menu-light .arco-menu-inline-header.arco-menu-selected .arco-menu-icon{color:rgb(var(--primary-6))}.arco-menu-light .arco-menu-inline-header.arco-menu-selected:hover{background-color:var(--color-fill-2)}.arco-menu-light.arco-menu-horizontal .arco-menu-item.arco-menu-selected,.arco-menu-light.arco-menu-horizontal .arco-menu-group-title.arco-menu-selected,.arco-menu-light.arco-menu-horizontal .arco-menu-pop-header.arco-menu-selected,.arco-menu-light.arco-menu-horizontal .arco-menu-inline-header.arco-menu-selected{background:none;transition:color .2s cubic-bezier(0,0,1,1)}.arco-menu-light.arco-menu-horizontal .arco-menu-item.arco-menu-selected:hover,.arco-menu-light.arco-menu-horizontal .arco-menu-group-title.arco-menu-selected:hover,.arco-menu-light.arco-menu-horizontal .arco-menu-pop-header.arco-menu-selected:hover,.arco-menu-light.arco-menu-horizontal .arco-menu-inline-header.arco-menu-selected:hover{background-color:var(--color-fill-2)}.arco-menu-light .arco-menu-group-title{color:var(--color-text-3);pointer-events:none}.arco-menu-light .arco-menu-collapse-button{color:var(--color-text-3);background-color:var(--color-fill-1)}.arco-menu-light .arco-menu-collapse-button:hover{background-color:var(--color-fill-3)}.arco-menu-dark{background-color:var(--color-menu-dark-bg)}.arco-menu-dark .arco-menu-item,.arco-menu-dark .arco-menu-group-title,.arco-menu-dark .arco-menu-pop-header,.arco-menu-dark .arco-menu-inline-header{color:var(--color-text-4);background-color:var(--color-menu-dark-bg)}.arco-menu-dark .arco-menu-item .arco-icon,.arco-menu-dark .arco-menu-group-title .arco-icon,.arco-menu-dark .arco-menu-pop-header .arco-icon,.arco-menu-dark .arco-menu-inline-header .arco-icon,.arco-menu-dark .arco-menu-item .arco-menu-icon,.arco-menu-dark .arco-menu-group-title .arco-menu-icon,.arco-menu-dark .arco-menu-pop-header .arco-menu-icon,.arco-menu-dark .arco-menu-inline-header .arco-menu-icon{color:var(--color-text-3)}.arco-menu-dark .arco-menu-item:hover,.arco-menu-dark .arco-menu-group-title:hover,.arco-menu-dark .arco-menu-pop-header:hover,.arco-menu-dark .arco-menu-inline-header:hover{color:var(--color-text-4);background-color:var(--color-menu-dark-hover)}.arco-menu-dark .arco-menu-item:hover .arco-icon,.arco-menu-dark .arco-menu-group-title:hover .arco-icon,.arco-menu-dark .arco-menu-pop-header:hover .arco-icon,.arco-menu-dark .arco-menu-inline-header:hover .arco-icon,.arco-menu-dark .arco-menu-item:hover .arco-menu-icon,.arco-menu-dark .arco-menu-group-title:hover .arco-menu-icon,.arco-menu-dark .arco-menu-pop-header:hover .arco-menu-icon,.arco-menu-dark .arco-menu-inline-header:hover .arco-menu-icon{color:var(--color-text-3)}.arco-menu-dark .arco-menu-item.arco-menu-selected,.arco-menu-dark 
.arco-menu-group-title.arco-menu-selected,.arco-menu-dark .arco-menu-pop-header.arco-menu-selected,.arco-menu-dark .arco-menu-inline-header.arco-menu-selected,.arco-menu-dark .arco-menu-item.arco-menu-selected .arco-icon,.arco-menu-dark .arco-menu-group-title.arco-menu-selected .arco-icon,.arco-menu-dark .arco-menu-pop-header.arco-menu-selected .arco-icon,.arco-menu-dark .arco-menu-inline-header.arco-menu-selected .arco-icon,.arco-menu-dark .arco-menu-item.arco-menu-selected .arco-menu-icon,.arco-menu-dark .arco-menu-group-title.arco-menu-selected .arco-menu-icon,.arco-menu-dark .arco-menu-pop-header.arco-menu-selected .arco-menu-icon,.arco-menu-dark .arco-menu-inline-header.arco-menu-selected .arco-menu-icon{color:var(--color-white)}.arco-menu-dark .arco-menu-item.arco-menu-disabled,.arco-menu-dark .arco-menu-group-title.arco-menu-disabled,.arco-menu-dark .arco-menu-pop-header.arco-menu-disabled,.arco-menu-dark .arco-menu-inline-header.arco-menu-disabled{color:var(--color-text-2);background-color:var(--color-menu-dark-bg)}.arco-menu-dark .arco-menu-item.arco-menu-disabled .arco-icon,.arco-menu-dark .arco-menu-group-title.arco-menu-disabled .arco-icon,.arco-menu-dark .arco-menu-pop-header.arco-menu-disabled .arco-icon,.arco-menu-dark .arco-menu-inline-header.arco-menu-disabled .arco-icon,.arco-menu-dark .arco-menu-item.arco-menu-disabled .arco-menu-icon,.arco-menu-dark .arco-menu-group-title.arco-menu-disabled .arco-menu-icon,.arco-menu-dark .arco-menu-pop-header.arco-menu-disabled .arco-menu-icon,.arco-menu-dark .arco-menu-inline-header.arco-menu-disabled .arco-menu-icon{color:var(--color-text-2)}.arco-menu-dark .arco-menu-item.arco-menu-selected{background-color:var(--color-menu-dark-hover)}.arco-menu-dark .arco-menu-inline-header.arco-menu-selected,.arco-menu-dark .arco-menu-inline-header.arco-menu-selected .arco-icon,.arco-menu-dark .arco-menu-inline-header.arco-menu-selected .arco-menu-icon{color:rgb(var(--primary-6))}.arco-menu-dark .arco-menu-inline-header.arco-menu-selected:hover{background-color:var(--color-menu-dark-hover)}.arco-menu-dark.arco-menu-horizontal .arco-menu-item.arco-menu-selected,.arco-menu-dark.arco-menu-horizontal .arco-menu-group-title.arco-menu-selected,.arco-menu-dark.arco-menu-horizontal .arco-menu-pop-header.arco-menu-selected,.arco-menu-dark.arco-menu-horizontal .arco-menu-inline-header.arco-menu-selected{background:none;transition:color .2s cubic-bezier(0,0,1,1)}.arco-menu-dark.arco-menu-horizontal .arco-menu-item.arco-menu-selected:hover,.arco-menu-dark.arco-menu-horizontal .arco-menu-group-title.arco-menu-selected:hover,.arco-menu-dark.arco-menu-horizontal .arco-menu-pop-header.arco-menu-selected:hover,.arco-menu-dark.arco-menu-horizontal .arco-menu-inline-header.arco-menu-selected:hover{background-color:var(--color-menu-dark-hover)}.arco-menu-dark .arco-menu-group-title{color:var(--color-text-3);pointer-events:none}.arco-menu-dark .arco-menu-collapse-button{color:var(--color-white);background-color:rgb(var(--primary-6))}.arco-menu-dark .arco-menu-collapse-button:hover{background-color:rgb(var(--primary-7))}.arco-menu a,.arco-menu a:hover,.arco-menu a:focus,.arco-menu a:active{color:inherit;text-decoration:none;cursor:inherit}.arco-menu-inner{box-sizing:border-box;width:100%;height:100%;overflow:auto}.arco-menu-icon-suffix.is-open{transform:rotate(180deg)}.arco-menu-vertical .arco-menu-item,.arco-menu-vertical .arco-menu-group-title,.arco-menu-vertical .arco-menu-pop-header,.arco-menu-vertical .arco-menu-inline-header{padding:0 
12px;line-height:40px}.arco-menu-vertical .arco-menu-item .arco-menu-icon-suffix .arco-icon,.arco-menu-vertical .arco-menu-group-title .arco-menu-icon-suffix .arco-icon,.arco-menu-vertical .arco-menu-pop-header .arco-menu-icon-suffix .arco-icon,.arco-menu-vertical .arco-menu-inline-header .arco-menu-icon-suffix .arco-icon{margin-right:0}.arco-menu-vertical .arco-menu-item,.arco-menu-vertical .arco-menu-group-title,.arco-menu-vertical .arco-menu-pop-header,.arco-menu-vertical .arco-menu-inline-header{margin-bottom:4px}.arco-menu-vertical .arco-menu-item:not(.arco-menu-has-icon),.arco-menu-vertical .arco-menu-group-title:not(.arco-menu-has-icon),.arco-menu-vertical .arco-menu-pop-header:not(.arco-menu-has-icon),.arco-menu-vertical .arco-menu-inline-header:not(.arco-menu-has-icon){overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-menu-vertical .arco-menu-item.arco-menu-has-icon,.arco-menu-vertical .arco-menu-group-title.arco-menu-has-icon,.arco-menu-vertical .arco-menu-pop-header.arco-menu-has-icon,.arco-menu-vertical .arco-menu-inline-header.arco-menu-has-icon{display:flex;align-items:center}.arco-menu-vertical .arco-menu-item.arco-menu-has-icon>.arco-menu-indent-list,.arco-menu-vertical .arco-menu-group-title.arco-menu-has-icon>.arco-menu-indent-list,.arco-menu-vertical .arco-menu-pop-header.arco-menu-has-icon>.arco-menu-indent-list,.arco-menu-vertical .arco-menu-inline-header.arco-menu-has-icon>.arco-menu-indent-list,.arco-menu-vertical .arco-menu-item.arco-menu-has-icon>.arco-menu-icon,.arco-menu-vertical .arco-menu-group-title.arco-menu-has-icon>.arco-menu-icon,.arco-menu-vertical .arco-menu-pop-header.arco-menu-has-icon>.arco-menu-icon,.arco-menu-vertical .arco-menu-inline-header.arco-menu-has-icon>.arco-menu-icon{flex:none}.arco-menu-vertical .arco-menu-item.arco-menu-has-icon .arco-menu-icon,.arco-menu-vertical .arco-menu-group-title.arco-menu-has-icon .arco-menu-icon,.arco-menu-vertical .arco-menu-pop-header.arco-menu-has-icon .arco-menu-icon,.arco-menu-vertical .arco-menu-inline-header.arco-menu-has-icon .arco-menu-icon{line-height:1}.arco-menu-vertical .arco-menu-item.arco-menu-has-icon .arco-menu-title,.arco-menu-vertical .arco-menu-group-title.arco-menu-has-icon .arco-menu-title,.arco-menu-vertical .arco-menu-pop-header.arco-menu-has-icon .arco-menu-title,.arco-menu-vertical .arco-menu-inline-header.arco-menu-has-icon .arco-menu-title{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-menu-vertical .arco-menu-item .arco-menu-item-inner,.arco-menu-vertical .arco-menu-group-title .arco-menu-item-inner,.arco-menu-vertical .arco-menu-pop-header .arco-menu-item-inner,.arco-menu-vertical .arco-menu-inline-header .arco-menu-item-inner{overflow:hidden;white-space:nowrap;text-overflow:ellipsis;width:100%}.arco-menu-vertical .arco-menu-item .arco-menu-icon-suffix,.arco-menu-vertical .arco-menu-group-title .arco-menu-icon-suffix,.arco-menu-vertical .arco-menu-pop-header .arco-menu-icon-suffix,.arco-menu-vertical .arco-menu-inline-header .arco-menu-icon-suffix{position:absolute;right:12px}.arco-menu-vertical .arco-menu-inner{padding:4px 8px}.arco-menu-vertical .arco-menu-item.arco-menu-item-indented{display:flex}.arco-menu-vertical .arco-menu-pop-header,.arco-menu-vertical .arco-menu-inline-header{padding-right:28px}.arco-menu-horizontal{width:100%;height:auto}.arco-menu-horizontal .arco-menu-item,.arco-menu-horizontal .arco-menu-group-title,.arco-menu-horizontal .arco-menu-pop-header,.arco-menu-horizontal .arco-menu-inline-header{padding:0 
12px;line-height:30px}.arco-menu-horizontal .arco-menu-item .arco-menu-icon-suffix .arco-icon,.arco-menu-horizontal .arco-menu-group-title .arco-menu-icon-suffix .arco-icon,.arco-menu-horizontal .arco-menu-pop-header .arco-menu-icon-suffix .arco-icon,.arco-menu-horizontal .arco-menu-inline-header .arco-menu-icon-suffix .arco-icon{margin-right:0}.arco-menu-horizontal .arco-menu-item .arco-icon,.arco-menu-horizontal .arco-menu-group-title .arco-icon,.arco-menu-horizontal .arco-menu-pop-header .arco-icon,.arco-menu-horizontal .arco-menu-inline-header .arco-icon,.arco-menu-horizontal .arco-menu-item .arco-menu-icon,.arco-menu-horizontal .arco-menu-group-title .arco-menu-icon,.arco-menu-horizontal .arco-menu-pop-header .arco-menu-icon,.arco-menu-horizontal .arco-menu-inline-header .arco-menu-icon{margin-right:16px}.arco-menu-horizontal .arco-menu-item .arco-menu-icon-suffix,.arco-menu-horizontal .arco-menu-group-title .arco-menu-icon-suffix,.arco-menu-horizontal .arco-menu-pop-header .arco-menu-icon-suffix,.arco-menu-horizontal .arco-menu-inline-header .arco-menu-icon-suffix{margin-left:6px}.arco-menu-horizontal .arco-menu-inner{display:flex;align-items:center;padding:14px 20px}.arco-menu-horizontal .arco-menu-item,.arco-menu-horizontal .arco-menu-pop{display:inline-block;flex-shrink:0;vertical-align:middle}.arco-menu-horizontal .arco-menu-item:not(:first-child),.arco-menu-horizontal .arco-menu-pop:not(:first-child){margin-left:12px}.arco-menu-horizontal .arco-menu-pop:after{position:absolute;bottom:-14px;left:0;width:100%;height:14px;content:" "}.arco-menu-overflow-wrap{width:100%}.arco-menu-overflow-sub-menu-mirror,.arco-menu-overflow-hidden-menu-item{position:absolute!important;white-space:nowrap;visibility:hidden;pointer-events:none}.arco-menu-selected-label{position:absolute;right:12px;bottom:-14px;left:12px;height:3px;background-color:rgb(var(--primary-6))}.arco-menu-pop-button{width:auto;background:none;box-shadow:none}.arco-menu-pop-button.arco-menu-collapsed{width:auto}.arco-menu-pop-button .arco-menu-item,.arco-menu-pop-button .arco-menu-group-title,.arco-menu-pop-button .arco-menu-pop-header,.arco-menu-pop-button .arco-menu-inline-header{width:40px;height:40px;margin-bottom:16px;line-height:40px;border:1px solid transparent;border-radius:50%;box-shadow:0 4px 10px #0000001a}.arco-menu-collapsed{width:48px}.arco-menu-collapsed .arco-menu-inner{padding:4px}.arco-menu-collapsed .arco-menu-icon-suffix{display:none}.arco-menu-collapsed .arco-menu-has-icon>*:not(.arco-menu-icon){opacity:0}.arco-menu-collapsed .arco-menu-item .arco-icon,.arco-menu-collapsed .arco-menu-group-title .arco-icon,.arco-menu-collapsed .arco-menu-pop-header .arco-icon,.arco-menu-collapsed .arco-menu-inline-header .arco-icon{margin-right:100%}.arco-menu-collapse-button{position:absolute;right:12px;bottom:12px;display:flex;align-items:center;justify-content:center;width:24px;height:24px;border-radius:var(--border-radius-small);cursor:pointer}.arco-menu-inline-content{height:auto;overflow:hidden;transition:height .2s cubic-bezier(.34,.69,.1,1)}.arco-menu-inline-content-hide{height:0}.arco-menu-item-tooltip a{color:inherit;cursor:text}.arco-menu-item-tooltip a:hover,.arco-menu-item-tooltip a:focus,.arco-menu-item-tooltip a:active{color:inherit}.arco-menu-pop-trigger.arco-trigger-position-bl{transform:translateY(14px)}.arco-menu-pop-trigger.arco-trigger-position-bl .arco-trigger-arrow{z-index:0;border-top:1px solid var(--color-neutral-3);border-left:1px solid 
var(--color-neutral-3)}.arco-menu-pop-trigger.arco-trigger-position-rt{transform:translate(8px)}.arco-menu-pop-trigger.arco-trigger-position-rt .arco-trigger-arrow{z-index:0;border-bottom:1px solid var(--color-neutral-3);border-left:1px solid var(--color-neutral-3)}.arco-menu-pop-trigger.arco-menu-pop-trigger-dark .arco-trigger-arrow{background-color:var(--color-menu-dark-bg);border-color:var(--color-menu-dark-bg)}.arco-trigger-menu{position:relative;box-sizing:border-box;max-height:200px;padding:4px 0;overflow:auto;background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-trigger-menu-hidden{display:none}.arco-trigger-menu-item,.arco-trigger-menu-pop-header{position:relative;z-index:1;box-sizing:border-box;width:100%;height:36px;padding:0 12px;color:var(--color-text-1);font-size:14px;line-height:36px;text-align:left;background-color:transparent;cursor:pointer;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-trigger-menu-item.arco-trigger-menu-selected,.arco-trigger-menu-pop-header.arco-trigger-menu-selected{color:var(--color-text-1);font-weight:500;background-color:transparent;transition:all .1s cubic-bezier(0,0,1,1)}.arco-trigger-menu-item:hover,.arco-trigger-menu-pop-header:hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-trigger-menu-item.arco-trigger-menu-disabled,.arco-trigger-menu-pop-header.arco-trigger-menu-disabled{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-trigger-menu .arco-trigger-menu-has-icon{display:flex;align-items:center}.arco-trigger-menu .arco-trigger-menu-has-icon .arco-trigger-menu-icon{margin-right:8px;line-height:1}.arco-trigger-menu .arco-trigger-menu-has-icon>*{flex:none}.arco-trigger-menu .arco-trigger-menu-has-icon .arco-trigger-menu-title{flex:auto;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-trigger-menu-pop-header{display:flex;align-items:center;justify-content:space-between}.arco-trigger-menu-pop-header .arco-trigger-menu-icon-suffix{margin-left:12px}.arco-trigger-menu-group:first-child .arco-trigger-menu-group-title{padding-top:4px}.arco-trigger-menu-group-title{box-sizing:border-box;width:100%;padding:8px 12px 0;color:var(--color-text-3);font-size:12px;line-height:20px;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-trigger-menu-pop-trigger .arco-trigger-arrow{display:none}.arco-trigger-menu-dark{background-color:var(--color-menu-dark-bg);border-color:var(--color-menu-dark-bg)}.arco-trigger-menu-dark .arco-trigger-menu-item,.arco-trigger-menu-dark .arco-trigger-menu-pop-header{color:var(--color-text-4);background-color:transparent}.arco-trigger-menu-dark .arco-trigger-menu-item.arco-trigger-menu-selected,.arco-trigger-menu-dark .arco-trigger-menu-pop-header.arco-trigger-menu-selected{color:var(--color-white);background-color:transparent}.arco-trigger-menu-dark .arco-trigger-menu-item.arco-trigger-menu-selected:hover,.arco-trigger-menu-dark .arco-trigger-menu-pop-header.arco-trigger-menu-selected:hover{color:var(--color-white)}.arco-trigger-menu-dark .arco-trigger-menu-item:hover,.arco-trigger-menu-dark .arco-trigger-menu-pop-header:hover{color:var(--color-text-4);background-color:var(--color-menu-dark-hover)}.arco-trigger-menu-dark .arco-trigger-menu-item.arco-trigger-menu-disabled,.arco-trigger-menu-dark .arco-trigger-menu-pop-header.arco-trigger-menu-disabled{color:var(--color-text-2);background-color:transparent}.arco-trigger-menu-dark 
.arco-trigger-menu-group-title{color:var(--color-text-3)}.arco-message-list{position:fixed;z-index:1003;display:flex;flex-direction:column;align-items:center;box-sizing:border-box;width:100%;margin:0;padding:0 10px;text-align:center;pointer-events:none}.arco-message-list-top{top:40px}.arco-message-list-bottom{bottom:40px}.arco-message{position:relative;display:inline-flex;align-items:center;margin-bottom:16px;padding:10px 16px;overflow:hidden;line-height:1;text-align:center;list-style:none;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-small);box-shadow:0 4px 10px #0000001a;transition:all .1s cubic-bezier(0,0,1,1);pointer-events:auto}.arco-message-icon{display:inline-block;margin-right:8px;color:var(--color-text-1);font-size:20px;vertical-align:middle;animation:arco-msg-fade .1s cubic-bezier(0,0,1,1),arco-msg-fade .4s cubic-bezier(.3,1.3,.3,1)}.arco-message-content{font-size:14px;color:var(--color-text-1);vertical-align:middle}.arco-message-info{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-message-info .arco-message-icon{color:rgb(var(--primary-6))}.arco-message-info .arco-message-content{color:var(--color-text-1)}.arco-message-success{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-message-success .arco-message-icon{color:rgb(var(--success-6))}.arco-message-success .arco-message-content{color:var(--color-text-1)}.arco-message-warning{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-message-warning .arco-message-icon{color:rgb(var(--warning-6))}.arco-message-warning .arco-message-content{color:var(--color-text-1)}.arco-message-error{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-message-error .arco-message-icon{color:rgb(var(--danger-6))}.arco-message-error .arco-message-content{color:var(--color-text-1)}.arco-message-loading{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-message-loading .arco-message-icon{color:rgb(var(--primary-6))}.arco-message-loading .arco-message-content{color:var(--color-text-1)}.arco-message-close-btn{margin-left:8px;color:var(--color-text-1);font-size:12px}.arco-message .arco-icon-hover.arco-message-icon-hover:before{width:20px;height:20px}.fade-message-enter-from,.fade-message-appear-from{opacity:0}.fade-message-enter-to,.fade-message-appear-to{opacity:1}.fade-message-enter-active,.fade-message-appear-active{transition:opacity .1s cubic-bezier(0,0,1,1)}.fade-message-leave-from{opacity:1}.fade-message-leave-to{opacity:0}.fade-message-leave-active{position:absolute}.flip-list-move{transition:transform .8s ease}@keyframes arco-msg-fade{0%{opacity:0}to{opacity:1}}@keyframes arco-msg-scale{0%{transform:scale(0)}to{transform:scale(1)}}.arco-modal-container{position:fixed;top:0;right:0;bottom:0;left:0}.arco-modal-mask{position:absolute;top:0;right:0;bottom:0;left:0;background-color:var(--color-mask-bg)}.arco-modal-wrapper{position:absolute;top:0;right:0;bottom:0;left:0;overflow:auto;text-align:center}.arco-modal-wrapper.arco-modal-wrapper-align-center{white-space:nowrap}.arco-modal-wrapper.arco-modal-wrapper-align-center:after{display:inline-block;width:0;height:100%;vertical-align:middle;content:""}.arco-modal-wrapper.arco-modal-wrapper-align-center .arco-modal{top:0;vertical-align:middle}.arco-modal-wrapper.arco-modal-wrapper-moved{text-align:left}.arco-modal-wrapper.arco-modal-wrapper-moved 
.arco-modal{top:0;vertical-align:top}.arco-modal{position:relative;top:100px;display:inline-block;width:520px;margin:0 auto;line-height:1.5715;white-space:initial;text-align:left;background-color:var(--color-bg-3);border-radius:var(--border-radius-medium)}.arco-modal-draggable .arco-modal-header{cursor:move}.arco-modal-header{display:flex;flex-shrink:0;align-items:center;box-sizing:border-box;width:100%;height:48px;padding:0 20px;border-bottom:1px solid var(--color-neutral-3)}.arco-modal-header .arco-modal-title{display:flex;flex:1;align-items:center;justify-content:center}.arco-modal-header .arco-modal-title-align-start{justify-content:flex-start}.arco-modal-header .arco-modal-title-align-center{justify-content:center}.arco-modal-body{position:relative;padding:24px 20px;overflow:auto;color:var(--color-text-1);font-size:14px}.arco-modal-footer{box-sizing:border-box;flex-shrink:0;width:100%;padding:16px 20px;text-align:right;border-top:1px solid var(--color-neutral-3)}.arco-modal-footer>.arco-btn:not(:nth-child(1)){margin-left:12px}.arco-modal-close-btn{margin-left:-12px;color:var(--color-text-1);font-size:12px;cursor:pointer}.arco-modal-title{color:var(--color-text-1);font-weight:500;font-size:16px}.arco-modal-title-icon{margin-right:10px;font-size:18px;vertical-align:-.15em}.arco-modal-title-icon .arco-icon-info-circle-fill{color:rgb(var(--primary-6))}.arco-modal-title-icon .arco-icon-check-circle-fill{color:rgb(var(--success-6))}.arco-modal-title-icon .arco-icon-exclamation-circle-fill{color:rgb(var(--warning-6))}.arco-modal-title-icon .arco-icon-close-circle-fill{color:rgb(var(--danger-6))}.arco-modal-simple{width:400px;padding:24px 32px 32px}.arco-modal-simple .arco-modal-header,.arco-modal-simple .arco-modal-footer{height:unset;padding:0;border:none}.arco-modal-simple .arco-modal-header{margin-bottom:24px}.arco-modal-simple .arco-modal-title{justify-content:center}.arco-modal-simple .arco-modal-title-align-start{justify-content:flex-start}.arco-modal-simple .arco-modal-title-align-center{justify-content:center}.arco-modal-simple .arco-modal-footer{margin-top:32px;text-align:center}.arco-modal-simple .arco-modal-body{padding:0}.arco-modal-fullscreen{top:0;display:inline-flex;flex-direction:column;box-sizing:border-box;width:100%;height:100%}.arco-modal-fullscreen .arco-modal-footer{margin-top:auto}.zoom-modal-enter-from,.zoom-modal-appear-from{transform:scale(.5);opacity:0}.zoom-modal-enter-to,.zoom-modal-appear-to{transform:scale(1);opacity:1}.zoom-modal-enter-active,.zoom-modal-appear-active{transition:opacity .4s cubic-bezier(.3,1.3,.3,1),transform .4s cubic-bezier(.3,1.3,.3,1)}.zoom-modal-leave-from{transform:scale(1);opacity:1}.zoom-modal-leave-to{transform:scale(.5);opacity:0}.zoom-modal-leave-active{transition:opacity .4s cubic-bezier(.3,1.3,.3,1),transform .4s cubic-bezier(.3,1.3,.3,1)}.fade-modal-enter-from,.fade-modal-appear-from{opacity:0}.fade-modal-enter-to,.fade-modal-appear-to{opacity:1}.fade-modal-enter-active,.fade-modal-appear-active{transition:opacity .4s cubic-bezier(.3,1.3,.3,1)}.fade-modal-leave-from{opacity:1}.fade-modal-leave-to{opacity:0}.fade-modal-leave-active{transition:opacity .4s cubic-bezier(.3,1.3,.3,1)}.arco-notification-list{position:fixed;z-index:1003;margin:0;padding-left:0}.arco-notification-list-top-left{top:20px;left:20px}.arco-notification-list-top-right{top:20px;right:20px}.arco-notification-list-top-right 
.arco-notification{margin-left:auto}.arco-notification-list-bottom-left{bottom:20px;left:20px}.arco-notification-list-bottom-right{right:20px;bottom:20px}.arco-notification-list-bottom-right .arco-notification{margin-left:auto}.arco-notification{position:relative;display:flex;box-sizing:border-box;width:340px;padding:20px;overflow:hidden;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 12px #00000026;opacity:1;transition:opacity .2s cubic-bezier(0,0,1,1)}.arco-notification:not(:last-child){margin-bottom:20px}.arco-notification-icon{display:flex;align-items:center;font-size:24px}.arco-notification-info{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-notification-info .arco-notification-icon{color:rgb(var(--primary-6))}.arco-notification-success{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-notification-success .arco-notification-icon{color:rgb(var(--success-6))}.arco-notification-warning{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-notification-warning .arco-notification-icon{color:rgb(var(--warning-6))}.arco-notification-error{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-notification-error .arco-notification-icon{color:rgb(var(--danger-6))}.arco-notification-left{padding-right:16px}.arco-notification-right{flex:1;word-break:break-word}.arco-notification-title{color:var(--color-text-1);font-weight:500;font-size:16px}.arco-notification-title+.arco-notification-content{margin-top:4px}.arco-notification-content{color:var(--color-text-1);font-size:14px}.arco-notification-info .arco-notification-title,.arco-notification-info .arco-notification-content,.arco-notification-success .arco-notification-title,.arco-notification-success .arco-notification-content,.arco-notification-warning .arco-notification-title,.arco-notification-warning .arco-notification-content,.arco-notification-error .arco-notification-title,.arco-notification-error .arco-notification-content{color:var(--color-text-1)}.arco-notification-footer{margin-top:16px;text-align:right}.arco-notification-close-btn{position:absolute;top:12px;right:12px;color:var(--color-text-1);font-size:12px;cursor:pointer}.arco-notification-close-btn>svg{position:relative}.arco-notification .arco-icon-hover.arco-notification-icon-hover:before{width:20px;height:20px}.slide-left-notification-enter-from,.slide-left-notification-appear-from{transform:translate(-100%)}.slide-left-notification-enter-to,.slide-left-notification-appear-to{transform:translate(0)}.slide-left-notification-enter-active,.slide-left-notification-appear-active{transition:transform .4s cubic-bezier(.3,1.3,.3,1)}.slide-left-notification-leave-from{opacity:1}.slide-left-notification-leave-to{height:0;margin-top:0;margin-bottom:0;padding-top:0;padding-bottom:0;opacity:0}.slide-left-notification-leave-active{transition:all .3s cubic-bezier(.34,.69,.1,1)}.slide-right-notification-enter-from,.slide-right-notification-appear-from{transform:translate(100%)}.slide-right-notification-enter-to,.slide-right-notification-appear-to{transform:translate(0)}.slide-right-notification-enter-active,.slide-right-notification-appear-active{transition:transform .4s 
cubic-bezier(.3,1.3,.3,1)}.slide-right-notification-leave-from{opacity:1}.slide-right-notification-leave-to{height:0;margin-top:0;margin-bottom:0;padding-top:0;padding-bottom:0;opacity:0}.slide-right-notification-leave-active{transition:all .3s cubic-bezier(.34,.69,.1,1)}.arco-overflow-list{display:flex;align-items:center;justify-content:flex-start}.arco-overflow-list>*:not(:last-child){flex-shrink:0}.arco-overflow-list-spacer{flex:1;min-width:0;height:1px}.arco-page-header{padding:16px 0}.arco-page-header-breadcrumb+.arco-page-header-header{margin-top:4px}.arco-page-header-wrapper{padding-right:20px;padding-left:24px}.arco-page-header-header{display:flex;align-items:center;justify-content:space-between;line-height:28px}.arco-page-header-header-left{display:flex;align-items:center}.arco-page-header-main{display:flex;align-items:center;min-height:30px}.arco-page-header-main-with-back{margin-left:-8px;padding-left:8px}.arco-page-header-extra{overflow:hidden;white-space:nowrap}.arco-page-header .arco-icon-hover.arco-page-header-icon-hover:before{width:30px;height:30px}.arco-page-header .arco-icon-hover.arco-page-header-icon-hover:hover:before{background-color:var(--color-fill-2)}.arco-page-header-back-btn{margin-right:12px;color:var(--color-text-2);font-size:14px}.arco-page-header-back-btn-icon{position:relative}.arco-page-header-title{overflow:hidden;white-space:nowrap;text-overflow:ellipsis;color:var(--color-text-1);font-weight:600;font-size:20px}.arco-page-header-divider{width:1px;height:16px;margin-right:12px;margin-left:12px;background-color:var(--color-fill-3)}.arco-page-header-subtitle{overflow:hidden;white-space:nowrap;text-overflow:ellipsis;color:var(--color-text-3);font-size:14px}.arco-page-header-content{padding:20px 32px;border-top:1px solid var(--color-neutral-3)}.arco-page-header-footer{padding:16px 20px 0 24px}.arco-page-header-with-breadcrumb{padding:12px 0}.arco-page-header-with-breadcrumb .arco-page-header-footer{padding-top:12px}.arco-page-header-with-content .arco-page-header-wrapper{padding-bottom:12px}.arco-page-header-with-footer{padding-bottom:0}.arco-page-header-wrapper .arco-page-header-header{flex-wrap:wrap}.arco-page-header-wrapper .arco-page-header-header .arco-page-header-head-extra{margin-top:4px}.arco-pagination{display:flex;align-items:center;font-size:14px}.arco-pagination-list{display:inline-block;margin:0;padding:0;white-space:nowrap;list-style:none}.arco-pagination-item{display:inline-block;box-sizing:border-box;padding:0 8px;color:var(--color-text-2);text-align:center;vertical-align:middle;list-style:none;background-color:transparent;border:0 solid transparent;border-radius:var(--border-radius-small);outline:0;cursor:pointer;user-select:none;min-width:32px;height:32px;font-size:14px;line-height:32px}.arco-pagination-item-previous,.arco-pagination-item-next{font-size:12px}.arco-pagination-item:hover{color:var(--color-text-2);background-color:var(--color-fill-1);border-color:transparent}.arco-pagination-item-active,.arco-pagination-item-active:hover{color:rgb(var(--primary-6));background-color:var(--color-primary-light-1);border-color:transparent;transition:color .2s cubic-bezier(0,0,1,1),background-color .2s 
cubic-bezier(0,0,1,1)}.arco-pagination-item-disabled,.arco-pagination-item-disabled:hover{color:var(--color-text-4);background-color:transparent;border-color:transparent;cursor:not-allowed}.arco-pagination-item:not(:last-child){margin-right:8px}.arco-pagination-item-previous,.arco-pagination-item-next{color:var(--color-text-2);font-size:12px;background-color:transparent}.arco-pagination-item-previous:not(.arco-pagination-item-disabled):hover,.arco-pagination-item-next:not(.arco-pagination-item-disabled):hover{color:rgb(var(--primary-6));background-color:var(--color-fill-1)}.arco-pagination-item-previous:after,.arco-pagination-item-next:after{display:inline-block;font-size:0;vertical-align:middle;content:"."}.arco-pagination .arco-pagination-item-previous.arco-pagination-item-disabled,.arco-pagination .arco-pagination-item-next.arco-pagination-item-disabled{color:var(--color-text-4);background-color:transparent}.arco-pagination-item-jumper{font-size:16px}.arco-pagination-jumper{display:flex;align-items:center;margin-left:8px}.arco-pagination-jumper>span{font-size:14px}.arco-pagination-jumper-text-goto,.arco-pagination-jumper-prepend,.arco-pagination-jumper-append{color:var(--color-text-3);white-space:nowrap}.arco-pagination-jumper-prepend{margin-right:8px}.arco-pagination-jumper-append{margin-left:8px}.arco-pagination-jumper .arco-pagination-jumper-input{width:40px;padding-right:2px;padding-left:2px}.arco-pagination-jumper .arco-pagination-jumper-input input{text-align:center}.arco-pagination-options{position:relative;display:inline-block;flex:0 0 auto;min-width:0;margin-left:8px;text-align:center;vertical-align:middle}.arco-pagination-options .arco-select{width:auto}.arco-pagination-options .arco-select-view-value{padding-right:6px;overflow:inherit}.arco-pagination-total{display:inline-block;height:100%;margin-right:8px;color:var(--color-text-1);font-size:14px;line-height:32px;white-space:nowrap}.arco-pagination-jumper{flex:0 0 auto}.arco-pagination-jumper-separator{padding:0 12px}.arco-pagination-jumper-total-page{margin-right:8px}.arco-pagination-simple{display:flex;align-items:center}.arco-pagination-simple .arco-pagination-item{margin-right:0}.arco-pagination-simple .arco-pagination-jumper{margin:0 4px;color:var(--color-text-1)}.arco-pagination-simple .arco-pagination-jumper .arco-pagination-jumper-input{width:40px;margin-left:0}.arco-pagination-simple .arco-pagination-item-previous,.arco-pagination-simple .arco-pagination-item-next{color:var(--color-text-2);background-color:transparent}.arco-pagination-simple .arco-pagination-item-previous:not(.arco-pagination-item-disabled):hover,.arco-pagination-simple .arco-pagination-item-next:not(.arco-pagination-item-disabled):hover{color:rgb(var(--primary-6));background-color:var(--color-fill-1)}.arco-pagination-simple .arco-pagination-item-previous.arco-pagination-item-disabled,.arco-pagination-simple .arco-pagination-item-next.arco-pagination-item-disabled{color:var(--color-text-4);background-color:transparent}.arco-pagination-disabled{cursor:not-allowed}.arco-pagination-disabled .arco-pagination-item,.arco-pagination-disabled .arco-pagination-item:not(.arco-pagination-item-disabled):not(.arco-pagination-item-active):hover{color:var(--color-text-4);background-color:transparent;border-color:transparent;cursor:not-allowed}.arco-pagination.arco-pagination-disabled .arco-pagination-item-active{color:var(--color-primary-light-3);background-color:var(--color-fill-1);border-color:transparent}.arco-pagination-size-mini 
.arco-pagination-item{min-width:24px;height:24px;font-size:12px;line-height:24px}.arco-pagination-size-mini .arco-pagination-item-previous,.arco-pagination-size-mini .arco-pagination-item-next{font-size:12px}.arco-pagination-size-mini .arco-pagination-total{font-size:12px;line-height:24px}.arco-pagination-size-mini .arco-pagination-option{height:24px;font-size:12px;line-height:0}.arco-pagination-size-mini .arco-pagination-jumper>span{font-size:12px}.arco-pagination-size-small .arco-pagination-item{min-width:28px;height:28px;font-size:14px;line-height:28px}.arco-pagination-size-small .arco-pagination-item-previous,.arco-pagination-size-small .arco-pagination-item-next{font-size:12px}.arco-pagination-size-small .arco-pagination-total{font-size:14px;line-height:28px}.arco-pagination-size-small .arco-pagination-option{height:28px;font-size:14px;line-height:0}.arco-pagination-size-small .arco-pagination-jumper>span{font-size:14px}.arco-pagination-size-large .arco-pagination-item{min-width:36px;height:36px;font-size:14px;line-height:36px}.arco-pagination-size-large .arco-pagination-item-previous,.arco-pagination-size-large .arco-pagination-item-next{font-size:14px}.arco-pagination-size-large .arco-pagination-total{font-size:14px;line-height:36px}.arco-pagination-size-large .arco-pagination-option{height:36px;font-size:14px;line-height:0}.arco-pagination-size-large .arco-pagination-jumper>span{font-size:14px}.arco-popconfirm-popup-content{box-sizing:border-box;padding:16px;color:var(--color-text-2);font-size:14px;line-height:1.5715;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-popconfirm-popup-content .arco-popconfirm-body{position:relative;display:flex;align-items:flex-start;margin-bottom:16px;color:var(--color-text-1);font-size:14px}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-icon{display:inline-flex;align-items:center;height:22.001px;margin-right:8px;font-size:18px}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-icon .arco-icon-exclamation-circle-fill{color:rgb(var(--warning-6))}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-icon .arco-icon-check-circle-fill{color:rgb(var(--success-6))}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-icon .arco-icon-info-circle-fill{color:rgb(var(--primary-6))}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-icon .arco-icon-close-circle-fill{color:rgb(var(--danger-6))}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-content{text-align:left;word-wrap:break-word}.arco-popconfirm-popup-content .arco-popconfirm-footer{text-align:right}.arco-popconfirm-popup-content .arco-popconfirm-footer>button{margin-left:8px}.arco-popconfirm-popup-arrow{z-index:1;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3)}.arco-popover-popup-content{box-sizing:border-box;padding:12px 16px;color:var(--color-text-2);font-size:14px;line-height:1.5715;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-popover-title{color:var(--color-text-1);font-weight:500;font-size:16px}.arco-popover-content{margin-top:4px;text-align:left;word-wrap:break-word}.arco-popover-popup-arrow{z-index:1;background-color:var(--color-bg-popup);border:1px solid 
var(--color-neutral-3)}.arco-progress{position:relative;line-height:1;font-size:12px}.arco-progress-type-line,.arco-progress-type-steps{display:inline-block;max-width:100%;width:100%}.arco-progress-type-line.arco-progress-size-mini{width:auto}.arco-progress-line-wrapper,.arco-progress-steps-wrapper{display:flex;align-items:center;width:100%;max-width:100%;height:100%}.arco-progress-line-text,.arco-progress-steps-text{font-size:12px;margin-left:16px;color:var(--color-text-2);white-space:nowrap;text-align:right;flex-grow:1;flex-shrink:0;min-width:32px}.arco-progress-line-text .arco-icon,.arco-progress-steps-text .arco-icon{font-size:12px;margin-left:4px}.arco-progress-line{background-color:var(--color-fill-3);border-radius:100px;width:100%;position:relative;display:inline-block;overflow:hidden}.arco-progress-line-bar{height:100%;border-radius:100px;background-color:rgb(var(--primary-6));position:relative;transition:width .6s cubic-bezier(.34,.69,.1,1),background .3s cubic-bezier(.34,.69,.1,1);max-width:100%}.arco-progress-line-bar-buffer{position:absolute;background-color:var(--color-primary-light-3);height:100%;top:0;left:0;border-radius:0 100px 100px 0;max-width:100%;transition:all .6s cubic-bezier(.34,.69,.1,1)}.arco-progress-line-bar-animate:after{content:"";display:block;position:absolute;top:0;width:100%;height:100%;border-radius:inherit;background:linear-gradient(90deg,transparent 25%,rgba(255,255,255,.5) 50%,transparent 75%);background-size:400% 100%;animation:arco-progress-loading 1.5s cubic-bezier(.34,.69,.1,1) infinite}.arco-progress-line-text .arco-icon{color:var(--color-text-2)}.arco-progress-type-steps.arco-progress-size-small{width:auto}.arco-progress-type-steps.arco-progress-size-small .arco-progress-steps-item{width:2px;flex:unset;border-radius:2px}.arco-progress-type-steps.arco-progress-size-small .arco-progress-steps-item:not(:last-of-type){margin-right:3px}.arco-progress-steps{display:flex;width:100%}.arco-progress-steps-text{margin-left:8px;min-width:unset}.arco-progress-steps-text .arco-icon{color:var(--color-text-2)}.arco-progress-steps-item{height:100%;flex:1;background-color:var(--color-fill-3);position:relative;display:inline-block}.arco-progress-steps-item:not(:last-of-type){margin-right:3px}.arco-progress-steps-item:last-of-type{border-top-right-radius:100px;border-bottom-right-radius:100px}.arco-progress-steps-item:first-of-type{border-top-left-radius:100px;border-bottom-left-radius:100px}.arco-progress-steps-item-active{background-color:rgb(var(--primary-6))}.arco-progress-status-warning .arco-progress-line-bar,.arco-progress-status-warning .arco-progress-steps-item-active{background-color:rgb(var(--warning-6))}.arco-progress-status-warning .arco-progress-line-text .arco-icon,.arco-progress-status-warning .arco-progress-steps-text .arco-icon{color:rgb(var(--warning-6))}.arco-progress-status-success .arco-progress-line-bar,.arco-progress-status-success .arco-progress-steps-item-active{background-color:rgb(var(--success-6))}.arco-progress-status-success .arco-progress-line-text .arco-icon,.arco-progress-status-success .arco-progress-steps-text .arco-icon{color:rgb(var(--success-6))}.arco-progress-status-danger .arco-progress-line-bar,.arco-progress-status-danger .arco-progress-steps-item-active{background-color:rgb(var(--danger-6))}.arco-progress-status-danger .arco-progress-line-text .arco-icon,.arco-progress-status-danger .arco-progress-steps-text .arco-icon{color:rgb(var(--danger-6))}.arco-progress-size-small 
.arco-progress-line-text{font-size:12px;margin-left:16px}.arco-progress-size-small .arco-progress-line-text .arco-icon{font-size:12px}.arco-progress-size-large .arco-progress-line-text{font-size:16px;margin-left:16px}.arco-progress-size-large .arco-progress-line-text .arco-icon{font-size:14px}.arco-progress-type-circle{display:inline-block}.arco-progress-circle-wrapper{position:relative;text-align:center;line-height:1;display:inline-block;vertical-align:text-bottom}.arco-progress-circle-svg{transform:rotate(-90deg)}.arco-progress-circle-text{position:absolute;top:50%;left:50%;color:var(--color-text-3);transform:translate(-50%,-50%);font-size:14px}.arco-progress-circle-text .arco-icon{font-size:16px;color:var(--color-text-2)}.arco-progress-circle-bg{stroke:var(--color-fill-3)}.arco-progress-circle-bar{stroke:rgb(var(--primary-6));transition:stroke-dashoffset .6s cubic-bezier(0,0,1,1) 0s,stroke .6s cubic-bezier(0,0,1,1)}.arco-progress-size-mini .arco-progress-circle-bg{stroke:var(--color-primary-light-3)}.arco-progress-size-mini .arco-progress-circle-bar{stroke:rgb(var(--primary-6))}.arco-progress-size-mini.arco-progress-status-warning .arco-progress-circle-bg{stroke:var(--color-warning-light-3)}.arco-progress-size-mini.arco-progress-status-danger .arco-progress-circle-bg{stroke:var(--color-danger-light-3)}.arco-progress-size-mini.arco-progress-status-success .arco-progress-circle-bg{stroke:var(--color-success-light-3)}.arco-progress-size-mini .arco-progress-circle-wrapper .arco-icon-check{position:absolute;top:50%;left:50%;transform:translate(-50%) translateY(-50%)}.arco-progress-size-mini .arco-progress-circle-text{position:static;top:unset;left:unset;transform:unset}.arco-progress-size-small .arco-progress-circle-text{font-size:13px}.arco-progress-size-small .arco-progress-circle-text .arco-icon{font-size:14px}.arco-progress-size-large .arco-progress-circle-text,.arco-progress-size-large .arco-progress-circle-text .arco-icon{font-size:16px}.arco-progress-status-warning .arco-progress-circle-bar{stroke:rgb(var(--warning-6))}.arco-progress-status-warning .arco-icon{color:rgb(var(--warning-6))}.arco-progress-status-success .arco-progress-circle-bar{stroke:rgb(var(--success-6))}.arco-progress-status-success .arco-icon{color:rgb(var(--success-6))}.arco-progress-status-danger .arco-progress-circle-bar{stroke:rgb(var(--danger-6))}.arco-progress-status-danger .arco-icon{color:rgb(var(--danger-6))}@keyframes arco-progress-loading{0%{background-position:100% 50%}to{background-position:0 50%}}.arco-radio>input[type=radio],.arco-radio-button>input[type=radio]{position:absolute;top:0;left:0;width:0;height:0;opacity:0}.arco-radio>input[type=radio]:focus+.arco-radio-icon-hover:before,.arco-radio-button>input[type=radio]:focus+.arco-radio-icon-hover:before{background-color:var(--color-fill-2)}.arco-icon-hover.arco-radio-icon-hover:before{width:24px;height:24px}.arco-radio{position:relative;display:inline-flex;align-items:center;padding-left:5px;font-size:14px;line-height:unset;cursor:pointer}.arco-radio-label{margin-left:8px;color:var(--color-text-1)}.arco-radio-icon{position:relative;display:block;box-sizing:border-box;width:14px;height:14px;line-height:14px;border:2px solid var(--color-neutral-3);border-radius:var(--border-radius-circle)}.arco-radio-icon:after{position:absolute;top:0;left:0;display:inline-block;box-sizing:border-box;width:10px;height:10px;background-color:var(--color-bg-2);border-radius:var(--border-radius-circle);transform:scale(1);transition:transform .3s 
cubic-bezier(.3,1.3,.3,1);content:""}.arco-radio:hover .arco-radio-icon{border-color:var(--color-neutral-3)}.arco-radio-checked .arco-radio-icon{background-color:rgb(var(--primary-6));border-color:rgb(var(--primary-6))}.arco-radio-checked .arco-radio-icon:after{background-color:var(--color-white);transform:scale(.4)}.arco-radio-checked:hover .arco-radio-icon{border-color:rgb(var(--primary-6))}.arco-radio-disabled,.arco-radio-disabled .arco-radio-icon-hover{cursor:not-allowed}.arco-radio-disabled .arco-radio-label{color:var(--color-text-4)}.arco-radio-disabled .arco-radio-icon{border-color:var(--color-neutral-3)}.arco-radio-disabled .arco-radio-icon:after{background-color:var(--color-fill-2)}.arco-radio-disabled:hover .arco-radio-icon{border-color:var(--color-neutral-3)}.arco-radio-checked.arco-radio-disabled .arco-radio-icon,.arco-radio-checked.arco-radio-disabled:hover .arco-radio-icon{background-color:var(--color-primary-light-3);border-color:transparent}.arco-radio-checked.arco-radio-disabled .arco-radio-icon:after{background-color:var(--color-fill-2)}.arco-radio-checked.arco-radio-disabled .arco-radio-label{color:var(--color-text-4)}.arco-radio:hover .arco-radio-icon-hover:before{background-color:var(--color-fill-2)}.arco-radio-group{display:inline-block;box-sizing:border-box}.arco-radio-group .arco-radio{margin-right:20px}.arco-radio-group-button{display:inline-flex;padding:1.5px;line-height:26px;background-color:var(--color-fill-2);border-radius:var(--border-radius-small)}.arco-radio-button{position:relative;display:inline-block;margin:1.5px;color:var(--color-text-2);font-size:14px;line-height:26px;background-color:transparent;border-radius:var(--border-radius-small);cursor:pointer;transition:all .1s cubic-bezier(0,0,1,1)}.arco-radio-button-content{position:relative;display:block;padding:0 12px}.arco-radio-button:not(:first-of-type):before{position:absolute;top:50%;left:-2px;display:block;width:1px;height:14px;background-color:var(--color-neutral-3);transform:translateY(-50%);transition:all .1s cubic-bezier(0,0,1,1);content:""}.arco-radio-button:hover:before,.arco-radio-button:hover+.arco-radio-button:before,.arco-radio-button.arco-radio-checked:before,.arco-radio-button.arco-radio-checked+.arco-radio-button:before{opacity:0}.arco-radio-button:hover{color:var(--color-text-1);background-color:var(--color-bg-5)}.arco-radio-button.arco-radio-checked{color:rgb(var(--primary-6));background-color:var(--color-bg-5)}.arco-radio-button.arco-radio-disabled{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-radio-button.arco-radio-disabled.arco-radio-checked{color:var(--color-primary-light-3);background-color:var(--color-bg-5)}.arco-radio-group-size-small{line-height:28px}.arco-radio-group-size-small.arco-radio-group-button,.arco-radio-group-size-small .arco-radio-button{font-size:14px;line-height:22px}.arco-radio-group-size-large{line-height:36px}.arco-radio-group-size-large.arco-radio-group-button,.arco-radio-group-size-large .arco-radio-button{font-size:14px;line-height:30px}.arco-radio-group-size-mini{line-height:24px}.arco-radio-group-size-mini.arco-radio-group-button,.arco-radio-group-size-mini .arco-radio-button{font-size:12px;line-height:18px}.arco-radio-group-direction-vertical .arco-radio{display:flex;margin-right:0;line-height:32px}body[arco-theme=dark] .arco-radio-button.arco-radio-checked,body[arco-theme=dark] .arco-radio-button:not(.arco-radio-disabled):hover{background-color:var(--color-fill-3)}body[arco-theme=dark] 
.arco-radio-button:after{background-color:var(--color-bg-3)}.arco-rate{display:inline-flex;align-items:center;min-height:32px;font-size:24px;line-height:1;user-select:none}.arco-rate-disabled{cursor:not-allowed}.arco-rate-character{position:relative;color:var(--color-fill-3);transition:transform .2s cubic-bezier(.34,.69,.1,1)}.arco-rate-character:not(:last-child){margin-right:8px}.arco-rate-character-left,.arco-rate-character-right{transition:inherit}.arco-rate-character-left>*,.arco-rate-character-right>*{float:left}.arco-rate-character-left{position:absolute;top:0;left:0;width:50%;overflow:hidden;white-space:nowrap;opacity:0}.arco-rate-character-scale{animation:arco-rate-scale .4s cubic-bezier(.34,.69,.1,1)}.arco-rate-character-full .arco-rate-character-right{color:rgb(var(--gold-6))}.arco-rate-character-half .arco-rate-character-left{color:rgb(var(--gold-6));opacity:1}.arco-rate-character-disabled{cursor:not-allowed}.arco-rate:not(.arco-rate-readonly):not(.arco-rate-disabled) .arco-rate-character{cursor:pointer}.arco-rate:not(.arco-rate-readonly):not(.arco-rate-disabled) .arco-rate-character:hover,.arco-rate:not(.arco-rate-readonly):not(.arco-rate-disabled) .arco-rate-character:focus{transform:scale(1.2)}@keyframes arco-rate-scale{0%{transform:scale(1)}50%{transform:scale(1.2)}to{transform:scale(1)}}.arco-resizebox{position:relative;width:100%;overflow:hidden}.arco-resizebox-direction-left,.arco-resizebox-direction-right,.arco-resizebox-direction-top,.arco-resizebox-direction-bottom{position:absolute;top:0;left:0;box-sizing:border-box;user-select:none}.arco-resizebox-direction-right{right:0;left:unset}.arco-resizebox-direction-bottom{top:unset;bottom:0}.arco-resizebox-trigger-icon-wrapper{display:flex;align-items:center;justify-content:center;height:100%;color:var(--color-text-1);font-size:12px;line-height:1;background-color:var(--color-neutral-3)}.arco-resizebox-trigger-icon{display:inline-block;margin:-3px}.arco-resizebox-trigger-vertical{height:100%;cursor:col-resize}.arco-resizebox-trigger-horizontal{width:100%;cursor:row-resize}.arco-result{box-sizing:border-box;width:100%;padding:32px 32px 24px}.arco-result-icon{margin-bottom:16px;font-size:20px;text-align:center}.arco-result-icon-tip{display:flex;width:45px;height:45px;align-items:center;justify-content:center;border-radius:50%;margin:0 auto}.arco-result-icon-custom .arco-result-icon-tip{font-size:45px;color:inherit;width:unset;height:unset}.arco-result-icon-success .arco-result-icon-tip{color:rgb(var(--success-6));background-color:var(--color-success-light-1)}.arco-result-icon-error .arco-result-icon-tip{color:rgb(var(--danger-6));background-color:var(--color-danger-light-1)}.arco-result-icon-info .arco-result-icon-tip{color:rgb(var(--primary-6));background-color:var(--color-primary-light-1)}.arco-result-icon-warning .arco-result-icon-tip{color:rgb(var(--warning-6));background-color:var(--color-warning-light-1)}.arco-result-icon-404,.arco-result-icon-403,.arco-result-icon-500{padding-top:24px}.arco-result-icon-404 .arco-result-icon-tip,.arco-result-icon-403 .arco-result-icon-tip,.arco-result-icon-500 
.arco-result-icon-tip{width:92px;height:92px;line-height:92px}.arco-result-title{color:var(--color-text-1);font-weight:500;font-size:14px;line-height:1.5715;text-align:center}.arco-result-subtitle{color:var(--color-text-2);font-size:14px;line-height:1.5715;text-align:center}.arco-result-extra{margin-top:20px;text-align:center}.arco-result-content{margin-top:20px}.arco-scrollbar{position:relative}.arco-scrollbar-container{position:relative;scrollbar-width:none}.arco-scrollbar-container::-webkit-scrollbar{display:none}.arco-scrollbar-track{position:absolute;z-index:100}.arco-scrollbar-track-direction-horizontal{bottom:0;left:0;box-sizing:border-box;width:100%;height:15px}.arco-scrollbar-track-direction-vertical{top:0;right:0;box-sizing:border-box;width:15px;height:100%}.arco-scrollbar-thumb{position:absolute;display:block;box-sizing:border-box}.arco-scrollbar-thumb-bar{width:100%;height:100%;background-color:var(--color-neutral-4);border-radius:6px}.arco-scrollbar-thumb:hover .arco-scrollbar-thumb-bar,.arco-scrollbar-thumb-dragging .arco-scrollbar-thumb-bar{background-color:var(--color-neutral-6)}.arco-scrollbar-thumb-direction-horizontal .arco-scrollbar-thumb-bar{height:9px;margin:3px 0}.arco-scrollbar-thumb-direction-vertical .arco-scrollbar-thumb-bar{width:9px;margin:0 3px}.arco-scrollbar.arco-scrollbar-type-embed .arco-scrollbar-thumb{opacity:0;transition:opacity ease .2s}.arco-scrollbar.arco-scrollbar-type-embed .arco-scrollbar-thumb-dragging,.arco-scrollbar.arco-scrollbar-type-embed:hover .arco-scrollbar-thumb{opacity:.8}.arco-scrollbar.arco-scrollbar-type-track .arco-scrollbar-track{background-color:var(--color-neutral-1)}.arco-scrollbar.arco-scrollbar-type-track .arco-scrollbar-track-direction-horizontal{border-top:1px solid var(--color-neutral-3);border-bottom:1px solid var(--color-neutral-3)}.arco-scrollbar.arco-scrollbar-type-track .arco-scrollbar-track-direction-vertical{border-right:1px solid var(--color-neutral-3);border-left:1px solid var(--color-neutral-3)}.arco-scrollbar.arco-scrollbar-type-track .arco-scrollbar-thumb-direction-horizontal{margin:-1px 0}.arco-scrollbar.arco-scrollbar-type-track .arco-scrollbar-thumb-direction-vertical{margin:0 -1px}.arco-scrollbar.arco-scrollbar-type-track.arco-scrollbar-both .arco-scrollbar-track-direction-vertical:after{position:absolute;right:-1px;bottom:0;display:block;box-sizing:border-box;width:15px;height:15px;background-color:var(--color-neutral-1);border-right:1px solid var(--color-neutral-3);border-bottom:1px solid var(--color-neutral-3);content:""}.arco-select-dropdown{box-sizing:border-box;padding:4px 0;background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-select-dropdown .arco-select-dropdown-loading{display:flex;align-items:center;justify-content:center;min-height:50px}.arco-select-dropdown-list{margin-top:0;margin-bottom:0;padding-left:0;list-style:none}.arco-select-dropdown-list-wrapper{max-height:200px;overflow-y:auto}.arco-select-dropdown .arco-select-option{position:relative;z-index:1;display:flex;align-items:center;box-sizing:border-box;width:100%;padding:0 12px;color:var(--color-text-1);font-size:14px;line-height:36px;text-align:left;background-color:var(--color-bg-popup);cursor:pointer}.arco-select-dropdown .arco-select-option-content{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-select-dropdown .arco-select-option-checkbox{overflow:hidden}.arco-select-dropdown .arco-select-option-checkbox 
.arco-checkbox-label{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-select-dropdown .arco-select-option-has-suffix{justify-content:space-between}.arco-select-dropdown .arco-select-option-active,.arco-select-dropdown .arco-select-option:not(.arco-select-dropdown .arco-select-option-disabled):hover{color:var(--color-text-1);background-color:var(--color-fill-2);transition:all .1s cubic-bezier(0,0,1,1)}.arco-select-dropdown .arco-select-option-disabled{color:var(--color-text-4);background-color:var(--color-bg-popup);cursor:not-allowed}.arco-select-dropdown .arco-select-option-icon{display:inline-flex;margin-right:8px}.arco-select-dropdown .arco-select-option-suffix{margin-left:12px}.arco-select-dropdown .arco-select-group:first-child .arco-select-dropdown .arco-select-group-title{margin-top:8px}.arco-select-dropdown .arco-select-group-title{box-sizing:border-box;width:100%;margin-top:8px;padding:0 12px;color:var(--color-text-3);font-size:12px;line-height:20px;cursor:default;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-select-dropdown.arco-select-dropdown-has-header{padding-top:0}.arco-select-dropdown-header{border-bottom:1px solid var(--color-fill-3)}.arco-select-dropdown.arco-select-dropdown-has-footer{padding-bottom:0}.arco-select-dropdown-footer{border-top:1px solid var(--color-fill-3)}.arco-skeleton-shape{width:48px;height:48px;background-color:var(--color-fill-2);border-radius:var(--border-radius-small)}.arco-skeleton-shape-circle{border-radius:50%}.arco-skeleton-shape-small{width:36px;height:36px}.arco-skeleton-shape-large{width:60px;height:60px}.arco-skeleton-line{margin:0;padding:0;list-style:none}.arco-skeleton-line-row{height:16px;background-color:var(--color-fill-2)}.arco-skeleton-line-row:not(:last-child){margin-bottom:16px}.arco-skeleton-animation .arco-skeleton-shape,.arco-skeleton-animation .arco-skeleton-line-row{background:linear-gradient(90deg,var(--color-fill-2) 25%,var(--color-fill-3) 37%,var(--color-fill-2) 63%);background-size:400% 100%;animation:arco-skeleton-circle 1.5s cubic-bezier(0,0,1,1) infinite}@keyframes arco-skeleton-circle{0%{background-position:100% 50%}to{background-position:0 50%}}.arco-slider{display:inline-flex;align-items:center;width:100%}.arco-slider-vertical{display:inline-block;width:auto;min-width:22px;height:auto}.arco-slider-vertical .arco-slider-wrapper{flex-direction:column}.arco-slider-with-marks{margin-bottom:24px;padding:20px}.arco-slider-vertical.arco-slider-with-marks{margin-bottom:0;padding:0}.arco-slider-track{position:relative;flex:1;width:100%;height:12px;cursor:pointer}.arco-slider-track:before{position:absolute;top:50%;display:block;width:100%;height:2px;background-color:var(--color-fill-3);border-radius:2px;transform:translateY(-50%);content:""}.arco-slider-track.arco-slider-track-vertical{width:12px;max-width:12px;height:100%;min-height:200px;margin-right:0;margin-bottom:6px;margin-top:6px;transform:translateY(0)}.arco-slider-track.arco-slider-track-vertical:before{top:unset;left:50%;width:2px;height:100%;transform:translate(-50%)}.arco-slider-track.arco-slider-track-disabled:before{background-color:var(--color-fill-2)}.arco-slider-track.arco-slider-track-disabled .arco-slider-bar{background-color:var(--color-fill-3)}.arco-slider-track.arco-slider-track-disabled .arco-slider-btn{cursor:not-allowed}.arco-slider-track.arco-slider-track-disabled .arco-slider-btn:after{border-color:var(--color-fill-3)}.arco-slider-track.arco-slider-track-disabled .arco-slider-dots 
.arco-slider-dot{border-color:var(--color-fill-2)}.arco-slider-track.arco-slider-track-disabled .arco-slider-dots .arco-slider-dot-active{border-color:var(--color-fill-3)}.arco-slider-track.arco-slider-track-disabled .arco-slider-ticks .arco-slider-tick{background:var(--color-fill-2)}.arco-slider-track.arco-slider-track-disabled .arco-slider-ticks .arco-slider-tick-active{background:var(--color-fill-3)}.arco-slider-bar{position:absolute;top:50%;height:2px;background-color:rgb(var(--primary-6));border-radius:2px;transform:translateY(-50%)}.arco-slider-track-vertical .arco-slider-bar{top:unset;left:50%;width:2px;height:unset;transform:translate(-50%)}.arco-slider-btn{position:absolute;top:0;left:0;width:12px;height:12px;transform:translate(-50%)}.arco-slider-btn:after{position:absolute;top:0;left:0;display:inline-block;box-sizing:border-box;width:12px;height:12px;background:var(--color-bg-2);border:2px solid rgb(var(--primary-6));border-radius:50%;transition:all .3s cubic-bezier(.3,1.3,.3,1);content:""}.arco-slider-btn.arco-slider-btn-active:after,.arco-slider-btn:hover:after{box-shadow:0 2px 5px #0000001a;transform:scale(1.16666667)}.arco-slider-track-vertical .arco-slider-btn{top:unset;bottom:0;left:0;transform:translateY(50%)}.arco-slider-marks{position:absolute;top:12px;width:100%}.arco-slider-marks .arco-slider-mark{position:absolute;color:var(--color-text-3);font-size:14px;line-height:1;transform:translate(-50%);cursor:pointer}.arco-slider-track-vertical .arco-slider-marks{top:0;left:15px;height:100%}.arco-slider-track-vertical .arco-slider-marks .arco-slider-mark{transform:translateY(50%)}.arco-slider-dots{height:100%}.arco-slider-dots .arco-slider-dot-wrapper{position:absolute;top:50%;font-size:12px;transform:translate(-50%,-50%)}.arco-slider-track-vertical .arco-slider-dots .arco-slider-dot-wrapper{top:unset;left:50%;transform:translate(-50%,50%)}.arco-slider-dots .arco-slider-dot-wrapper .arco-slider-dot{box-sizing:border-box;width:8px;height:8px;background-color:var(--color-bg-2);border:2px solid var(--color-fill-3);border-radius:50%}.arco-slider-dots .arco-slider-dot-wrapper .arco-slider-dot-active{border-color:rgb(var(--primary-6))}.arco-slider-ticks .arco-slider-tick{position:absolute;top:50%;width:1px;height:3px;margin-top:-1px;background:var(--color-fill-3);transform:translate(-50%,-100%)}.arco-slider-ticks .arco-slider-tick-active{background:rgb(var(--primary-6))}.arco-slider-vertical .arco-slider-ticks .arco-slider-tick{top:unset;left:50%;width:3px;height:1px;margin-top:unset;transform:translate(1px,50%)}.arco-slider-input{display:flex;align-items:center;margin-left:20px}.arco-slider-vertical .arco-slider-input{margin-left:0}.arco-slider-input>.arco-input-number{width:60px;height:32px;overflow:visible;line-height:normal}.arco-slider-input>.arco-input-number input{text-align:center}.arco-slider-input-hyphens{margin:0 6px;width:8px;height:2px;background:rgb(var(--gray-6))}.arco-space{display:inline-flex}.arco-space-horizontal 
.arco-space-item{display:flex;align-items:center}.arco-space-vertical{flex-direction:column}.arco-space-align-baseline{align-items:baseline}.arco-space-align-start{align-items:flex-start}.arco-space-align-end{align-items:flex-end}.arco-space-align-center{align-items:center}.arco-space-wrap{flex-wrap:wrap}.arco-space-fill{display:flex}.arco-dot-loading{position:relative;display:inline-block;width:56px;height:8px;transform-style:preserve-3d;perspective:200px}.arco-dot-loading-item{position:absolute;top:0;left:50%;width:8px;height:8px;background-color:rgb(var(--primary-6));border-radius:var(--border-radius-circle);transform:translate(-50%) scale(0);animation:arco-dot-loading 2s cubic-bezier(0,0,1,1) infinite forwards}.arco-dot-loading-item:nth-child(2){background-color:rgb(var(--primary-5));animation-delay:.4s}.arco-dot-loading-item:nth-child(3){background-color:rgb(var(--primary-4));animation-delay:.8s}.arco-dot-loading-item:nth-child(4){background-color:rgb(var(--primary-4));animation-delay:1.2s}.arco-dot-loading-item:nth-child(5){background-color:rgb(var(--primary-2));animation-delay:1.6s}@keyframes arco-dot-loading{0%{transform:translate3D(-48.621%,0,-.985px) scale(.511)}2.778%{transform:translate3D(-95.766%,0,-.94px) scale(.545)}5.556%{transform:translate3D(-140%,0,-.866px) scale(.6)}8.333%{transform:translate3D(-179.981%,0,-.766px) scale(.675)}11.111%{transform:translate3D(-214.492%,0,-.643px) scale(.768)}13.889%{transform:translate3D(-242.487%,0,-.5px) scale(.875)}16.667%{transform:translate3D(-263.114%,0,-.342px) scale(.993)}19.444%{transform:translate3D(-275.746%,0,-.174px) scale(1.12)}22.222%{transform:translate3D(-280%,0,0) scale(1.25)}25%{transform:translate3D(-275.746%,0,.174px) scale(1.38)}27.778%{transform:translate3D(-263.114%,0,.342px) scale(1.507)}30.556%{transform:translate3D(-242.487%,0,.5px) scale(1.625)}33.333%{transform:translate3D(-214.492%,0,.643px) scale(1.732)}36.111%{transform:translate3D(-179.981%,0,.766px) scale(1.825)}38.889%{transform:translate3D(-140%,0,.866px) scale(1.9)}41.667%{transform:translate3D(-95.766%,0,.94px) scale(1.955)}44.444%{transform:translate3D(-48.621%,0,.985px) scale(1.989)}47.222%{transform:translateZ(1px) scale(2)}50%{transform:translate3D(48.621%,0,.985px) scale(1.989)}52.778%{transform:translate3D(95.766%,0,.94px) scale(1.955)}55.556%{transform:translate3D(140%,0,.866px) scale(1.9)}58.333%{transform:translate3D(179.981%,0,.766px) scale(1.825)}61.111%{transform:translate3D(214.492%,0,.643px) scale(1.732)}63.889%{transform:translate3D(242.487%,0,.5px) scale(1.625)}66.667%{transform:translate3D(263.114%,0,.342px) scale(1.507)}69.444%{transform:translate3D(275.746%,0,.174px) scale(1.38)}72.222%{transform:translate3D(280%,0,0) scale(1.25)}75%{transform:translate3D(275.746%,0,-.174px) scale(1.12)}77.778%{transform:translate3D(263.114%,0,-.342px) scale(.993)}80.556%{transform:translate3D(242.487%,0,-.5px) scale(.875)}83.333%{transform:translate3D(214.492%,0,-.643px) scale(.768)}86.111%{transform:translate3D(179.981%,0,-.766px) scale(.675)}88.889%{transform:translate3D(140%,0,-.866px) scale(.6)}91.667%{transform:translate3D(95.766%,0,-.94px) scale(.545)}94.444%{transform:translate3D(48.621%,0,-.985px) scale(.511)}97.222%{transform:translateZ(-1px) 
scale(.5)}}.arco-spin{display:inline-block}.arco-spin-with-tip{text-align:center}.arco-spin-icon{color:rgb(var(--primary-6));font-size:20px}.arco-spin-tip{margin-top:6px;color:rgb(var(--primary-6));font-weight:500;font-size:14px}.arco-spin-mask{position:absolute;top:0;right:0;bottom:0;left:0;z-index:11;text-align:center;background-color:var(--color-spin-layer-bg);transition:opacity .1s cubic-bezier(0,0,1,1);user-select:none}.arco-spin-loading{position:relative;user-select:none}.arco-spin-loading .arco-spin-mask-icon{position:absolute;top:50%;left:50%;z-index:12;transform:translate(-50%,-50%)}.arco-spin-loading .arco-spin-children:after{opacity:1;pointer-events:auto}.arco-split{display:flex}.arco-split-pane{overflow:auto}.arco-split-pane-second{flex:1}.arco-split-horizontal{flex-direction:row}.arco-split-vertical{flex-direction:column}.arco-split-trigger-icon-wrapper{display:flex;align-items:center;justify-content:center;height:100%;color:var(--color-text-1);font-size:12px;line-height:1;background-color:var(--color-neutral-3)}.arco-split-trigger-icon{display:inline-block;margin:-3px}.arco-split-trigger-vertical{height:100%;cursor:col-resize}.arco-split-trigger-horizontal{width:100%;cursor:row-resize}.arco-statistic{display:inline-block;color:var(--color-text-2);line-height:1.5715}.arco-statistic-title{margin-bottom:8px;font-size:14px;color:var(--color-text-2)}.arco-statistic-content .arco-statistic-value{color:var(--color-text-1);font-weight:500;font-size:26px;white-space:nowrap}.arco-statistic-content .arco-statistic-value-integer{font-size:26px;white-space:nowrap}.arco-statistic-content .arco-statistic-value-decimal{display:inline-block;font-size:26px}.arco-statistic-prefix,.arco-statistic-suffix{font-size:14px}.arco-statistic-extra{margin-top:8px;color:var(--color-text-2)}.arco-steps-item{position:relative;flex:1;margin-right:12px;overflow:hidden;white-space:nowrap;text-align:left}.arco-steps-item:last-child{flex:none;margin-right:0}.arco-steps-item-active .arco-steps-item-title{font-weight:500}.arco-steps-item-node{display:inline-block;margin-right:12px;font-weight:500;font-size:16px;vertical-align:top}.arco-steps-icon{box-sizing:border-box;width:28px;height:28px;line-height:26px;text-align:center;border-radius:var(--border-radius-circle);font-size:16px}.arco-steps-item-wait .arco-steps-icon{color:var(--color-text-2);background-color:var(--color-fill-2);border:1px solid transparent}.arco-steps-item-process .arco-steps-icon{color:var(--color-white);background-color:rgb(var(--primary-6));border:1px solid transparent}.arco-steps-item-finish .arco-steps-icon{color:rgb(var(--primary-6));background-color:var(--color-primary-light-1);border:1px solid transparent}.arco-steps-item-error .arco-steps-icon{color:var(--color-white);background-color:rgb(var(--danger-6));border:1px solid transparent}.arco-steps-item-title{position:relative;display:inline-block;padding-right:12px;color:var(--color-text-2);font-size:16px;line-height:28px;white-space:nowrap}.arco-steps-item-wait .arco-steps-item-title{color:var(--color-text-2)}.arco-steps-item-process .arco-steps-item-title,.arco-steps-item-finish .arco-steps-item-title,.arco-steps-item-error .arco-steps-item-title{color:var(--color-text-1)}.arco-steps-item-content{display:inline-block}.arco-steps-item-description{max-width:140px;margin-top:2px;color:var(--color-text-3);font-size:12px;white-space:normal}.arco-steps-item-wait .arco-steps-item-description,.arco-steps-item-process .arco-steps-item-description,.arco-steps-item-finish 
.arco-steps-item-description,.arco-steps-item-error .arco-steps-item-description{color:var(--color-text-3)}.arco-steps-label-horizontal .arco-steps-item:not(:last-child) .arco-steps-item-title:after{position:absolute;top:13.5px;left:100%;display:block;box-sizing:border-box;width:5000px;height:1px;background-color:var(--color-neutral-3);content:""}.arco-steps-label-horizontal .arco-steps-item.arco-steps-item-process .arco-steps-item-title:after{background-color:var(--color-neutral-3)}.arco-steps-label-horizontal .arco-steps-item.arco-steps-item-finish .arco-steps-item-title:after{background-color:rgb(var(--primary-6))}.arco-steps-label-horizontal .arco-steps-item.arco-steps-item-next-error .arco-steps-item-title:after{background-color:rgb(var(--danger-6))}.arco-steps-item:not(:last-child) .arco-steps-item-tail{position:absolute;top:13.5px;box-sizing:border-box;width:100%;height:1px}.arco-steps-item:not(:last-child) .arco-steps-item-tail:after{display:block;width:100%;height:100%;background-color:var(--color-neutral-3);content:""}.arco-steps-vertical .arco-steps-item:not(:last-child) .arco-steps-item-tail{position:absolute;top:0;left:13.5px;box-sizing:border-box;width:1px;height:100%;padding:34px 0 6px}.arco-steps-vertical .arco-steps-item:not(:last-child) .arco-steps-item-tail:after{display:block;width:100%;height:100%;background-color:var(--color-neutral-3);content:""}.arco-steps-size-small.arco-steps-vertical .arco-steps-item:not(:last-child) .arco-steps-item-tail{left:11.5px;padding:30px 0 6px}.arco-steps-item:not(:last-child).arco-steps-item-finish .arco-steps-item-tail:after{background-color:rgb(var(--primary-6))}.arco-steps-item:not(:last-child).arco-steps-item-next-error .arco-steps-item-tail:after{background-color:rgb(var(--danger-6))}.arco-steps-size-small:not(.arco-steps-vertical) .arco-steps-item:not(:last-child) .arco-steps-item-tail{top:11.5px}.arco-steps-size-small .arco-steps-item-node{font-size:14px}.arco-steps-size-small .arco-steps-item-title{font-size:14px;line-height:24px}.arco-steps-size-small .arco-steps-item-description{font-size:12px}.arco-steps-size-small .arco-steps-icon{width:24px;height:24px;font-size:14px;line-height:22px}.arco-steps-size-small.arco-steps-label-horizontal .arco-steps-item:not(:last-child) .arco-steps-item-title:after{top:11.5px}.arco-steps-label-vertical .arco-steps-item{overflow:visible}.arco-steps-label-vertical .arco-steps-item-title{margin-top:2px;padding-right:0}.arco-steps-label-vertical .arco-steps-item-node{margin-left:56px}.arco-steps-label-vertical .arco-steps-item-tail{left:96px;padding-right:40px}.arco-steps-label-vertical.arco-steps-size-small .arco-steps-item-node{margin-left:58px}.arco-steps-label-vertical.arco-steps-size-small .arco-steps-item-tail{left:94px;padding-right:36px}.arco-steps-mode-dot .arco-steps-item{position:relative;flex:1;margin-right:16px;overflow:visible;white-space:nowrap;text-align:left}.arco-steps-mode-dot .arco-steps-item:last-child{flex:none;margin-right:0}.arco-steps-mode-dot .arco-steps-item-active .arco-steps-item-title{font-weight:500}.arco-steps-mode-dot .arco-steps-item-node{display:inline-block;box-sizing:border-box;width:8px;height:8px;vertical-align:top;border-radius:var(--border-radius-circle)}.arco-steps-mode-dot .arco-steps-item-active .arco-steps-item-node{width:10px;height:10px}.arco-steps-mode-dot .arco-steps-item-wait .arco-steps-item-node{background-color:var(--color-fill-4);border-color:var(--color-fill-4)}.arco-steps-mode-dot .arco-steps-item-process 
.arco-steps-item-node,.arco-steps-mode-dot .arco-steps-item-finish .arco-steps-item-node{background-color:rgb(var(--primary-6));border-color:rgb(var(--primary-6))}.arco-steps-mode-dot .arco-steps-item-error .arco-steps-item-node{background-color:rgb(var(--danger-6));border-color:rgb(var(--danger-6))}.arco-steps-mode-dot.arco-steps-horizontal .arco-steps-item-node{margin-left:66px}.arco-steps-mode-dot.arco-steps-horizontal .arco-steps-item-active .arco-steps-item-node{margin-top:-1px;margin-left:65px}.arco-steps-mode-dot .arco-steps-item-content{display:inline-block}.arco-steps-mode-dot .arco-steps-item-title{position:relative;display:inline-block;margin-top:4px;font-size:16px}.arco-steps-mode-dot .arco-steps-item-wait .arco-steps-item-title{color:var(--color-text-2)}.arco-steps-mode-dot .arco-steps-item-process .arco-steps-item-title,.arco-steps-mode-dot .arco-steps-item-finish .arco-steps-item-title,.arco-steps-mode-dot .arco-steps-item-error .arco-steps-item-title{color:var(--color-text-1)}.arco-steps-mode-dot .arco-steps-item-description{margin-top:4px;font-size:12px;white-space:normal}.arco-steps-mode-dot .arco-steps-item-wait .arco-steps-item-description,.arco-steps-mode-dot .arco-steps-item-process .arco-steps-item-description,.arco-steps-mode-dot .arco-steps-item-finish .arco-steps-item-description,.arco-steps-mode-dot .arco-steps-item-error .arco-steps-item-description{color:var(--color-text-3)}.arco-steps-mode-dot .arco-steps-item:not(:last-child) .arco-steps-item-tail{position:absolute;top:3.5px;left:78px;box-sizing:border-box;width:100%;height:1px;background-color:var(--color-neutral-3)}.arco-steps-mode-dot .arco-steps-item:not(:last-child).arco-steps-item-process .arco-steps-item-tail{background-color:var(--color-neutral-3)}.arco-steps-mode-dot .arco-steps-item:not(:last-child).arco-steps-item-finish .arco-steps-item-tail{background-color:rgb(var(--primary-6))}.arco-steps-mode-dot .arco-steps-item:not(:last-child).arco-steps-item-next-error .arco-steps-item-tail{background-color:rgb(var(--danger-6))}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item-node{margin-right:16px}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item-content{overflow:hidden}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item-title{margin-top:-2px}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item-description{margin-top:4px}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item:not(:last-child) .arco-steps-item-tail{position:absolute;bottom:0;left:4px;box-sizing:border-box;width:1px;height:100%;padding-top:16px;padding-bottom:2px;background-color:transparent;transform:translate(-50%)}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item:not(:last-child) .arco-steps-item-tail:after{display:block;width:100%;height:100%;background-color:var(--color-neutral-3);content:""}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item:not(:last-child).arco-steps-item-process .arco-steps-item-tail:after{background-color:var(--color-neutral-3)}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item:not(:last-child).arco-steps-item-finish .arco-steps-item-tail:after{background-color:rgb(var(--primary-6))}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item:not(:last-child).arco-steps-item-next-error .arco-steps-item-tail:after{background-color:rgb(var(--danger-6))}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item .arco-steps-item-node{margin-top:8px}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item-active 
.arco-steps-item-node{margin-top:6px;margin-left:-1px}.arco-steps-mode-arrow .arco-steps-item{position:relative;display:flex;flex:1;align-items:center;height:72px;overflow:visible;white-space:nowrap}.arco-steps-mode-arrow .arco-steps-item:not(:last-child){margin-right:4px}.arco-steps-mode-arrow .arco-steps-item-wait{background-color:var(--color-fill-1)}.arco-steps-mode-arrow .arco-steps-item-process{background-color:rgb(var(--primary-6))}.arco-steps-mode-arrow .arco-steps-item-finish{background-color:var(--color-primary-light-1)}.arco-steps-mode-arrow .arco-steps-item-error{background-color:rgb(var(--danger-6))}.arco-steps-mode-arrow .arco-steps-item-content{display:inline-block;box-sizing:border-box}.arco-steps-mode-arrow .arco-steps-item:first-child .arco-steps-item-content{padding-left:16px}.arco-steps-mode-arrow .arco-steps-item:not(:first-child) .arco-steps-item-content{padding-left:52px}.arco-steps-mode-arrow .arco-steps-item-title{position:relative;display:inline-block;font-size:16px;white-space:nowrap}.arco-steps-mode-arrow .arco-steps-item-title:after{display:none!important}.arco-steps-mode-arrow .arco-steps-item-wait .arco-steps-item-title{color:var(--color-text-2)}.arco-steps-mode-arrow .arco-steps-item-process .arco-steps-item-title{color:var(--color-white)}.arco-steps-mode-arrow .arco-steps-item-finish .arco-steps-item-title{color:var(--color-text-1)}.arco-steps-mode-arrow .arco-steps-item-error .arco-steps-item-title{color:var(--color-white)}.arco-steps-mode-arrow .arco-steps-item-active .arco-steps-item-title{font-weight:500}.arco-steps-mode-arrow .arco-steps-item-description{max-width:none;margin-top:0;font-size:12px;white-space:nowrap}.arco-steps-mode-arrow .arco-steps-item-wait .arco-steps-item-description{color:var(--color-text-3)}.arco-steps-mode-arrow .arco-steps-item-process .arco-steps-item-description{color:var(--color-white)}.arco-steps-mode-arrow .arco-steps-item-finish .arco-steps-item-description{color:var(--color-text-3)}.arco-steps-mode-arrow .arco-steps-item-error .arco-steps-item-description{color:var(--color-white)}.arco-steps-mode-arrow .arco-steps-item:not(:first-child):before{position:absolute;top:0;left:0;z-index:1;display:block;width:0;height:0;border-top:36px solid transparent;border-bottom:36px solid transparent;border-left:36px solid var(--color-bg-2);content:""}.arco-steps-mode-arrow .arco-steps-item:not(:last-child):after{position:absolute;top:0;right:-36px;z-index:2;display:block;clear:both;width:0;height:0;border-top:36px solid transparent;border-bottom:36px solid transparent;content:""}.arco-steps-mode-arrow .arco-steps-item:not(:last-child).arco-steps-item-wait:after{border-left:36px solid var(--color-fill-1)}.arco-steps-mode-arrow .arco-steps-item:not(:last-child).arco-steps-item-process:after{border-left:36px solid rgb(var(--primary-6))}.arco-steps-mode-arrow .arco-steps-item:not(:last-child).arco-steps-item-error:after{border-left:36px solid rgb(var(--danger-6))}.arco-steps-mode-arrow .arco-steps-item:not(:last-child).arco-steps-item-finish:after{border-left:36px solid var(--color-primary-light-1)}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item{height:40px}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item-title{font-size:14px}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item-description{display:none}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:first-child):before{border-top:20px solid transparent;border-bottom:20px solid transparent;border-left:20px solid 
var(--color-bg-2)}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:last-child):after{right:-20px;border-top:20px solid transparent;border-bottom:20px solid transparent;border-left:20px solid var(--color-fill-1)}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:first-child .arco-steps-item-content{padding-left:20px}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:first-child) .arco-steps-item-content{padding-left:40px}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item-error:not(:last-child):after{border-left:20px solid rgb(var(--danger-6))}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:last-child).arco-steps-item-wait:after{border-left:20px solid var(--color-fill-1)}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:last-child).arco-steps-item-process:after{border-left:20px solid rgb(var(--primary-6))}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:last-child).arco-steps-item-finish:after{border-left:20px solid var(--color-primary-light-1)}.arco-steps-mode-navigation.arco-steps-label-horizontal .arco-steps-item:not(:last-child) .arco-steps-item-title:after{display:none}.arco-steps-mode-navigation .arco-steps-item{padding-left:20px;padding-right:10px;margin-right:32px}.arco-steps-mode-navigation .arco-steps-item:last-child{flex:1}.arco-steps-mode-navigation .arco-steps-item-content{margin-bottom:20px}.arco-steps-mode-navigation .arco-steps-item-description{padding-right:20px}.arco-steps-mode-navigation .arco-steps-item-active:after{content:"";position:absolute;display:block;height:2px;left:0;right:30px;bottom:0;background-color:rgb(var(--primary-6))}.arco-steps-mode-navigation .arco-steps-item-active:last-child:after{width:100%}.arco-steps-mode-navigation .arco-steps-item:not(:last-child) .arco-steps-item-content:after{position:absolute;top:10px;right:30px;display:inline-block;width:6px;height:6px;background-color:var(--color-bg-2);border:2px solid var(--color-text-4);border-bottom:none;border-left:none;-webkit-transform:rotate(45deg);transform:rotate(45deg);content:""}.arco-steps{display:flex}.arco-steps-changeable .arco-steps-item-title,.arco-steps-changeable .arco-steps-item-description{transition:all .1s cubic-bezier(0,0,1,1)}.arco-steps-changeable .arco-steps-item:not(.arco-steps-item-active):not(.arco-steps-item-disabled){cursor:pointer}.arco-steps-changeable .arco-steps-item:not(.arco-steps-item-active):not(.arco-steps-item-disabled):hover .arco-steps-item-content .arco-steps-item-title,.arco-steps-changeable .arco-steps-item:not(.arco-steps-item-active):not(.arco-steps-item-disabled):hover .arco-steps-item-content .arco-steps-item-description{color:rgb(var(--primary-6))}.arco-steps-line-less .arco-steps-item-title:after{display:none!important}.arco-steps-vertical{flex-direction:column}.arco-steps-vertical .arco-steps-item:not(:last-child){min-height:90px}.arco-steps-vertical .arco-steps-item-title:after{display:none!important}.arco-steps-vertical .arco-steps-item-description{max-width:none}.arco-steps-label-vertical .arco-steps-item-content{display:block;width:140px;text-align:center}.arco-steps-label-vertical .arco-steps-item-description{max-width:none}.switch-slide-text-enter-from{left:-100%!important}.switch-slide-text-enter-to{left:8px!important}.switch-slide-text-enter-active{transition:left .2s cubic-bezier(.34,.69,.1,1)}.switch-slide-text-leave-from{left:100%!important}.switch-slide-text-leave-to{left:26px!important}.switch-slide-text-leave-active{transition:left 
.2s cubic-bezier(.34,.69,.1,1)}.arco-switch{position:relative;box-sizing:border-box;min-width:40px;height:24px;padding:0;overflow:hidden;line-height:24px;vertical-align:middle;background-color:var(--color-fill-4);border:none;border-radius:12px;outline:none;cursor:pointer;transition:background-color .2s cubic-bezier(.34,.69,.1,1)}.arco-switch-handle{position:absolute;top:4px;left:4px;display:flex;align-items:center;justify-content:center;width:16px;height:16px;color:var(--color-neutral-3);font-size:12px;background-color:var(--color-bg-white);border-radius:50%;transition:all .2s cubic-bezier(.34,.69,.1,1)}.arco-switch-checked{background-color:rgb(var(--primary-6))}.arco-switch-checked .arco-switch-handle{left:calc(100% - 20px);color:rgb(var(--primary-6))}.arco-switch[disabled] .arco-switch-handle{color:var(--color-fill-2)}.arco-switch[disabled].arco-switch-checked .arco-switch-handle{color:var(--color-primary-light-3)}.arco-switch-text-holder{margin:0 8px 0 26px;font-size:12px;opacity:0}.arco-switch-text{position:absolute;top:0;left:26px;color:var(--color-white);font-size:12px}.arco-switch-checked .arco-switch-text-holder{margin:0 26px 0 8px}.arco-switch-checked .arco-switch-text{left:8px;color:var(--color-white)}.arco-switch[disabled]{background-color:var(--color-fill-2);cursor:not-allowed}.arco-switch[disabled] .arco-switch-text{color:var(--color-white)}.arco-switch[disabled].arco-switch-checked{background-color:var(--color-primary-light-3)}.arco-switch[disabled].arco-switch-checked .arco-switch-text{color:var(--color-white)}.arco-switch-loading{background-color:var(--color-fill-2)}.arco-switch-loading .arco-switch-handle{color:var(--color-neutral-3)}.arco-switch-loading .arco-switch-text{color:var(--color-white)}.arco-switch-loading.arco-switch-checked{background-color:var(--color-primary-light-3)}.arco-switch-loading.arco-switch-checked .arco-switch-handle{color:var(--color-primary-light-3)}.arco-switch-loading.arco-switch-checked .arco-switch-text{color:var(--color-primary-light-1)}.arco-switch-small{min-width:28px;height:16px;line-height:16px}.arco-switch-small.arco-switch-checked{padding-left:-2px}.arco-switch-small .arco-switch-handle{top:2px;left:2px;width:12px;height:12px;border-radius:8px}.arco-switch-small .arco-switch-handle-icon{position:absolute;top:50%;left:50%;transform:translate(-50%,-50%) scale(.66667)}.arco-switch-small.arco-switch-checked .arco-switch-handle{left:calc(100% - 14px)}.arco-switch-type-round{min-width:40px;border-radius:var(--border-radius-small)}.arco-switch-type-round .arco-switch-handle{border-radius:2px}.arco-switch-type-round.arco-switch-small{min-width:28px;height:16px;line-height:16px;border-radius:2px}.arco-switch-type-round.arco-switch-small .arco-switch-handle{border-radius:1px}.arco-switch-type-line{min-width:36px;overflow:unset;background-color:transparent}.arco-switch-type-line:after{display:block;width:100%;height:6px;background-color:var(--color-fill-4);border-radius:3px;transition:background-color .2s cubic-bezier(.34,.69,.1,1);content:""}.arco-switch-type-line .arco-switch-handle{top:2px;left:0;width:20px;height:20px;background-color:var(--color-bg-white);border-radius:10px;box-shadow:0 1px 3px var(--color-neutral-6)}.arco-switch-type-line.arco-switch-checked{background-color:transparent}.arco-switch-type-line.arco-switch-checked:after{background-color:rgb(var(--primary-6))}.arco-switch-type-line.arco-switch-custom-color{--custom-color: 
var(--color-fill-4)}.arco-switch-type-line.arco-switch-custom-color:after{background-color:var(--custom-color)}.arco-switch-type-line.arco-switch-custom-color.arco-switch-checked{--custom-color: rgb(var(--primary-6))}.arco-switch-type-line.arco-switch-checked .arco-switch-handle{left:calc(100% - 20px)}.arco-switch-type-line[disabled]{background-color:transparent;cursor:not-allowed}.arco-switch-type-line[disabled]:after{background-color:var(--color-fill-2)}.arco-switch-type-line[disabled].arco-switch-checked{background-color:transparent}.arco-switch-type-line[disabled].arco-switch-checked:after{background-color:var(--color-primary-light-3)}.arco-switch-type-line.arco-switch-loading{background-color:transparent}.arco-switch-type-line.arco-switch-loading:after{background-color:var(--color-fill-2)}.arco-switch-type-line.arco-switch-loading.arco-switch-checked{background-color:transparent}.arco-switch-type-line.arco-switch-loading.arco-switch-checked:after{background-color:var(--color-primary-light-3)}.arco-switch-type-line.arco-switch-small{min-width:28px;height:16px;line-height:16px}.arco-switch-type-line.arco-switch-small.arco-switch-checked{padding-left:0}.arco-switch-type-line.arco-switch-small .arco-switch-handle{top:0px;width:16px;height:16px;border-radius:8px}.arco-switch-type-line.arco-switch-small .arco-switch-handle-icon{transform:translate(-50%,-50%) scale(1)}.arco-switch-type-line.arco-switch-small.arco-switch-checked .arco-switch-handle{left:calc(100% - 16px)}.arco-table-filters-content{box-sizing:border-box;min-width:100px;background:var(--color-bg-5);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 2px 5px #0000001a}.arco-table-filters-list{max-height:200px;padding:4px 0;overflow-y:auto}.arco-table-filters-item{height:32px;padding:0 12px;font-size:14px;line-height:32px}.arco-table-filters-text{width:100%;max-width:160px;height:34px;margin-right:0;padding-left:10px;overflow:hidden;line-height:32px;white-space:nowrap;text-overflow:ellipsis;cursor:pointer}.arco-table-filters-bottom{box-sizing:border-box;height:38px;padding:0 12px;overflow:hidden;line-height:38px;border-top:1px solid var(--color-neutral-3)}.arco-table-filters-bottom>*:not(*:last-child){margin-right:8px}.arco-table{position:relative}.arco-table-column-handle{position:absolute;top:0;right:-4px;z-index:1;width:8px;height:100%;cursor:col-resize}.arco-table .arco-spin{display:flex;flex-direction:column;height:100%}.arco-table>.arco-spin>.arco-spin-children:after{z-index:2}.arco-table-footer{border-radius:0 0 var(--border-radius-medium) var(--border-radius-medium)}.arco-table-scroll-position-right .arco-table-col-fixed-left-last:after,.arco-table-scroll-position-middle .arco-table-col-fixed-left-last:after{box-shadow:inset 6px 0 8px -3px #00000026}.arco-table-scroll-position-left .arco-table-col-fixed-right-first:after,.arco-table-scroll-position-middle .arco-table-col-fixed-right-first:after{box-shadow:inset -6px 0 8px -3px #00000026}.arco-table-layout-fixed .arco-table-element{table-layout:fixed}.arco-table .arco-table-element{width:100%;min-width:100%;margin:0;border-collapse:separate;border-spacing:0}.arco-table-th{position:relative;box-sizing:border-box;color:rgb(var(--gray-10));font-weight:500;line-height:1.5715;text-align:left;background-color:var(--color-neutral-2)}.arco-table-th[colspan]{text-align:center}.arco-table-th-align-right{text-align:right}.arco-table-th-align-right 
.arco-table-cell-with-sorter{justify-content:flex-end}.arco-table-th-align-center{text-align:center}.arco-table-th-align-center .arco-table-cell-with-sorter{justify-content:center}.arco-table-td{box-sizing:border-box;color:rgb(var(--gray-10));line-height:1.5715;text-align:left;word-break:break-all;background-color:var(--color-bg-2);border-bottom:1px solid var(--color-neutral-3)}.arco-table-td-align-right{text-align:right}.arco-table-td-align-center{text-align:center}.arco-table-td.arco-table-drag-handle{cursor:move}.arco-table-cell{display:flex;align-items:center}.arco-table-cell-align-right{justify-content:flex-end;text-align:right}.arco-table-cell-align-center{justify-content:center;text-align:center}.arco-table-text-ellipsis{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-table-td-content{display:block;width:100%}.arco-table-th.arco-table-col-sorted{background-color:var(--color-neutral-3)}.arco-table-td.arco-table-col-sorted{background-color:var(--color-fill-1)}.arco-table-col-fixed-left,.arco-table-col-fixed-right{position:sticky;z-index:10}.arco-table-col-fixed-left-last:after,.arco-table-col-fixed-right-first:after{position:absolute;top:0;bottom:-1px;left:0;width:10px;box-shadow:none;transform:translate(-100%);transition:box-shadow .1s cubic-bezier(0,0,1,1);content:"";pointer-events:none}.arco-table-col-fixed-left-last:after{right:0;left:unset;transform:translate(100%)}.arco-table-cell-text-ellipsis{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-table-editable-row .arco-table-cell-wrap-value{border:1px solid var(--color-white);border-radius:var(--border-radius-medium);cursor:pointer;transition:all .1s cubic-bezier(0,0,1,1)}.arco-table-editable-row:hover .arco-table-cell-wrap-value{border:1px solid var(--color-neutral-3)}.arco-table .arco-table-expand-btn{display:inline-flex;align-items:center;justify-content:center;width:14px;height:14px;padding:0;color:var(--color-text-2);font-size:12px;line-height:14px;background-color:var(--color-neutral-3);border:1px solid transparent;border-radius:2px;outline:none;cursor:pointer;transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-table .arco-table-expand-btn:hover{color:var(--color-text-1);background-color:var(--color-neutral-4);border-color:transparent}.arco-table-cell-expand-icon{display:flex;align-items:center}.arco-table-cell-expand-icon .arco-table-cell-inline-icon{display:inline-flex;margin-right:4px}.arco-table-cell-expand-icon .arco-table-cell-inline-icon .arco-icon-loading{color:rgb(var(--primary-6))}.arco-table-cell-expand-icon-hidden{display:inline-block;width:14px;height:14px;margin-right:4px}.arco-table-tr-expand .arco-table-td{background-color:var(--color-fill-1)}.arco-table-cell-fixed-expand{position:sticky;left:0;box-sizing:border-box}.arco-table-tr-expand .arco-table-td .arco-table .arco-table-container{border:none}.arco-table-tr-expand .arco-table-td .arco-table .arco-table-th{border-bottom:1px solid var(--color-neutral-3)}.arco-table-tr-expand .arco-table-td .arco-table .arco-table-th,.arco-table-tr-expand .arco-table-td .arco-table .arco-table-td{background-color:transparent}.arco-table-tr-expand .arco-table-td .arco-table .arco-table-pagination{margin-bottom:12px}.arco-table-th.arco-table-operation,.arco-table-td.arco-table-operation{text-align:center}.arco-table-th.arco-table-operation .arco-table-cell,.arco-table-td.arco-table-operation .arco-table-cell{display:flex;justify-content:center;padding:0}.arco-table-radio,.arco-table-checkbox{justify-content:center}.arco-table-checkbox 
.arco-checkbox,.arco-table-radio .arco-radio{padding-left:0}.arco-table-selection-checkbox-col,.arco-table-selection-radio-col,.arco-table-expand-col,.arco-table-drag-handle-col{width:40px;min-width:40px;max-width:40px}.arco-table-th{transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-table-cell-with-sorter{display:flex;align-items:center;cursor:pointer}.arco-table-cell-with-sorter:hover{background-color:rgba(var(--gray-4),.5)}.arco-table-cell-with-filter{display:flex;align-items:center}.arco-table-cell-next-ascend .arco-table-sorter-icon .arco-icon-caret-up,.arco-table-cell-next-descend .arco-table-sorter-icon .arco-icon-caret-down{color:var(--color-neutral-6)}.arco-table-sorter{display:inline-block;margin-left:8px;vertical-align:-3px}.arco-table-sorter.arco-table-sorter-direction-one{vertical-align:0}.arco-table-sorter-icon{position:relative;width:14px;height:8px;overflow:hidden;line-height:8px}.arco-table-sorter-icon .arco-icon-caret-up,.arco-table-sorter-icon .arco-icon-caret-down{position:absolute;top:50%;color:var(--color-neutral-5);font-size:12px;transition:all .1s cubic-bezier(0,0,1,1)}.arco-table-sorter-icon .arco-icon-caret-up{top:-2px;left:1px}.arco-table-sorter-icon .arco-icon-caret-down{top:-3px;left:1px}.arco-table-sorter-icon.arco-table-sorter-icon-active svg{color:rgb(var(--primary-6))}.arco-table-filters{position:absolute;top:0;right:0;display:flex;align-items:center;justify-content:center;width:24px;height:100%;line-height:1;vertical-align:0;background-color:transparent;cursor:pointer;transition:all .1s cubic-bezier(0,0,1,1)}.arco-table-filters:hover,.arco-table-filters-open{background-color:var(--color-neutral-4)}.arco-table-filters svg{color:var(--color-text-2);font-size:16px;transition:all .1s cubic-bezier(0,0,1,1)}.arco-table-filters-active svg{color:rgb(var(--primary-6))}.arco-table-filters-align-left{position:relative;width:auto;margin-left:8px}.arco-table-filters-align-left svg{font-size:12px}.arco-table-filters-align-left:hover,.arco-table-filters-align-left-open{background:none}.arco-table-filters-align-left:hover:before,.arco-table-filters-align-left.arco-table-filters-open:before{background:var(--color-fill-4)}.arco-table-container{position:relative;border-radius:var(--border-radius-medium) var(--border-radius-medium) 0 0}.arco-table-header{flex-shrink:0;border-radius:var(--border-radius-medium) var(--border-radius-medium) 0 0}.arco-table-container{box-sizing:border-box;width:100%;min-height:0}.arco-table-container .arco-table-content{display:flex;flex-direction:column;width:auto;height:100%}.arco-table-container .arco-table-content-scroll-x{overflow-x:auto;overflow-y:hidden}.arco-table-container:before,.arco-table-container:after{position:absolute;z-index:1;width:10px;height:100%;box-shadow:none;transition:box-shadow .1s cubic-bezier(0,0,1,1);content:"";pointer-events:none}.arco-table-container:before{top:0;left:0;border-top-left-radius:var(--border-radius-medium)}.arco-table-container:after{top:0;right:0;border-top-right-radius:var(--border-radius-medium)}.arco-table-container:not(.arco-table-has-fixed-col-left).arco-table-scroll-position-right:before,.arco-table-container:not(.arco-table-has-fixed-col-left).arco-table-scroll-position-middle:before{box-shadow:inset 6px 0 8px -3px #00000026}.arco-table-container:not(.arco-table-has-fixed-col-right).arco-table-scroll-position-left:after,.arco-table-container:not(.arco-table-has-fixed-col-right).arco-table-scroll-position-middle:after{box-shadow:inset -6px 0 8px -3px 
#00000026}.arco-table-header{overflow-x:hidden;overflow-y:hidden;background-color:var(--color-neutral-2);scrollbar-color:transparent transparent}.arco-table-header-sticky{position:sticky;top:0;z-index:100}.arco-table:not(.arco-table-empty) .arco-table-header::-webkit-scrollbar{height:0;background-color:transparent}.arco-table.arco-table-empty .arco-table-header{overflow-x:auto}.arco-table-body{position:relative;width:100%;min-height:40px;overflow:auto;background-color:var(--color-bg-2)}.arco-table-border .arco-table-container{border-top:1px solid var(--color-neutral-3);border-left:1px solid var(--color-neutral-3)}.arco-table-border .arco-table-scroll-y{border-bottom:1px solid var(--color-neutral-3)}.arco-table-border .arco-table-scroll-y .arco-table-body .arco-table-tr:last-of-type .arco-table-td,.arco-table-border .arco-table-scroll-y tfoot .arco-table-tr:last-of-type .arco-table-td{border-bottom:none}.arco-table-border .arco-table-scroll-y .arco-table-body .arco-table-tr:last-of-type .arco-table-td.arco-table-col-fixed-left-last:after,.arco-table-border .arco-table-scroll-y tfoot .arco-table-tr:last-of-type .arco-table-td.arco-table-col-fixed-left-last:after,.arco-table-border .arco-table-scroll-y .arco-table-body .arco-table-tr:last-of-type .arco-table-td.arco-table-col-fixed-right-first:after,.arco-table-border .arco-table-scroll-y tfoot .arco-table-tr:last-of-type .arco-table-td.arco-table-col-fixed-right-first:after{bottom:0}.arco-table-border .arco-table-tr .arco-table-th{border-bottom:1px solid var(--color-neutral-3)}.arco-table-border .arco-table-footer{border:1px solid var(--color-neutral-3);border-top:0}.arco-table-border:not(.arco-table-border-cell) .arco-table-container{border-right:1px solid var(--color-neutral-3)}.arco-table-border-cell .arco-table-th,.arco-table-border-cell .arco-table-td:not(.arco-table-tr-expand){border-right:1px solid var(--color-neutral-3)}.arco-table-border-cell .arco-table-th-resizing,.arco-table-border-cell .arco-table-td-resizing:not(.arco-table-tr-expand){border-right-color:rgb(var(--primary-6))}.arco-table-border-header-cell .arco-table-th{border-right:1px solid var(--color-neutral-3);border-bottom:1px solid var(--color-neutral-3)}.arco-table-border.arco-table-border-header-cell thead .arco-table-tr:first-child .arco-table-th:last-child{border-right:0}.arco-table-border-body-cell .arco-table-td:not(:last-child):not(.arco-table-tr-expand){border-right:1px solid var(--color-neutral-3)}.arco-table-stripe:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):nth-child(even) .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right),.arco-table-stripe .arco-table-tr-drag .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right){background-color:var(--color-fill-1)}.arco-table-stripe:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):nth-child(even) .arco-table-td.arco-table-col-fixed-left:before,.arco-table-stripe .arco-table-tr-drag .arco-table-td.arco-table-col-fixed-left:before,.arco-table-stripe:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):nth-child(even) .arco-table-td.arco-table-col-fixed-right:before,.arco-table-stripe .arco-table-tr-drag .arco-table-td.arco-table-col-fixed-right:before{position:absolute;top:0;left:0;z-index:-1;width:100%;height:100%;background-color:var(--color-fill-1);content:""}.arco-table 
.arco-table-tr-draggable{cursor:move}.arco-table-hover:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):hover .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right),.arco-table-hover .arco-table-tr-drag .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right){background-color:var(--color-fill-1)}.arco-table-hover:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):hover .arco-table-td.arco-table-col-fixed-left:before,.arco-table-hover .arco-table-tr-drag .arco-table-td.arco-table-col-fixed-left:before,.arco-table-hover:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):hover .arco-table-td.arco-table-col-fixed-right:before,.arco-table-hover .arco-table-tr-drag .arco-table-td.arco-table-col-fixed-right:before{position:absolute;top:0;left:0;z-index:-1;width:100%;height:100%;background-color:var(--color-fill-1);content:""}.arco-table-hover .arco-table-tr-expand:not(.arco-table-tr-empty):hover .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right){background-color:var(--color-fill-1)}.arco-table-tr-expand .arco-table-td .arco-table-hover .arco-table-tr:not(.arco-table-tr-empty) .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right){background-color:transparent}.arco-table-tr-expand .arco-table-td .arco-table-hover .arco-table-tr:not(.arco-table-tr-empty) .arco-table-td.arco-table-col-fixed-left:before,.arco-table-tr-expand .arco-table-td .arco-table-hover .arco-table-tr:not(.arco-table-tr-empty) .arco-table-td.arco-table-col-fixed-right:before{background-color:transparent}.arco-table-tfoot{position:relative;z-index:1;flex-shrink:0;width:100%;overflow-x:auto;background-color:var(--color-neutral-2);box-shadow:0 -1px 0 var(--color-neutral-3);scrollbar-color:transparent transparent}.arco-table-tfoot::-webkit-scrollbar{height:0;background-color:transparent}.arco-table tfoot .arco-table-td{background-color:var(--color-neutral-2)}.arco-table-tr-checked .arco-table-td{background-color:var(--color-fill-1)}.arco-table .arco-table-cell{padding:9px 16px}.arco-table .arco-table-th,.arco-table .arco-table-td{font-size:14px}.arco-table .arco-table-footer{padding:9px 16px}.arco-table .arco-table-tr-expand .arco-table-td .arco-table{margin:-9px -16px -10px}.arco-table .arco-table-editable-row .arco-table-cell-wrap-value{padding:9px 16px}.arco-table-size-medium .arco-table-cell{padding:7px 16px}.arco-table-size-medium .arco-table-th,.arco-table-size-medium .arco-table-td{font-size:14px}.arco-table-size-medium .arco-table-footer{padding:7px 16px}.arco-table-size-medium .arco-table-tr-expand .arco-table-td .arco-table{margin:-7px -16px -8px}.arco-table-size-medium .arco-table-editable-row .arco-table-cell-wrap-value{padding:7px 16px}.arco-table-size-small .arco-table-cell{padding:5px 16px}.arco-table-size-small .arco-table-th,.arco-table-size-small .arco-table-td{font-size:14px}.arco-table-size-small .arco-table-footer{padding:5px 16px}.arco-table-size-small .arco-table-tr-expand .arco-table-td .arco-table{margin:-5px -16px -6px}.arco-table-size-small .arco-table-editable-row .arco-table-cell-wrap-value{padding:5px 16px}.arco-table-size-mini .arco-table-cell{padding:2px 16px}.arco-table-size-mini .arco-table-th,.arco-table-size-mini .arco-table-td{font-size:12px}.arco-table-size-mini .arco-table-footer{padding:2px 16px}.arco-table-size-mini .arco-table-tr-expand .arco-table-td 
.arco-table{margin:-2px -16px -3px}.arco-table-size-mini .arco-table-editable-row .arco-table-cell-wrap-value{padding:2px 16px}.arco-table-virtualized .arco-table-element{table-layout:fixed}.arco-table-virtualized div.arco-table-body div.arco-table-tr{display:flex}.arco-table-virtualized div.arco-table-body div.arco-table-td{display:flex;flex:1;align-items:center}.arco-table-pagination{display:flex;align-items:center;justify-content:flex-end;margin-top:12px}.arco-table-pagination-left{justify-content:flex-start}.arco-table-pagination-center{justify-content:center}.arco-table-pagination-top{margin-top:0;margin-bottom:12px}.arco-icon-hover.arco-tabs-icon-hover:before{width:16px;height:16px}.arco-tabs .arco-tabs-icon-hover{color:var(--color-text-2);font-size:12px;user-select:none}.arco-tabs-dropdown-icon{margin-left:6px;font-size:12px;user-select:none}.arco-tabs-tab-close-btn{margin-left:8px;user-select:none}.arco-tabs-nav-add-btn{display:inline-flex;align-items:center;justify-content:center;padding:0 8px;font-size:12px;user-select:none}.arco-tabs-add{position:relative}.arco-tabs-nav-button-left{margin-right:6px;margin-left:10px}.arco-tabs-nav-button-right{margin-right:10px;margin-left:6px}.arco-tabs-nav-button-up{margin-bottom:10px}.arco-tabs-nav-button-down{margin-top:10px}.arco-tabs-nav-button-disabled{color:var(--color-text-4);cursor:not-allowed}.arco-tabs{position:relative;overflow:hidden}.arco-tabs-nav{position:relative;flex-shrink:0}.arco-tabs-nav:before{position:absolute;right:0;bottom:0;left:0;display:block;clear:both;height:1px;background-color:var(--color-neutral-3);content:""}.arco-tabs-nav-tab{display:flex;flex:1;overflow:hidden}.arco-tabs-nav-tab-list{position:relative;display:inline-block;white-space:nowrap;transition:transform .2s cubic-bezier(.34,.69,.1,1)}.arco-tabs-nav-extra{display:flex;align-items:center;width:auto;line-height:32px}.arco-tabs-nav-extra .arco-tabs-nav-add-btn{padding-left:0}.arco-tabs-tab{display:inline-flex;align-items:center;box-sizing:border-box;padding:4px 0;color:var(--color-text-2);font-size:14px;line-height:1.5715;outline:none;cursor:pointer;transition:color .2s cubic-bezier(0,0,1,1)}.arco-tabs-tab-title{display:inline-block}.arco-tabs-tab:hover{color:var(--color-text-2);font-weight:400}.arco-tabs-tab-disabled,.arco-tabs-tab-disabled:hover{color:var(--color-text-4);cursor:not-allowed}.arco-tabs-tab-active,.arco-tabs-tab-active:hover{color:rgb(var(--primary-6));font-weight:500}.arco-tabs-tab-active.arco-tabs-tab-disabled,.arco-tabs-tab-active:hover.arco-tabs-tab-disabled{color:var(--color-primary-light-3)}.arco-tabs-nav-ink{position:absolute;top:initial;right:initial;bottom:0;height:2px;background-color:rgb(var(--primary-6));transition:left .2s cubic-bezier(.34,.69,.1,1),width .2s cubic-bezier(.34,.69,.1,1)}.arco-tabs-nav-ink.arco-tabs-header-ink-no-animation{transition:none}.arco-tabs-nav-ink-disabled{background-color:var(--color-primary-light-3)}.arco-tabs-nav-type-line .arco-tabs-nav-extra{line-height:40px}.arco-tabs-nav-type-line .arco-tabs-tab{margin:0 16px;padding:8px 0;line-height:1.5715}.arco-tabs-nav-type-line .arco-tabs-tab-title{position:relative;display:inline-block;padding:1px 0}.arco-tabs-nav-type-line .arco-tabs-tab-title:before{position:absolute;top:0;right:-8px;bottom:0;left:-8px;z-index:-1;background-color:transparent;border-radius:var(--border-radius-small);opacity:1;transition:background-color,opacity .2s cubic-bezier(0,0,1,1);content:""}.arco-tabs-nav-type-line .arco-tabs-tab:hover 
.arco-tabs-tab-title:before{background-color:var(--color-fill-2)}.arco-tabs-nav-type-line .arco-tabs-tab-active .arco-tabs-tab-title:before,.arco-tabs-nav-type-line .arco-tabs-tab-active:hover .arco-tabs-tab-title:before{background-color:transparent}.arco-tabs-nav-type-line .arco-tabs-tab-disabled .arco-tabs-tab-title:before,.arco-tabs-nav-type-line .arco-tabs-tab-disabled:hover .arco-tabs-tab-title:before{opacity:0}.arco-tabs-nav-type-line .arco-tabs-tab:focus-visible .arco-tabs-tab-title:before{border:2px solid rgb(var(--primary-6))}.arco-tabs-nav-type-line.arco-tabs-nav-horizontal>.arco-tabs-tab:first-of-type{margin-left:16px}.arco-tabs-nav-type-line.arco-tabs-nav-horizontal .arco-tabs-nav-tab-list-no-padding>.arco-tabs-tab:first-of-type,.arco-tabs-nav-text.arco-tabs-nav-horizontal .arco-tabs-nav-tab-list-no-padding>.arco-tabs-tab:first-of-type{margin-left:0}.arco-tabs-nav-type-card .arco-tabs-tab,.arco-tabs-nav-type-card-gutter .arco-tabs-tab{position:relative;padding:4px 16px;font-size:14px;border:1px solid var(--color-neutral-3);transition:padding .2s cubic-bezier(0,0,1,1),color .2s cubic-bezier(0,0,1,1)}.arco-tabs-nav-type-card .arco-tabs-tab-closable,.arco-tabs-nav-type-card-gutter .arco-tabs-tab-closable{padding-right:12px}.arco-tabs-nav-type-card .arco-tabs-tab-closable:not(.arco-tabs-tab-active):hover .arco-icon-hover:hover:before,.arco-tabs-nav-type-card-gutter .arco-tabs-tab-closable:not(.arco-tabs-tab-active):hover .arco-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-tabs-nav-type-card .arco-tabs-tab:focus-visible:before,.arco-tabs-nav-type-card-gutter .arco-tabs-tab:focus-visible:before{position:absolute;top:-1px;right:0;bottom:-1px;left:-1px;border:2px solid rgb(var(--primary-6));content:""}.arco-tabs-nav-type-card .arco-tabs-tab:last-child:focus-visible:before,.arco-tabs-nav-type-card-gutter .arco-tabs-tab:last-child:focus-visible:before{right:-1px}.arco-tabs-nav-type-card .arco-tabs-nav-add-btn,.arco-tabs-nav-type-card-gutter .arco-tabs-nav-add-btn{height:32px}.arco-tabs-nav-type-card .arco-tabs-tab{background-color:transparent;border-right:none}.arco-tabs-nav-type-card .arco-tabs-tab:last-child{border-right:1px solid var(--color-neutral-3);border-top-right-radius:var(--border-radius-small)}.arco-tabs-nav-type-card .arco-tabs-tab:first-child{border-top-left-radius:var(--border-radius-small)}.arco-tabs-nav-type-card .arco-tabs-tab:hover{background-color:var(--color-fill-3)}.arco-tabs-nav-type-card .arco-tabs-tab-disabled,.arco-tabs-nav-type-card .arco-tabs-tab-disabled:hover{background-color:transparent}.arco-tabs-nav-type-card .arco-tabs-tab-active,.arco-tabs-nav-type-card .arco-tabs-tab-active:hover{background-color:transparent;border-bottom-color:var(--color-bg-2)}.arco-tabs-nav-type-card-gutter .arco-tabs-tab{margin-left:4px;background-color:var(--color-fill-1);border-right:1px solid var(--color-neutral-3);border-radius:var(--border-radius-small) var(--border-radius-small) 0 0}.arco-tabs-nav-type-card-gutter .arco-tabs-tab:hover{background-color:var(--color-fill-3)}.arco-tabs-nav-type-card-gutter .arco-tabs-tab-disabled,.arco-tabs-nav-type-card-gutter .arco-tabs-tab-disabled:hover{background-color:var(--color-fill-1)}.arco-tabs-nav-type-card-gutter .arco-tabs-tab-active,.arco-tabs-nav-type-card-gutter .arco-tabs-tab-active:hover{background-color:transparent;border-bottom-color:var(--color-bg-2)}.arco-tabs-nav-type-card-gutter .arco-tabs-tab:first-child{margin-left:0}.arco-tabs-nav-type-text:before{display:none}.arco-tabs-nav-type-text 
.arco-tabs-tab{position:relative;margin:0 9px;padding:5px 0;font-size:14px;line-height:1.5715}.arco-tabs-nav-type-text .arco-tabs-tab:not(:first-of-type):before{position:absolute;top:50%;left:-9px;display:block;width:2px;height:12px;background-color:var(--color-fill-3);transform:translateY(-50%);content:""}.arco-tabs-nav-type-text .arco-tabs-tab-title{padding-right:8px;padding-left:8px;background-color:transparent}.arco-tabs-nav-type-text .arco-tabs-tab-title:hover{background-color:var(--color-fill-2)}.arco-tabs-nav-type-text .arco-tabs-tab-active .arco-tabs-tab-title,.arco-tabs-nav-type-text .arco-tabs-tab-active .arco-tabs-tab-title:hover,.arco-tabs-nav-type-text .arco-tabs-tab-disabled .arco-tabs-tab-title,.arco-tabs-nav-type-text .arco-tabs-tab-disabled .arco-tabs-tab-title:hover{background-color:transparent}.arco-tabs-nav-type-text .arco-tabs-tab-active.arco-tabs-nav-type-text .arco-tabs-tab-disabled .arco-tabs-tab-title,.arco-tabs-nav-type-text .arco-tabs-tab-active.arco-tabs-nav-type-text .arco-tabs-tab-disabled .arco-tabs-tab-title:hover{background-color:var(--color-primary-light-3)}.arco-tabs-nav-type-text .arco-tabs-tab:focus-visible .arco-tabs-tab-title{margin:-2px;border:2px solid rgb(var(--primary-6))}.arco-tabs-nav-type-rounded:before{display:none}.arco-tabs-nav-type-rounded .arco-tabs-tab{margin:0 6px;padding:5px 16px;font-size:14px;background-color:transparent;border-radius:32px}.arco-tabs-nav-type-rounded .arco-tabs-tab:hover{background-color:var(--color-fill-2)}.arco-tabs-nav-type-rounded .arco-tabs-tab-disabled:hover{background-color:transparent}.arco-tabs-nav-type-rounded .arco-tabs-tab-active,.arco-tabs-nav-type-rounded .arco-tabs-tab-active:hover{background-color:var(--color-fill-2)}.arco-tabs-nav-type-rounded .arco-tabs-tab:focus-visible{border-color:rgb(var(--primary-6))}.arco-tabs-nav-type-capsule:before{display:none}.arco-tabs-nav-type-capsule .arco-tabs-nav-tab:not(.arco-tabs-nav-tab-scroll){justify-content:flex-end}.arco-tabs-nav-type-capsule .arco-tabs-nav-tab-list{padding:3px;line-height:1;background-color:var(--color-fill-2);border-radius:var(--border-radius-small)}.arco-tabs-nav-type-capsule .arco-tabs-tab{position:relative;padding:0 10px;font-size:14px;line-height:26px;background-color:transparent}.arco-tabs-nav-type-capsule .arco-tabs-tab:hover{background-color:var(--color-bg-2)}.arco-tabs-nav-type-capsule .arco-tabs-tab-disabled:hover{background-color:unset}.arco-tabs-nav-type-capsule .arco-tabs-tab-active,.arco-tabs-nav-type-capsule .arco-tabs-tab-active:hover{background-color:var(--color-bg-2)}.arco-tabs-nav-type-capsule .arco-tabs-tab-active:before,.arco-tabs-nav-type-capsule .arco-tabs-tab-active:hover:before,.arco-tabs-nav-type-capsule .arco-tabs-tab-active+.arco-tabs-tab:before,.arco-tabs-nav-type-capsule .arco-tabs-tab-active:hover+.arco-tabs-tab:before{opacity:0}.arco-tabs-nav-type-capsule .arco-tabs-tab:focus-visible{border-color:rgb(var(--primary-6))}.arco-tabs-nav-type-capsule.arco-tabs-nav-horizontal .arco-tabs-tab:not(:first-of-type){margin-left:3px}.arco-tabs-nav-type-capsule.arco-tabs-nav-horizontal .arco-tabs-tab:not(:first-of-type):before{position:absolute;top:50%;left:-4px;display:block;width:1px;height:14px;background-color:var(--color-fill-3);transform:translateY(-50%);transition:all .2s 
cubic-bezier(0,0,1,1);content:""}.arco-tabs-nav{position:relative;display:flex;align-items:center;overflow:hidden}.arco-tabs-content{box-sizing:border-box;width:100%;padding-top:16px;overflow:hidden}.arco-tabs-content-hide{display:none}.arco-tabs-content .arco-tabs-content-list{display:flex;width:100%}.arco-tabs-content .arco-tabs-content-item{flex-shrink:0;width:100%;height:0;overflow:hidden}.arco-tabs-content .arco-tabs-content-item.arco-tabs-content-item-active{height:auto}.arco-tabs-type-card>.arco-tabs-content,.arco-tabs-type-card-gutter>.arco-tabs-content{border:1px solid var(--color-neutral-3);border-top:none}.arco-tabs-content-animation{transition:all .2s cubic-bezier(.34,.69,.1,1)}.arco-tabs-horizontal.arco-tabs-justify{display:flex;flex-direction:column;height:100%}.arco-tabs-horizontal.arco-tabs-justify .arco-tabs-content,.arco-tabs-horizontal.arco-tabs-justify .arco-tabs-content-list,.arco-tabs-horizontal.arco-tabs-justify .arco-tabs-pane{height:100%}.arco-tabs-nav-size-mini.arco-tabs-nav-type-line .arco-tabs-tab{padding-top:6px;padding-bottom:6px;font-size:12px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-line .arco-tabs-nav-extra{font-size:12px;line-height:32px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-card .arco-tabs-tab,.arco-tabs-nav-size-mini.arco-tabs-nav-type-card-gutter .arco-tabs-tab{padding-top:1px;padding-bottom:1px;font-size:12px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-card .arco-tabs-nav-extra,.arco-tabs-nav-size-mini.arco-tabs-nav-type-card-gutter .arco-tabs-nav-extra{font-size:12px;line-height:24px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-card .arco-tabs-nav-add-btn,.arco-tabs-nav-size-mini.arco-tabs-nav-type-card-gutter .arco-tabs-nav-add-btn{height:24px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-capsule .arco-tabs-tab{font-size:12px;line-height:18px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-capsule .arco-tabs-nav-extra{font-size:12px;line-height:24px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-rounded .arco-tabs-tab{padding-top:3px;padding-bottom:3px;font-size:12px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-rounded .arco-tabs-nav-extra{font-size:12px;line-height:24px}.arco-tabs-nav-size-small.arco-tabs-nav-type-line .arco-tabs-tab{padding-top:6px;padding-bottom:6px;font-size:14px}.arco-tabs-nav-size-small.arco-tabs-nav-type-line .arco-tabs-nav-extra{font-size:14px;line-height:36px}.arco-tabs-nav-size-small.arco-tabs-nav-type-card .arco-tabs-tab,.arco-tabs-nav-size-small.arco-tabs-nav-type-card-gutter .arco-tabs-tab{padding-top:1px;padding-bottom:1px;font-size:14px}.arco-tabs-nav-size-small.arco-tabs-nav-type-card .arco-tabs-nav-extra,.arco-tabs-nav-size-small.arco-tabs-nav-type-card-gutter .arco-tabs-nav-extra{font-size:14px;line-height:28px}.arco-tabs-nav-size-small.arco-tabs-nav-type-card .arco-tabs-nav-add-btn,.arco-tabs-nav-size-small.arco-tabs-nav-type-card-gutter .arco-tabs-nav-add-btn{height:28px}.arco-tabs-nav-size-small.arco-tabs-nav-type-capsule .arco-tabs-tab{font-size:14px;line-height:22px}.arco-tabs-nav-size-small.arco-tabs-nav-type-capsule .arco-tabs-nav-extra{font-size:14px;line-height:28px}.arco-tabs-nav-size-small.arco-tabs-nav-type-rounded .arco-tabs-tab{padding-top:3px;padding-bottom:3px;font-size:14px}.arco-tabs-nav-size-small.arco-tabs-nav-type-rounded .arco-tabs-nav-extra{font-size:14px;line-height:28px}.arco-tabs-nav-size-large.arco-tabs-nav-type-line .arco-tabs-tab{padding-top:10px;padding-bottom:10px;font-size:14px}.arco-tabs-nav-size-large.arco-tabs-nav-type-line 
.arco-tabs-nav-extra{font-size:14px;line-height:44px}.arco-tabs-nav-size-large.arco-tabs-nav-type-card .arco-tabs-tab,.arco-tabs-nav-size-large.arco-tabs-nav-type-card-gutter .arco-tabs-tab{padding-top:5px;padding-bottom:5px;font-size:14px}.arco-tabs-nav-size-large.arco-tabs-nav-type-card .arco-tabs-nav-extra,.arco-tabs-nav-size-large.arco-tabs-nav-type-card-gutter .arco-tabs-nav-extra{font-size:14px;line-height:36px}.arco-tabs-nav-size-large.arco-tabs-nav-type-card .arco-tabs-nav-add-btn,.arco-tabs-nav-size-large.arco-tabs-nav-type-card-gutter .arco-tabs-nav-add-btn{height:36px}.arco-tabs-nav-size-large.arco-tabs-nav-type-capsule .arco-tabs-tab{font-size:14px;line-height:30px}.arco-tabs-nav-size-large.arco-tabs-nav-type-capsule .arco-tabs-nav-extra{font-size:14px;line-height:36px}.arco-tabs-nav-size-large.arco-tabs-nav-type-rounded .arco-tabs-tab{padding-top:7px;padding-bottom:7px;font-size:14px}.arco-tabs-nav-size-large.arco-tabs-nav-type-rounded .arco-tabs-nav-extra{font-size:14px;line-height:36px}.arco-tabs-nav-vertical{float:left;height:100%}.arco-tabs-nav-vertical:before{position:absolute;top:0;right:0;bottom:0;left:initial;clear:both;width:1px;height:100%}.arco-tabs-nav-vertical .arco-tabs-nav-add-btn{height:auto;margin-top:8px;margin-left:0;padding:0 16px}.arco-tabs-nav-right{float:right}.arco-tabs-nav-vertical{flex-direction:column}.arco-tabs-nav-vertical .arco-tabs-nav-tab{flex-direction:column;height:100%}.arco-tabs-nav-vertical .arco-tabs-nav-ink{position:absolute;right:0;bottom:initial;left:initial;width:2px;transition:top .2s cubic-bezier(.34,.69,.1,1),height .2s cubic-bezier(.34,.69,.1,1)}.arco-tabs-nav-vertical .arco-tabs-nav-tab-list{height:auto}.arco-tabs-nav-vertical .arco-tabs-nav-tab-list-overflow-scroll{padding:6px 0}.arco-tabs-nav-vertical .arco-tabs-tab{display:block;margin:12px 0 0;white-space:nowrap}.arco-tabs-nav-vertical .arco-tabs-tab:first-of-type{margin-top:0}.arco-tabs-nav-right:before{right:unset;left:0}.arco-tabs-nav-right .arco-tabs-nav-ink{right:unset;left:0}.arco-tabs-nav-vertical{position:relative;box-sizing:border-box;height:100%}.arco-tabs-nav-vertical.arco-tabs-nav-type-line .arco-tabs-tab{padding:0 20px}.arco-tabs-nav-vertical.arco-tabs-nav-type-card .arco-tabs-tab{position:relative;margin:0;border:1px solid var(--color-neutral-3);border-bottom-color:transparent}.arco-tabs-nav-vertical.arco-tabs-nav-type-card .arco-tabs-tab:first-child{border-top-left-radius:var(--border-radius-small)}.arco-tabs-nav-vertical.arco-tabs-nav-type-card .arco-tabs-tab-active,.arco-tabs-nav-vertical.arco-tabs-nav-type-card .arco-tabs-tab-active:hover{border-right-color:var(--color-bg-2);border-bottom-color:transparent}.arco-tabs-nav-vertical.arco-tabs-nav-type-card .arco-tabs-tab:last-child{border-bottom:1px solid var(--color-neutral-3);border-bottom-left-radius:var(--border-radius-small)}.arco-tabs-nav-vertical.arco-tabs-nav-type-card-gutter .arco-tabs-tab{position:relative;margin-left:0;border-radius:var(--border-radius-small) 0 0 var(--border-radius-small)}.arco-tabs-nav-vertical.arco-tabs-nav-type-card-gutter .arco-tabs-tab:not(:first-of-type){margin-top:4px}.arco-tabs-nav-vertical.arco-tabs-nav-type-card-gutter .arco-tabs-tab-active,.arco-tabs-nav-vertical.arco-tabs-nav-type-card-gutter .arco-tabs-tab-active:hover{border-right-color:var(--color-bg-2);border-bottom-color:var(--color-neutral-3)}.arco-tabs-vertical .arco-tabs-content{width:auto;height:100%;padding:0}.arco-tabs-right.arco-tabs-vertical 
.arco-tabs-content{padding-right:16px}.arco-tabs-left.arco-tabs-vertical .arco-tabs-content{padding-left:16px}.arco-tabs-vertical.arco-tabs-type-card>.arco-tabs-content,.arco-tabs-vertical.arco-tabs-type-card-gutter>.arco-tabs-content{border:1px solid var(--color-neutral-3);border-left:none}body[arco-theme=dark] .arco-tabs-nav-type-capsule .arco-tabs-tab-active,body[arco-theme=dark] .arco-tabs-nav-type-capsule .arco-tabs-tab:hover{background-color:var(--color-fill-3)}.arco-tag{display:inline-flex;align-items:center;box-sizing:border-box;height:24px;padding:0 8px;color:var(--color-text-1);font-weight:500;font-size:12px;line-height:22px;vertical-align:middle;border:1px solid transparent;border-radius:var(--border-radius-small);overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-tag .arco-icon-hover.arco-tag-icon-hover:before{width:16px;height:16px}.arco-tag .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:var(--color-fill-3)}.arco-tag-checkable{cursor:pointer;transition:all .1s cubic-bezier(0,0,1,1)}.arco-tag-checkable:hover{background-color:var(--color-fill-2)}.arco-tag-checked{background-color:var(--color-fill-2);border-color:transparent}.arco-tag-checkable.arco-tag-checked:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-tag-bordered,.arco-tag-checkable.arco-tag-checked.arco-tag-bordered:hover{border-color:var(--color-border-2)}.arco-tag-size-small{height:20px;font-size:12px;line-height:18px}.arco-tag-size-medium{height:24px;font-size:12px;line-height:22px}.arco-tag-size-large{height:32px;font-size:14px;line-height:30px}.arco-tag-hide{display:none}.arco-tag-loading{cursor:default;opacity:.8}.arco-tag-icon{margin-right:4px;color:var(--color-text-2)}.arco-tag.arco-tag-checked.arco-tag-red{color:rgb(var(--red-6));background-color:rgb(var(--red-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-red .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--red-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-red.arco-tag:hover{background-color:rgb(var(--red-2));border-color:transparent}.arco-tag-checked.arco-tag-red.arco-tag-bordered,.arco-tag-checked.arco-tag-red.arco-tag-bordered:hover{border-color:rgb(var(--red-6))}.arco-tag.arco-tag-checked.arco-tag-red .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-red .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-red .arco-tag-loading-icon{color:rgb(var(--red-6))}.arco-tag.arco-tag-checked.arco-tag-orangered{color:rgb(var(--orangered-6));background-color:rgb(var(--orangered-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-orangered .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--orangered-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-orangered.arco-tag:hover{background-color:rgb(var(--orangered-2));border-color:transparent}.arco-tag-checked.arco-tag-orangered.arco-tag-bordered,.arco-tag-checked.arco-tag-orangered.arco-tag-bordered:hover{border-color:rgb(var(--orangered-6))}.arco-tag.arco-tag-checked.arco-tag-orangered .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-orangered .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-orangered .arco-tag-loading-icon{color:rgb(var(--orangered-6))}.arco-tag.arco-tag-checked.arco-tag-orange{color:rgb(var(--orange-6));background-color:rgb(var(--orange-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-orange 
.arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--orange-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-orange.arco-tag:hover{background-color:rgb(var(--orange-2));border-color:transparent}.arco-tag-checked.arco-tag-orange.arco-tag-bordered,.arco-tag-checked.arco-tag-orange.arco-tag-bordered:hover{border-color:rgb(var(--orange-6))}.arco-tag.arco-tag-checked.arco-tag-orange .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-orange .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-orange .arco-tag-loading-icon{color:rgb(var(--orange-6))}.arco-tag.arco-tag-checked.arco-tag-gold{color:rgb(var(--gold-6));background-color:rgb(var(--gold-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-gold .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--gold-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-gold.arco-tag:hover{background-color:rgb(var(--gold-3));border-color:transparent}.arco-tag-checked.arco-tag-gold.arco-tag-bordered,.arco-tag-checked.arco-tag-gold.arco-tag-bordered:hover{border-color:rgb(var(--gold-6))}.arco-tag.arco-tag-checked.arco-tag-gold .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-gold .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-gold .arco-tag-loading-icon{color:rgb(var(--gold-6))}.arco-tag.arco-tag-checked.arco-tag-lime{color:rgb(var(--lime-6));background-color:rgb(var(--lime-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-lime .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--lime-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-lime.arco-tag:hover{background-color:rgb(var(--lime-2));border-color:transparent}.arco-tag-checked.arco-tag-lime.arco-tag-bordered,.arco-tag-checked.arco-tag-lime.arco-tag-bordered:hover{border-color:rgb(var(--lime-6))}.arco-tag.arco-tag-checked.arco-tag-lime .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-lime .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-lime .arco-tag-loading-icon{color:rgb(var(--lime-6))}.arco-tag.arco-tag-checked.arco-tag-green{color:rgb(var(--green-6));background-color:rgb(var(--green-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-green .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--green-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-green.arco-tag:hover{background-color:rgb(var(--green-2));border-color:transparent}.arco-tag-checked.arco-tag-green.arco-tag-bordered,.arco-tag-checked.arco-tag-green.arco-tag-bordered:hover{border-color:rgb(var(--green-6))}.arco-tag.arco-tag-checked.arco-tag-green .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-green .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-green .arco-tag-loading-icon{color:rgb(var(--green-6))}.arco-tag.arco-tag-checked.arco-tag-cyan{color:rgb(var(--cyan-6));background-color:rgb(var(--cyan-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-cyan .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--cyan-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-cyan.arco-tag:hover{background-color:rgb(var(--cyan-2));border-color:transparent}.arco-tag-checked.arco-tag-cyan.arco-tag-bordered,.arco-tag-checked.arco-tag-cyan.arco-tag-bordered:hover{border-color:rgb(var(--cyan-6))}.arco-tag.arco-tag-checked.arco-tag-cyan .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-cyan .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-cyan 
.arco-tag-loading-icon{color:rgb(var(--cyan-6))}.arco-tag.arco-tag-checked.arco-tag-blue{color:rgb(var(--blue-6));background-color:rgb(var(--blue-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-blue .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--blue-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-blue.arco-tag:hover{background-color:rgb(var(--blue-2));border-color:transparent}.arco-tag-checked.arco-tag-blue.arco-tag-bordered,.arco-tag-checked.arco-tag-blue.arco-tag-bordered:hover{border-color:rgb(var(--blue-6))}.arco-tag.arco-tag-checked.arco-tag-blue .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-blue .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-blue .arco-tag-loading-icon{color:rgb(var(--blue-6))}.arco-tag.arco-tag-checked.arco-tag-arcoblue{color:rgb(var(--arcoblue-6));background-color:rgb(var(--arcoblue-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-arcoblue .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--arcoblue-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-arcoblue.arco-tag:hover{background-color:rgb(var(--arcoblue-2));border-color:transparent}.arco-tag-checked.arco-tag-arcoblue.arco-tag-bordered,.arco-tag-checked.arco-tag-arcoblue.arco-tag-bordered:hover{border-color:rgb(var(--arcoblue-6))}.arco-tag.arco-tag-checked.arco-tag-arcoblue .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-arcoblue .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-arcoblue .arco-tag-loading-icon{color:rgb(var(--arcoblue-6))}.arco-tag.arco-tag-checked.arco-tag-purple{color:rgb(var(--purple-6));background-color:rgb(var(--purple-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-purple .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--purple-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-purple.arco-tag:hover{background-color:rgb(var(--purple-2));border-color:transparent}.arco-tag-checked.arco-tag-purple.arco-tag-bordered,.arco-tag-checked.arco-tag-purple.arco-tag-bordered:hover{border-color:rgb(var(--purple-6))}.arco-tag.arco-tag-checked.arco-tag-purple .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-purple .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-purple .arco-tag-loading-icon{color:rgb(var(--purple-6))}.arco-tag.arco-tag-checked.arco-tag-pinkpurple{color:rgb(var(--pinkpurple-6));background-color:rgb(var(--pinkpurple-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-pinkpurple .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--pinkpurple-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-pinkpurple.arco-tag:hover{background-color:rgb(var(--pinkpurple-2));border-color:transparent}.arco-tag-checked.arco-tag-pinkpurple.arco-tag-bordered,.arco-tag-checked.arco-tag-pinkpurple.arco-tag-bordered:hover{border-color:rgb(var(--pinkpurple-6))}.arco-tag.arco-tag-checked.arco-tag-pinkpurple .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-pinkpurple .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-pinkpurple .arco-tag-loading-icon{color:rgb(var(--pinkpurple-6))}.arco-tag.arco-tag-checked.arco-tag-magenta{color:rgb(var(--magenta-6));background-color:rgb(var(--magenta-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-magenta 
.arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--magenta-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-magenta.arco-tag:hover{background-color:rgb(var(--magenta-2));border-color:transparent}.arco-tag-checked.arco-tag-magenta.arco-tag-bordered,.arco-tag-checked.arco-tag-magenta.arco-tag-bordered:hover{border-color:rgb(var(--magenta-6))}.arco-tag.arco-tag-checked.arco-tag-magenta .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-magenta .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-magenta .arco-tag-loading-icon{color:rgb(var(--magenta-6))}.arco-tag.arco-tag-checked.arco-tag-gray{color:rgb(var(--gray-6));background-color:rgb(var(--gray-2));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-gray .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--gray-3))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-gray.arco-tag:hover{background-color:rgb(var(--gray-3));border-color:transparent}.arco-tag-checked.arco-tag-gray.arco-tag-bordered,.arco-tag-checked.arco-tag-gray.arco-tag-bordered:hover{border-color:rgb(var(--gray-6))}.arco-tag.arco-tag-checked.arco-tag-gray .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-gray .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-gray .arco-tag-loading-icon{color:rgb(var(--gray-6))}.arco-tag.arco-tag-custom-color{color:var(--color-white)}.arco-tag.arco-tag-custom-color .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:#fff3}.arco-tag .arco-tag-close-btn{margin-left:4px;font-size:12px}.arco-tag .arco-tag-close-btn>svg{position:relative}.arco-tag .arco-tag-loading-icon{margin-left:4px;font-size:12px}body[arco-theme=dark] .arco-tag-checked{color:#ffffffe6}body[arco-theme=dark] .arco-tag-checked.arco-tag-red{background-color:rgba(var(--red-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-red .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--red-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-red:hover{background-color:rgba(var(--red-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-orangered{background-color:rgba(var(--orangered-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-orangered .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--orangered-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-orangered:hover{background-color:rgba(var(--orangered-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-orange{background-color:rgba(var(--orange-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-orange .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--orange-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-orange:hover{background-color:rgba(var(--orange-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-gold{background-color:rgba(var(--gold-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-gold .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--gold-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-gold:hover{background-color:rgba(var(--gold-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-lime{background-color:rgba(var(--lime-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-lime .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--lime-6),.35)}body[arco-theme=dark] 
.arco-tag-checkable.arco-tag-checked.arco-tag-lime:hover{background-color:rgba(var(--lime-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-green{background-color:rgba(var(--green-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-green .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--green-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-green:hover{background-color:rgba(var(--green-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-cyan{background-color:rgba(var(--cyan-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-cyan .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--cyan-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-cyan:hover{background-color:rgba(var(--cyan-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-blue{background-color:rgba(var(--blue-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-blue .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--blue-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-blue:hover{background-color:rgba(var(--blue-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-arcoblue{background-color:rgba(var(--arcoblue-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-arcoblue .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--arcoblue-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-arcoblue:hover{background-color:rgba(var(--arcoblue-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-purple{background-color:rgba(var(--purple-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-purple .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--purple-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-purple:hover{background-color:rgba(var(--purple-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-pinkpurple{background-color:rgba(var(--pinkpurple-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-pinkpurple .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--pinkpurple-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-pinkpurple:hover{background-color:rgba(var(--pinkpurple-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-magenta{background-color:rgba(var(--magenta-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-magenta .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--magenta-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-magenta:hover{background-color:rgba(var(--magenta-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-gray{background-color:rgba(var(--gray-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-gray .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--gray-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-gray:hover{background-color:rgba(var(--gray-6),.35)}.arco-textarea-wrapper{display:inline-flex;box-sizing:border-box;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s 
cubic-bezier(0,0,1,1);position:relative;display:inline-block;width:100%;padding-right:0;padding-left:0;overflow:hidden}.arco-textarea-wrapper:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-textarea-wrapper:focus-within,.arco-textarea-wrapper.arco-textarea-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-textarea-wrapper.arco-textarea-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-textarea-wrapper.arco-textarea-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-textarea-wrapper.arco-textarea-disabled .arco-textarea-prefix,.arco-textarea-wrapper.arco-textarea-disabled .arco-textarea-suffix{color:inherit}.arco-textarea-wrapper.arco-textarea-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-textarea-wrapper.arco-textarea-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-textarea-wrapper.arco-textarea-error:focus-within,.arco-textarea-wrapper.arco-textarea-error.arco-textarea-wrapper-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-textarea-wrapper .arco-textarea-prefix,.arco-textarea-wrapper .arco-textarea-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-textarea-wrapper .arco-textarea-prefix>svg,.arco-textarea-wrapper .arco-textarea-suffix>svg{font-size:14px}.arco-textarea-wrapper .arco-textarea-prefix{padding-right:12px;color:var(--color-text-2)}.arco-textarea-wrapper .arco-textarea-suffix{padding-left:12px;color:var(--color-text-2)}.arco-textarea-wrapper .arco-textarea-suffix .arco-feedback-icon{display:inline-flex}.arco-textarea-wrapper .arco-textarea-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-textarea-wrapper .arco-textarea-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-textarea-wrapper .arco-textarea-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-textarea-wrapper .arco-textarea-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-textarea-wrapper .arco-textarea-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-textarea-wrapper .arco-textarea-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-textarea-wrapper:hover .arco-textarea-clear-btn{visibility:visible}.arco-textarea-wrapper:not(.arco-textarea-focus) .arco-textarea-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-textarea-wrapper .arco-textarea-word-limit{position:absolute;right:10px;bottom:6px;color:var(--color-text-3);font-size:12px;user-select:none}.arco-textarea-wrapper.arco-textarea-scroll .arco-textarea-word-limit{right:25px}.arco-textarea-wrapper .arco-textarea-clear-btn{position:absolute;top:50%;right:10px;transform:translateY(-50%)}.arco-textarea-wrapper.arco-textarea-scroll .arco-textarea-clear-btn{right:25px}.arco-textarea-wrapper:hover .arco-textarea-clear-btn{display:block}.arco-textarea-wrapper 
.arco-textarea-mirror{position:absolute;visibility:hidden}.arco-textarea{width:100%;color:inherit;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0);display:block;box-sizing:border-box;height:100%;min-height:32px;padding:4px 12px;font-size:14px;line-height:1.5715;vertical-align:top;resize:vertical}.arco-textarea::placeholder{color:var(--color-text-3)}.arco-textarea[disabled]::placeholder{color:var(--color-text-4)}.arco-textarea[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-timepicker{position:relative;display:flex;box-sizing:border-box;padding:0}.arco-timepicker-container{overflow:hidden;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 2px 5px #0000001a}.arco-timepicker-column{box-sizing:border-box;width:64px;height:224px;overflow:hidden}.arco-timepicker-column:not(:last-child){border-right:1px solid var(--color-neutral-3)}.arco-timepicker-column:hover{overflow-y:auto}.arco-timepicker-column ul{box-sizing:border-box;margin:0;padding:0;list-style:none}.arco-timepicker-column ul:after{display:block;width:100%;height:192px;content:""}.arco-timepicker-cell{padding:4px 0;color:var(--color-text-1);font-weight:500;cursor:pointer}.arco-timepicker-cell-inner{height:24px;padding-left:24px;font-size:14px;line-height:24px}.arco-timepicker-cell:not(.arco-timepicker-cell-selected):not(.arco-timepicker-cell-disabled):hover .arco-timepicker-cell-inner{background-color:var(--color-fill-2)}.arco-timepicker-cell-selected .arco-timepicker-cell-inner{font-weight:500;background-color:var(--color-fill-2)}.arco-timepicker-cell-disabled{color:var(--color-text-4);cursor:not-allowed}.arco-timepicker-footer-extra-wrapper{padding:8px;color:var(--color-text-1);font-size:12px;border-top:1px solid var(--color-neutral-3)}.arco-timepicker-footer-btn-wrapper{display:flex;justify-content:space-between;padding:8px;border-top:1px solid var(--color-neutral-3)}.arco-timepicker-footer-btn-wrapper :only-child{margin-left:auto}.arco-timeline{display:flex;flex-direction:column}.arco-timeline-item{position:relative;min-height:78px;padding-left:6px;color:var(--color-text-1);font-size:14px}.arco-timeline-item-label{color:var(--color-text-3);font-size:12px;line-height:1.667}.arco-timeline-item-content{margin-bottom:4px;color:var(--color-text-1);font-size:14px;line-height:1.5715}.arco-timeline-item-content-wrapper{position:relative;margin-left:16px}.arco-timeline-item.arco-timeline-item-last>.arco-timeline-item-dot-wrapper .arco-timeline-item-dot-line{display:none}.arco-timeline-item-dot-wrapper{position:absolute;left:0;height:100%;text-align:center}.arco-timeline-item-dot-wrapper .arco-timeline-item-dot-content{position:relative;width:6px;height:22.001px;line-height:22.001px}.arco-timeline-item-dot{position:relative;top:50%;box-sizing:border-box;width:6px;height:6px;margin-top:-50%;color:rgb(var(--primary-6));border-radius:var(--border-radius-circle)}.arco-timeline-item-dot-solid{background-color:rgb(var(--primary-6))}.arco-timeline-item-dot-hollow{background-color:var(--color-bg-2);border:2px solid rgb(var(--primary-6))}.arco-timeline-item-dot-custom{position:absolute;top:50%;left:50%;display:inline-flex;box-sizing:border-box;color:rgb(var(--primary-6));background-color:var(--color-bg-2);transform:translate(-50%) translateY(-50%);transform-origin:center}.arco-timeline-item-dot-custom 
svg{color:inherit}.arco-timeline-item-dot-line{position:absolute;top:18.0005px;bottom:-4.0005px;left:50%;box-sizing:border-box;width:1px;border-color:var(--color-neutral-3);border-left-width:1px;transform:translate(-50%)}.arco-timeline-is-reverse{flex-direction:column-reverse}.arco-timeline-alternate{overflow:hidden}.arco-timeline-alternate .arco-timeline-item-vertical-left{padding-left:0}.arco-timeline-alternate .arco-timeline-item-vertical-left>.arco-timeline-item-dot-wrapper{left:50%}.arco-timeline-alternate .arco-timeline-item-vertical-left>.arco-timeline-item-content-wrapper{left:50%;width:50%;margin-left:22px;padding-right:22px}.arco-timeline-alternate .arco-timeline-item-vertical-right{padding-right:0}.arco-timeline-alternate .arco-timeline-item-vertical-right>.arco-timeline-item-dot-wrapper{left:50%}.arco-timeline-alternate .arco-timeline-item-vertical-right>.arco-timeline-item-content-wrapper{left:0;width:50%;margin-right:0;margin-left:-16px;padding-right:16px;text-align:right}.arco-timeline-right .arco-timeline-item-vertical-right{padding-right:6px}.arco-timeline-right .arco-timeline-item-vertical-right>.arco-timeline-item-dot-wrapper{right:0;left:unset}.arco-timeline-right .arco-timeline-item-vertical-right>.arco-timeline-item-content-wrapper{margin-right:16px;margin-left:0;text-align:right}.arco-timeline-item-label-relative>.arco-timeline-item-label{position:absolute;top:0;box-sizing:border-box;max-width:100px}.arco-timeline-item-vertical-left.arco-timeline-item-label-relative{margin-left:100px}.arco-timeline-item-vertical-left.arco-timeline-item-label-relative>.arco-timeline-item-label{left:0;padding-right:16px;text-align:right;transform:translate(-100%)}.arco-timeline-item-vertical-right.arco-timeline-item-label-relative{margin-right:100px}.arco-timeline-item-vertical-right.arco-timeline-item-label-relative>.arco-timeline-item-label{right:0;padding-left:16px;text-align:left;transform:translate(100%)}.arco-timeline-item-horizontal-top.arco-timeline-item-label-relative{margin-top:50px}.arco-timeline-item-horizontal-top.arco-timeline-item-label-relative>.arco-timeline-item-label{padding-bottom:16px;transform:translateY(-100%)}.arco-timeline-item-horizontal-top.arco-timeline-item-label-relative>.arco-timeline-item-content{margin-bottom:0}.arco-timeline-item-horizontal-bottom.arco-timeline-item-label-relative{margin-bottom:50px}.arco-timeline-item-horizontal-bottom.arco-timeline-item-label-relative>.arco-timeline-item-content{margin-bottom:0}.arco-timeline-item-horizontal-bottom.arco-timeline-item-label-relative>.arco-timeline-item-label{top:unset;bottom:0;padding-top:16px;text-align:left;transform:translateY(100%)}.arco-timeline-alternate .arco-timeline-item-vertical-left.arco-timeline-item-label-relative{margin-left:0}.arco-timeline-alternate .arco-timeline-item-vertical-left.arco-timeline-item-label-relative>.arco-timeline-item-label{left:0;width:50%;max-width:unset;transform:none}.arco-timeline-alternate .arco-timeline-item-vertical-right.arco-timeline-item-label-relative{margin-right:0}.arco-timeline-alternate .arco-timeline-item-vertical-right.arco-timeline-item-label-relative>.arco-timeline-item-label{right:0;width:50%;max-width:unset;transform:none}.arco-timeline-alternate .arco-timeline-item-horizontal-top.arco-timeline-item-label-relative{margin-top:0}.arco-timeline-alternate 
.arco-timeline-item-horizontal-bottom.arco-timeline-item-label-relative{margin-bottom:0}.arco-timeline-direction-horizontal{display:flex;flex-direction:row}.arco-timeline-direction-horizontal.arco-timeline-is-reverse{flex-direction:row-reverse}.arco-timeline-item-dot-line-is-horizontal{top:50%;right:4px;left:12px;width:unset;height:1px;border-top-width:1px;border-left:none;transform:translateY(-50%)}.arco-timeline-item-horizontal-bottom,.arco-timeline-item-horizontal-top{flex:1;min-height:unset;padding-right:0;padding-left:0}.arco-timeline-item-horizontal-bottom>.arco-timeline-item-dot-wrapper,.arco-timeline-item-horizontal-top>.arco-timeline-item-dot-wrapper{top:0;width:100%;height:auto}.arco-timeline-item-horizontal-bottom>.arco-timeline-item-dot-wrapper .arco-timeline-item-dot,.arco-timeline-item-horizontal-top>.arco-timeline-item-dot-wrapper .arco-timeline-item-dot{top:unset;margin-top:unset}.arco-timeline-item-horizontal-bottom>.arco-timeline-item-dot-wrapper .arco-timeline-item-dot-content,.arco-timeline-item-horizontal-top>.arco-timeline-item-dot-wrapper .arco-timeline-item-dot-content{height:6px;line-height:6px}.arco-timeline-item-horizontal-top{padding-top:6px}.arco-timeline-item-horizontal-top>.arco-timeline-item-dot-wrapper{top:0;bottom:unset}.arco-timeline-item-horizontal-top>.arco-timeline-item-content-wrapper{margin-top:16px;margin-left:0}.arco-timeline-item-horizontal-bottom{padding-bottom:6px}.arco-timeline-item-horizontal-bottom>.arco-timeline-item-dot-wrapper{top:unset;bottom:0}.arco-timeline-item-horizontal-bottom>.arco-timeline-item-content-wrapper{margin-bottom:16px;margin-left:0}.arco-timeline-alternate.arco-timeline-direction-horizontal{align-items:center;min-height:200px;overflow:visible}.arco-timeline-alternate.arco-timeline-direction-horizontal .arco-timeline-item-horizontal-bottom{margin-top:6px;transform:translateY(-50%)}.arco-timeline-alternate.arco-timeline-direction-horizontal .arco-timeline-item-horizontal-top{margin-top:-6px;transform:translateY(50%)}.arco-tooltip-content{max-width:350px;padding:8px 12px;color:#fff;font-size:14px;line-height:1.5715;text-align:left;word-wrap:break-word;background-color:var(--color-tooltip-bg);border-radius:var(--border-radius-small)}.arco-tooltip-mini{padding:4px 12px;font-size:14px}.arco-tooltip-popup-arrow{background-color:var(--color-tooltip-bg)}.arco-transfer{display:flex;align-items:center}.arco-transfer-view{display:flex;flex-direction:column;box-sizing:border-box;width:200px;height:224px;border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-small)}.arco-transfer-view-search{padding:8px 12px 4px}.arco-transfer-view-list{flex:1}.arco-transfer-view-custom-list{flex:1;overflow:auto}.arco-transfer-view-header{display:flex;align-items:center;padding:0 10px}.arco-transfer-view-header>*:first-child{flex:1;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-transfer-view-header>*:first-child:not(:last-child){margin-right:8px}.arco-transfer-view-header{height:40px;color:var(--color-text-1);font-weight:500;font-size:14px;line-height:40px;background-color:var(--color-fill-1)}.arco-transfer-view-header-title{display:flex;align-items:center}.arco-transfer-view-header-title .arco-checkbox{overflow:hidden;white-space:nowrap;text-overflow:ellipsis;font-size:inherit}.arco-transfer-view-header-title 
.arco-checkbox-text{color:inherit}.arco-transfer-view-header-clear-btn{color:var(--color-text-2);font-size:12px;cursor:pointer}.arco-transfer-view-header-clear-btn:hover:before{background-color:var(--color-fill-3)}.arco-transfer-view-header-count{margin-right:2px;color:var(--color-text-3);font-weight:400;font-size:12px}.arco-transfer-view-body{flex:1 1 auto;overflow:hidden}.arco-transfer-view-body .arco-transfer-view-empty{display:flex;flex-direction:column;align-items:center;justify-content:center;height:100%}.arco-transfer-view .arco-scrollbar{height:100%}.arco-transfer-view .arco-scrollbar-container{height:100%;overflow:auto}.arco-transfer-view .arco-list{border-radius:0}.arco-transfer-view .arco-list-footer{position:relative;display:flex;align-items:center;box-sizing:border-box;height:40px;padding:0 8px}.arco-transfer-view .arco-list .arco-pagination{position:absolute;top:50%;right:8px;margin:0;transform:translateY(-50%)}.arco-transfer-view .arco-list .arco-pagination-jumper-input{width:24px}.arco-transfer-view .arco-list .arco-pagination-jumper-separator{padding:0 8px}.arco-transfer-view .arco-checkbox{padding-left:6px}.arco-transfer-view .arco-checkbox-wrapper{display:inline}.arco-transfer-view .arco-checkbox .arco-icon-hover:hover:before{background-color:var(--color-fill-3)}.arco-transfer-list-item{position:relative;display:flex;align-items:center;height:36px;padding:0 10px;color:var(--color-text-1);line-height:36px;list-style:none;background-color:transparent;cursor:default}.arco-transfer-list-item-content{font-size:14px;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-transfer-list-item-checkbox .arco-checkbox-label{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-transfer-list-item-disabled{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-transfer-list-item:not(.arco-transfer-list-item-disabled):hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-transfer-list-item .arco-checkbox{width:100%}.arco-transfer-list-item .arco-checkbox-text{color:inherit}.arco-transfer-list-item-remove-btn{margin-left:auto;color:var(--color-text-2);font-size:12px;cursor:pointer}.arco-transfer-list-item-remove-btn:hover:before{background-color:var(--color-fill-3)}.arco-transfer-list-item-draggable:before{position:absolute;right:0;left:0;display:block;height:2px;border-radius:1px;content:""}.arco-transfer-list-item-gap-bottom:before{bottom:-2px;background-color:rgb(var(--primary-6))}.arco-transfer-list-item-gap-top:before{top:-2px;background-color:rgb(var(--primary-6))}.arco-transfer-list-item-dragging{color:var(--color-text-4)!important;background-color:var(--color-fill-1)!important}.arco-transfer-list-item-dragged{animation:arco-transfer-drag-item-blink .4s;animation-timing-function:cubic-bezier(0,0,1,1)}.arco-transfer-operations{padding:0 20px}.arco-transfer-operations .arco-btn{display:block}.arco-transfer-operations .arco-btn:last-child{margin-top:12px}.arco-transfer-operations-words .arco-btn{width:100%;padding:0 12px;text-align:left}.arco-transfer-simple .arco-transfer-view-source{border-right:none;border-top-right-radius:0;border-bottom-right-radius:0}.arco-transfer-simple .arco-transfer-view-target{border-top-left-radius:0;border-bottom-left-radius:0}.arco-transfer-disabled .arco-transfer-view-header{color:var(--color-text-4)}@keyframes arco-transfer-drag-item-blink{0%{background-color:var(--color-primary-light-1)}to{background-color:transparent}}.arco-tree-select-popup{box-sizing:border-box;padding:4px 
0;background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-tree-select-popup .arco-tree-select-tree-wrapper{height:100%;max-height:200px;padding-right:4px;padding-left:10px;overflow:auto}.arco-tree-select-popup .arco-tree-node{padding-left:0}.arco-tree-select-highlight{font-weight:500}.arco-icon-hover.arco-tree-node-icon-hover:before{width:16px;height:16px}.arco-tree-node-switcher{position:relative;display:flex;flex-shrink:0;align-items:center;width:12px;height:32px;margin-right:10px;color:var(--color-text-2);font-size:12px;cursor:pointer;user-select:none}.arco-tree-node-switcher-icon{position:relative;margin:0 auto}.arco-tree-node-switcher-icon svg{position:relative;transform:rotate(-90deg);transition:transform .2s cubic-bezier(.34,.69,.1,1)}.arco-tree-node-expanded .arco-tree-node-switcher-icon svg,.arco-tree-node-is-leaf .arco-tree-node-switcher-icon svg{transform:rotate(0)}.arco-tree-node-drag-icon{margin-left:120px;color:rgb(var(--primary-6));opacity:0}.arco-tree-node-custom-icon{margin-right:10px;font-size:inherit;line-height:1;cursor:pointer;user-select:none}.arco-tree-node .arco-icon-loading{color:rgb(var(--primary-6))}.arco-tree-node-minus-icon,.arco-tree-node-plus-icon{position:relative;display:block;width:14px;height:14px;background:var(--color-fill-2);border-radius:var(--border-radius-small);cursor:pointer}.arco-tree-node-minus-icon:after,.arco-tree-node-plus-icon:after{position:absolute;top:50%;left:50%;display:block;width:6px;height:2px;margin-top:-1px;margin-left:-3px;color:var(--color-text-2);background-color:var(--color-text-2);border-radius:.5px;content:""}.arco-tree-node-plus-icon:before{position:absolute;top:50%;left:50%;display:block;width:2px;height:6px;margin-top:-3px;margin-left:-1px;color:var(--color-text-2);background-color:var(--color-text-2);border-radius:.5px;content:""}.arco-tree{color:var(--color-text-1)}.arco-tree .arco-checkbox{margin-right:10px;padding-left:0;line-height:32px}.arco-tree-node{position:relative;display:flex;flex-wrap:nowrap;align-items:center;padding-left:2px;color:var(--color-text-1);line-height:1.5715;cursor:pointer}.arco-tree-node-selected .arco-tree-node-title,.arco-tree-node-selected .arco-tree-node-title:hover{color:rgb(var(--primary-6));transition:color .2s cubic-bezier(0,0,1,1)}.arco-tree-node-disabled-selectable .arco-tree-node-title,.arco-tree-node-disabled .arco-tree-node-title,.arco-tree-node-disabled-selectable .arco-tree-node-title:hover,.arco-tree-node-disabled .arco-tree-node-title:hover{color:var(--color-text-4);background:none;cursor:not-allowed}.arco-tree-node-disabled.arco-tree-node-selected .arco-tree-node-title{color:var(--color-primary-light-3)}.arco-tree-node-title-block{flex:1;box-sizing:content-box}.arco-tree-node-title-block .arco-tree-node-drag-icon{position:absolute;right:12px}.arco-tree-node-indent{position:relative;flex-shrink:0;align-self:stretch}.arco-tree-node-indent-block{position:relative;display:inline-block;width:12px;height:100%;margin-right:10px}.arco-tree-node-draggable{margin-top:2px}.arco-tree-node-title{position:relative;display:flex;align-items:center;margin-left:-4px;padding:5px 4px;font-size:14px;border-radius:var(--border-radius-small)}.arco-tree-node-title:hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-tree-node-title:hover 
.arco-tree-node-drag-icon{opacity:1}.arco-tree-node-title-draggable:before{position:absolute;top:-2px;right:0;left:0;display:block;height:2px;border-radius:1px;content:""}.arco-tree-node-title-gap-bottom:before{top:unset;bottom:-2px;background-color:rgb(var(--primary-6))}.arco-tree-node-title-gap-top:before{background-color:rgb(var(--primary-6))}.arco-tree-node-title-highlight{color:var(--color-text-1);background-color:var(--color-primary-light-1)}.arco-tree-node-title-dragging,.arco-tree-node-title-dragging:hover{color:var(--color-text-4);background-color:var(--color-fill-1)}.arco-tree-show-line{padding-left:1px}.arco-tree-show-line .arco-tree-node-switcher{width:14px;text-align:center}.arco-tree-show-line .arco-tree-node-switcher .arco-tree-node-icon-hover{width:100%}.arco-tree-show-line .arco-tree-node-indent-block{width:14px}.arco-tree-show-line .arco-tree-node-indent-block:before{position:absolute;left:50%;box-sizing:border-box;width:1px;border-left:1px solid var(--color-neutral-3);transform:translate(-50%);content:"";top:-5px;bottom:-5px}.arco-tree-show-line .arco-tree-node-is-leaf:not(.arco-tree-node-is-tail) .arco-tree-node-indent:after{position:absolute;right:-7px;box-sizing:border-box;width:1px;border-left:1px solid var(--color-neutral-3);transform:translate(50%);content:"";top:27px;bottom:-5px}.arco-tree-show-line .arco-tree-node-indent-block-lineless:before{display:none}.arco-tree-size-mini .arco-tree-node-switcher{height:24px}.arco-tree-size-mini .arco-checkbox{line-height:24px}.arco-tree-size-mini .arco-tree-node-title{padding-top:2px;padding-bottom:2px;font-size:12px;line-height:1.667}.arco-tree-size-mini .arco-tree-node-indent-block:after{top:23px;bottom:-1px}.arco-tree-size-mini .arco-tree-node-is-leaf:not(.arco-tree-node-is-tail) .arco-tree-node-indent:before{top:-1px;bottom:-1px}.arco-tree-size-small .arco-tree-node-switcher{height:28px}.arco-tree-size-small .arco-checkbox{line-height:28px}.arco-tree-size-small .arco-tree-node-title{padding-top:3px;padding-bottom:3px;font-size:14px}.arco-tree-size-small .arco-tree-node-indent-block:after{top:25px;bottom:-3px}.arco-tree-size-small .arco-tree-node-is-leaf:not(.arco-tree-node-is-tail) .arco-tree-node-indent:before{top:-3px;bottom:-3px}.arco-tree-size-large .arco-tree-node-switcher{height:36px}.arco-tree-size-large .arco-checkbox{line-height:36px}.arco-tree-size-large .arco-tree-node-title{padding-top:7px;padding-bottom:7px;font-size:14px}.arco-tree-size-large .arco-tree-node-indent-block:after{top:29px;bottom:-7px}.arco-tree-size-large .arco-tree-node-is-leaf:not(.arco-tree-node-is-tail) .arco-tree-node-indent:before{top:-7px;bottom:-7px}.arco-tree-node-list{overflow:hidden;transition:height .2s 
cubic-bezier(.34,.69,.1,1)}.arco-typography{color:var(--color-text-1);line-height:1.5715}h1.arco-typography,h2.arco-typography,h3.arco-typography,h4.arco-typography,h5.arco-typography,h6.arco-typography{margin-top:1em;margin-bottom:.5em;font-weight:500}h1.arco-typography{font-size:36px;line-height:1.23}h2.arco-typography{font-size:32px;line-height:1.25}h3.arco-typography{font-size:28px;line-height:1.29}h4.arco-typography{font-size:24px;line-height:1.33}h5.arco-typography{font-size:20px;line-height:1.4}h6.arco-typography{font-size:16px;line-height:1.5}div.arco-typography,p.arco-typography{margin-top:0;margin-bottom:1em}.arco-typography-primary{color:rgb(var(--primary-6))}.arco-typography-secondary{color:var(--color-text-2)}.arco-typography-success{color:rgb(var(--success-6))}.arco-typography-warning{color:rgb(var(--warning-6))}.arco-typography-danger{color:rgb(var(--danger-6))}.arco-typography-disabled{color:var(--color-text-4);cursor:not-allowed}.arco-typography mark{background-color:rgb(var(--yellow-4))}.arco-typography u{text-decoration:underline}.arco-typography del{text-decoration:line-through}.arco-typography b{font-weight:500}.arco-typography code{margin:0 2px;padding:2px 8px;color:var(--color-text-2);font-size:85%;background-color:var(--color-neutral-2);border:1px solid var(--color-neutral-3);border-radius:2px}.arco-typography blockquote{margin:0 0 1em;padding-left:8px;background-color:var(--color-bg-2);border-left:2px solid var(--color-neutral-6)}.arco-typography ol,.arco-typography ul{margin:0;padding:0}.arco-typography ul li,.arco-typography ol li{margin-left:20px}.arco-typography ul{list-style:circle}.arco-typography-spacing-close{line-height:1.3}.arco-typography-operation-copy,.arco-typography-operation-copied{margin-left:2px;padding:2px}.arco-typography-operation-copy{color:var(--color-text-2);background-color:transparent;border-radius:2px;cursor:pointer;transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-typography-operation-copy:hover{color:var(--color-text-2);background-color:var(--color-fill-2)}.arco-typography-operation-copied{color:rgb(var(--success-6))}.arco-typography-operation-edit{margin-left:2px;padding:2px;color:var(--color-text-2);background-color:transparent;border-radius:2px;cursor:pointer;transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-typography-operation-edit:hover{color:var(--color-text-2);background-color:var(--color-fill-2)}.arco-typography-operation-expand{margin:0 4px;color:rgb(var(--primary-6));cursor:pointer}.arco-typography-operation-expand:hover{color:rgb(var(--primary-5))}.arco-typography-edit-content{position:relative;left:-13px;margin-top:-5px;margin-right:-13px;margin-bottom:calc(1em - 5px)}.arco-typography-css-operation{margin-top:-1em;margin-bottom:1em;text-align:right}.arco-upload{display:inline-block;max-width:100%;cursor:pointer}.arco-upload.arco-upload-draggable{width:100%}.arco-upload-tip{margin-top:4px;overflow:hidden;color:var(--color-text-3);font-size:12px;line-height:1.5;white-space:nowrap;text-overflow:ellipsis}.arco-upload-picture-card{display:flex;flex-direction:column;justify-content:center;min-width:80px;height:80px;margin-bottom:0;color:var(--color-text-2);text-align:center;background:var(--color-fill-2);border:1px dashed var(--color-neutral-3);border-radius:var(--border-radius-small);transition:all .1s cubic-bezier(0,0,1,1)}.arco-upload-picture-card:hover{color:var(--color-text-2);background-color:var(--color-fill-3);border-color:var(--color-neutral-4)}.arco-upload-drag{width:100%;padding:50px 
0;color:var(--color-text-1);text-align:center;background-color:var(--color-fill-1);border:1px dashed var(--color-neutral-3);border-radius:var(--border-radius-small);transition:all .2s ease}.arco-upload-drag .arco-icon-plus{margin-bottom:24px;color:var(--color-text-2);font-size:14px}.arco-upload-drag:hover{background-color:var(--color-fill-3);border-color:var(--color-neutral-4)}.arco-upload-drag:hover .arco-upload-drag-text{color:var(--color-text-1)}.arco-upload-drag:hover .arco-icon-plus{color:var(--color-text-2)}.arco-upload-drag-active{color:var(--color-text-1);background-color:var(--color-primary-light-1);border-color:rgb(var(--primary-6))}.arco-upload-drag-active .arco-upload-drag-text{color:var(--color-text-1)}.arco-upload-drag-active .arco-icon-plus{color:rgb(var(--primary-6))}.arco-upload-drag .arco-upload-tip{margin-top:0}.arco-upload-drag-text{color:var(--color-text-1);font-size:14px;line-height:1.5}.arco-upload-wrapper{width:100%}.arco-upload-wrapper.arco-upload-wrapper-type-picture-card{display:flex;justify-content:flex-start}.arco-upload-drag{width:100%}.arco-upload-hide{display:none}.arco-upload-disabled .arco-upload-picture-card,.arco-upload-disabled .arco-upload-picture-card:hover{color:var(--color-text-4);background-color:var(--color-fill-1);border-color:var(--color-neutral-4);cursor:not-allowed}.arco-upload-disabled .arco-upload-drag,.arco-upload-disabled .arco-upload-drag:hover{background-color:var(--color-fill-1);border-color:var(--color-text-4);cursor:not-allowed}.arco-upload-disabled .arco-upload-drag .arco-icon-plus,.arco-upload-disabled .arco-upload-drag:hover .arco-icon-plus,.arco-upload-disabled .arco-upload-drag .arco-upload-drag-text,.arco-upload-disabled .arco-upload-drag:hover .arco-upload-drag-text,.arco-upload-disabled .arco-upload-tip{color:var(--color-text-4)}.arco-upload-icon{cursor:pointer}.arco-upload-icon-error{margin-left:4px;color:rgb(var(--danger-6))}.arco-upload-icon-success{color:rgb(var(--success-6));font-size:14px;line-height:14px}.arco-upload-icon-remove{position:relative;font-size:14px}.arco-upload-icon-start,.arco-upload-icon-cancel{position:absolute;top:50%;left:50%;color:var(--color-white);font-size:12px;transform:translate(-50%) translateY(-50%)}.arco-upload-icon-upload{color:rgb(var(--primary-6));font-size:14px;cursor:pointer;transition:all .2s ease}.arco-upload-icon-upload:active,.arco-upload-icon-upload:hover{color:rgb(var(--primary-7))}.arco-upload-list{margin:0;padding:0;list-style:none}.arco-upload-list.arco-upload-list-type-text,.arco-upload-list.arco-upload-list-type-picture{width:100%}.arco-upload-list.arco-upload-list-type-text .arco-upload-list-item:first-of-type,.arco-upload-list.arco-upload-list-type-picture .arco-upload-list-item:first-of-type{margin-top:24px}.arco-upload-list-item-done .arco-upload-list-item-file-icon{color:rgb(var(--primary-6))}.arco-upload-list-item{position:relative;display:flex;align-items:center;box-sizing:border-box;margin-top:12px}.arco-upload-list-item-content{display:flex;flex:1;flex-wrap:nowrap;align-items:center;box-sizing:border-box;width:100%;padding:8px 10px 8px 12px;overflow:hidden;font-size:14px;background-color:var(--color-fill-1);border-radius:var(--border-radius-small);transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-upload-list-item-file-icon{margin-right:12px;color:rgb(var(--primary-6));font-size:16px;line-height:16px}.arco-upload-list-item-thumbnail{flex-shrink:0;width:40px;height:40px;margin-right:12px}.arco-upload-list-item-thumbnail 
img{width:100%;height:100%}.arco-upload-list-item-name{display:flex;flex:1;align-items:center;margin-right:10px;overflow:hidden;color:var(--color-text-1);font-size:14px;line-height:1.4286;white-space:nowrap;text-overflow:ellipsis}.arco-upload-list-item-name-link{overflow:hidden;color:rgb(var(--link-6));text-decoration:none;text-overflow:ellipsis;cursor:pointer}.arco-upload-list-item-name-text{overflow:hidden;text-overflow:ellipsis;cursor:pointer}.arco-upload-list-item .arco-upload-progress{position:relative;margin-left:auto;line-height:12px}.arco-upload-list-item .arco-upload-progress:hover .arco-progress-circle-bg{stroke:rgba(var(--gray-10),.2)}.arco-upload-list-item .arco-upload-progress:hover .arco-progress-circle-bar{stroke:rgb(var(--primary-7))}.arco-upload-list-item-operation{margin-left:12px;color:var(--color-text-2);font-size:12px}.arco-upload-list-item-operation .arco-upload-icon-remove{font-size:inherit}.arco-upload-list-item-error .arco-upload-list-status,.arco-upload-list-item-done .arco-upload-list-status{display:none}.arco-upload-list-type-text .arco-upload-list-item-error .arco-upload-list-item-name-link,.arco-upload-list-type-text .arco-upload-list-item-error .arco-upload-list-item-name{color:rgb(var(--danger-6))}.arco-upload-list.arco-upload-list-type-picture-card{display:flex;flex-wrap:wrap;vertical-align:top}.arco-upload-list.arco-upload-list-type-picture-card .arco-upload-list-status{top:50%;margin-left:0;transform:translateY(-50%)}.arco-upload-list-picture{display:inline-block;margin-top:0;margin-right:8px;margin-bottom:8px;padding-right:0;overflow:hidden;vertical-align:top;transition:all .2s cubic-bezier(.34,.69,.1,1)}.arco-upload-list-picture-status-error .arco-upload-list-picture-mask{opacity:1}.arco-upload-list-picture{position:relative;box-sizing:border-box;width:80px;height:80px;overflow:hidden;line-height:80px;text-align:center;vertical-align:top;border-radius:var(--border-radius-small)}.arco-upload-list-picture img{width:100%;height:100%}.arco-upload-list-picture-mask{position:absolute;top:0;right:0;bottom:0;left:0;color:var(--color-white);font-size:16px;line-height:80px;text-align:center;background:rgba(0,0,0,.5);cursor:pointer;opacity:0;transition:opacity .1s cubic-bezier(0,0,1,1)}.arco-upload-list-picture-operation{display:none;font-size:14px}.arco-upload-list-picture-operation .arco-upload-icon-retry{color:var(--color-white)}.arco-upload-list-picture-error-tip .arco-upload-icon-error{color:var(--color-white);font-size:26px}.arco-upload-list-picture-mask:hover{opacity:1}.arco-upload-list-picture-mask:hover .arco-upload-list-picture-operation{display:flex;justify-content:space-evenly}.arco-upload-list-picture-mask:hover .arco-upload-list-picture-error-tip{display:none}.arco-upload-list-type-picture .arco-upload-list-item-content{padding-top:8px;padding-bottom:8px}.arco-upload-list-type-picture .arco-upload-list-item-error .arco-upload-list-item-content{background-color:var(--color-danger-light-1)}.arco-upload-list-type-picture .arco-upload-list-item-error .arco-upload-list-item-name-link,.arco-upload-list-type-picture .arco-upload-list-item-error .arco-upload-list-item-name{color:rgb(var(--danger-6))}.arco-upload-hide+.arco-upload-list .arco-upload-list-item:first-of-type{margin-top:0}.arco-upload-slide-up-enter{opacity:0}.arco-upload-slide-up-enter-active{opacity:1;transition:opacity .2s cubic-bezier(.34,.69,.1,1)}.arco-upload-slide-up-exit{opacity:1}.arco-upload-slide-up-exit-active{margin:0;overflow:hidden;opacity:0;transition:opacity .1s 
cubic-bezier(0,0,1,1),height .3s cubic-bezier(.34,.69,.1,1) .1s,margin .3s cubic-bezier(.34,.69,.1,1) .1s}.arco-upload-list-item.arco-upload-slide-inline-enter{opacity:0}.arco-upload-list-item.arco-upload-slide-inline-enter-active{opacity:1;transition:opacity .2s cubic-bezier(0,0,1,1)}.arco-upload-list-item.arco-upload-slide-inline-exit{opacity:1}.arco-upload-list-item.arco-upload-slide-inline-exit-active{margin:0;overflow:hidden;opacity:0;transition:opacity .1s cubic-bezier(0,0,1,1),width .3s cubic-bezier(.34,.69,.1,1) .1s,margin .3s cubic-bezier(.34,.69,.1,1) .1s}body{font-family:Nunito Sans-SemiBold,Nunito Sans}html,body,#app{height:100%}.arco-table-td-content{color:#4e5969;font-size:12px;font-weight:400;padding:8px 0}.arco-table-th-title{color:#1d2129;font-size:12px;font-weight:500}.loadingDirectiveElement{position:absolute;left:0;right:0;top:0;bottom:0;z-index:10;display:flex;justify-content:center;align-items:center;text-align:center;background-color:#fff9;transition:opacity .1s cubic-bezier(0,0,1,1);user-select:none}.loadingDirectiveElement.fullScreen{position:fixed;z-index:1000}.posRelative{position:relative}.spaceBTW{justify-content:space-between}.headerInner{height:100%;padding:0 16px}.headerInner .title{color:#1d2129;font-size:14px;font-weight:500}.v-binder-follower-content{max-width:300px}.typing-pre>.code_container:last-child pre code:after{display:block;color:#fff;content:"▋";margin-left:4px;animation:blink 1s steps(5,start) infinite}.typing-text>*:last-child:after{content:"▋";margin-left:4px;vertical-align:baseline;animation:blink 1s steps(5,start) infinite}@keyframes blink{to{visibility:hidden}}.rotate{animation:rotate 1.5s infinite linear}@keyframes rotate{0%{transform:rotate(0)}to{transform:rotate(360deg)}}.avatarWrap{height:40px;width:40px;border-radius:20px;box-sizing:border-box;overflow:hidden}.avatarWrap img{width:100%;height:100%;object-fit:cover}::-webkit-scrollbar{height:16px;width:8px}::-webkit-scrollbar:horizontal{height:8px;width:16px}::-webkit-scrollbar-track{background-color:transparent;border-radius:9999px}::-webkit-scrollbar-thumb{background-color:#d9d9e3cc;border-color:#fff;border-radius:9999px;border-width:1px}::-webkit-scrollbar-thumb:hover{background-color:#ececf1}.hide-scrollbar{-ms-overflow-style:none;scrollbar-width:none}.hide-scrollbar ::-webkit-scrollbar{display:none}.login{height:100%;display:flex}.login .loginbg{flex:2;background-size:cover;position:relative}.login .loginbg .logiWhite{position:absolute;top:20px;left:20px}.login .loginbg .title{color:var(--fill-color-bg-white, #fff);font-family:PingFang SC;font-size:34px;font-style:normal;font-weight:600;line-height:normal;margin-left:20%;margin-top:40%}.login .loginform{flex:3;display:flex;justify-content:center;align-items:center}.login .loginform .formTitle{color:#2d2a2a;font-size:24px;font-weight:500}.login .loginform .toolBox{line-height:50px}.login .loginform .toolBox .toolBoxBtn{cursor:pointer;font-weight:400}.login .loginform .desc{font-size:12px;font-weight:400;color:#86909c}.login .loginform .desc .arco-link{font-size:12px}.IconCommon{fill:currentColor;outline:none;width:1em;height:1em}.IconCommon.iconDisabled{filter:opacity(.5);cursor:not-allowed!important}dialog[data-v-6fddb6c7]:not([open]){opacity:0;visibility:hidden;display:block}.customDialog[data-v-6fddb6c7]{opacity:1;padding:20px;box-sizing:border-box;border:none;border-radius:20px;filter:drop-shadow(0px 0px 40px rgba(168,168,168,.25));transition:opacity .3s ease;display:flex;flex-direction:column;outline:none}.customDialog 
.header[data-v-6fddb6c7]{width:100%;display:flex;justify-content:flex-end}.customDialog .content[data-v-6fddb6c7]{flex:1}.wechatModal[data-v-e442bd8c]{height:407px;padding:0 20px;box-sizing:border-box;display:flex;flex-direction:column;align-items:center;justify-content:flex-start;gap:12px}.wechatModal .title[data-v-e442bd8c]{display:flex;align-items:center;justify-content:center;gap:10px}.wechatModal .title .titleText[data-v-e442bd8c]{color:#000;font-family:Helvetica Neue;font-size:24px;font-style:normal;font-weight:500;line-height:normal}.wechatModal .desc[data-v-e442bd8c]{color:var(--light-text-color-text-2, #4e5969);font-family:Helvetica Neue;font-size:16px;font-style:normal;font-weight:400;line-height:150%}.wechatModal .qrCode[data-v-e442bd8c]{width:242px;height:263.868px;flex-shrink:0;border-radius:20px;background:#fff;box-shadow:0 4px 40px 10px #0000000d;padding:20px;box-sizing:border-box;margin-top:4px}.wechatModal .qrCode .scanText[data-v-e442bd8c]{display:flex;flex-direction:row;align-items:center;gap:10px}.wechatModal .qrCode .scanText span[data-v-e442bd8c]{color:var(--light-text-color-text-1, #1d2129);font-family:Helvetica Neue;font-size:16px;font-style:normal;font-weight:400;line-height:normal}.baseFont[data-v-01535cfb],.heroWrapper .links .link .linkName[data-v-01535cfb],.heroWrapper .affiliations[data-v-01535cfb],.heroWrapper .affiliationIndex[data-v-01535cfb],.heroWrapper .contributor[data-v-01535cfb],.heroWrapper h1[data-v-01535cfb]{font-size:20px;font-weight:400;line-height:32px;letter-spacing:0em}.heroWrapper[data-v-01535cfb]{width:100%;display:flex;align-items:center;flex-direction:column;color:#1d2129;font-family:Helvetica Neue;padding-bottom:50px;overflow:hidden}.heroWrapper h1[data-v-01535cfb]{font-size:56px;font-weight:700;line-height:70px;text-align:center;color:#1d2129;max-width:1350px;margin:72px 0 0}.heroWrapper .contributors[data-v-01535cfb]{max-width:1350px;text-align:center;margin-top:24px}.heroWrapper .contributor[data-v-01535cfb]{text-align:center;color:#4080ff}.heroWrapper .affiliationIndex[data-v-01535cfb]{text-align:center;font-family:PingFang SC}.heroWrapper .affiliations[data-v-01535cfb]{text-align:center;color:#1d2129;margin-top:10px}.heroWrapper .links[data-v-01535cfb]{display:flex;flex-direction:row;gap:16px;margin-top:40px;z-index:1;flex-wrap:wrap;justify-content:center}.heroWrapper .links .link[data-v-01535cfb]{height:42px;padding:8px 16px;border-radius:50px;box-sizing:border-box;background:linear-gradient(90deg,#e8f3ff -1.99%,#e2e8ff 100%);color:#1d2129;display:flex;align-items:center;justify-content:center;gap:8px;user-select:none;cursor:not-allowed;transition:all .3s}.heroWrapper .links .link .linkName[data-v-01535cfb]{line-height:24px;color:#1d2129}.heroWrapper .links .enabled[data-v-01535cfb]{cursor:pointer}.heroWrapper .links .enabled[data-v-01535cfb]:hover{transform:scale(1.1)}.heroWrapper .bigTex[data-v-01535cfb]{margin-top:50px;width:100%;max-width:1251px;height:356px;flex-shrink:0;border-radius:20px;background:linear-gradient(129deg,#1d1c48 15.07%,#252436 76.51%);padding-top:19.49px;box-sizing:border-box;overflow:hidden;position:relative}.heroWrapper .bigTex .bigTexContent[data-v-01535cfb]{width:100%;height:301px;box-sizing:border-box;padding:29px 43px;z-index:999;position:absolute;margin-top:46px;color:var(--light-text-color-white, #fff);font-family:Helvetica Neue;font-size:20px;font-style:normal;font-weight:400;line-height:160%}.heroWrapper .bigTex 
.header[data-v-01535cfb]{position:absolute;top:46px;left:64px;z-index:999;color:#fff;font-size:24px;font-weight:700;line-height:0%}.heroWrapper .bigTex .copyBtn[data-v-01535cfb]{position:absolute;top:24px;right:22px;color:red;z-index:999;color:#fff;font-size:24px;font-weight:700;line-height:0%;cursor:pointer}.heroWrapper .bigTex .copyBtn[data-v-01535cfb]:active{scale:.9}.heroWrapper .galance[data-v-01535cfb]{margin-top:50px;max-width:1440px;position:relative;overflow:hidden;user-select:none;pointer-events:none}@media screen and (max-width: 1440px){.heroWrapper .galance[data-v-01535cfb]{margin:12px}}.wechatModal[data-v-d5d425dc]{height:407px;padding:0 20px;box-sizing:border-box;display:flex;flex-direction:column;align-items:center;justify-content:flex-start;gap:12px;font-family:Helvetica Neue}.wechatModal .title[data-v-d5d425dc]{display:flex;align-items:center;justify-content:center;gap:10px}.wechatModal .title .titleText[data-v-d5d425dc]{color:#000;font-size:24px;font-style:normal;font-weight:500;line-height:normal}.wechatModal .desc[data-v-d5d425dc]{color:var(--light-text-color-text-2, #4e5969);font-size:16px;font-style:normal;font-weight:400;line-height:150%}.wechatModal .links[data-v-d5d425dc]{width:100%;padding-top:8px;display:flex;gap:8px;flex-direction:column;color:var(--light-text-color-text-1, #1d2129);font-size:16px;font-style:normal;font-weight:400;line-height:normal;border-top:1px dashed #e5e6eb}.wechatModal .links .link[data-v-d5d425dc]{color:var(--light-text-color-text-1, #1d2129);font-size:16px;font-style:normal;font-weight:400;line-height:normal;cursor:pointer}.wechatModal .links .link[data-v-d5d425dc]:hover{text-decoration:underline}.wechatModal .viwer[data-v-d5d425dc]{width:548px;height:304.385px;flex-shrink:0;border-radius:20px;margin-top:8px}.wechatModal .button[data-v-d5d425dc]{display:flex;height:42px;padding:18px 24px;box-sizing:border-box;justify-content:center;align-items:center;gap:10px;border-radius:10px;background:linear-gradient(270deg,#5772ff 0%,#165dff 89.78%);color:#fff;margin-top:8px;cursor:pointer;transition:transform .3s}.wechatModal .button[data-v-d5d425dc]:hover{transform:scale(1.1)}.wechatModal .welcomText[data-v-d5d425dc]{color:var(--light-text-color-text-1, #1d2129);font-size:16px;font-style:normal;font-weight:500;line-height:normal;margin-top:10px}.wechatModal .contributor[data-v-d5d425dc]{color:var(--light-text-color-text-2, #4e5969);font-size:16px;font-style:normal;font-weight:400;line-height:normal;display:flex;align-items:center;gap:4px;margin-top:6px}.wechatModal .contributor .count[data-v-d5d425dc]{display:flex;padding:2px 5px;align-items:center;gap:4px;border-radius:40px;background:var(--color-fill-2, #f2f3f5)}.roleListWrapper[data-v-d4dce6b2]{width:429px;height:100%;background-image:linear-gradient(170.11deg,#e9e9ff 1.21%,#ffffff 10.31%,#ffffff 98.31%);font-family:Helvetica Neue;display:flex;flex-direction:column;overflow:hidden}.roleListWrapper[data-v-d4dce6b2] .arco-select-view-size-large{height:48px;width:369px;margin:20px 30px 0;border-radius:10px}.roleListWrapper .title[data-v-d4dce6b2]{font-size:24px;font-weight:700;line-height:29px;letter-spacing:0em;text-align:left;display:flex;align-items:center;gap:10px;padding:16px 32px;box-sizing:border-box}.roleListWrapper .keyFill[data-v-d4dce6b2]{margin:0 30px;height:78px;padding:0 28.75px;box-sizing:border-box;border-radius:10px;border:1px;text-align:left;border:1px solid #e5e6eb;box-shadow:0 2px 10px 
#0000001a;background:linear-gradient(0deg,#f7f8fa,#f7f8fa),linear-gradient(0deg,#e5e6eb,#e5e6eb);display:flex;align-items:center}.roleListWrapper .keyFill input[data-v-d4dce6b2]{width:100%;height:19px;resize:none;outline:none;border:none;background:none;color:var(--light-text-color-text-2, #4e5969);font-family:Helvetica Neue;font-size:16px;font-style:normal;font-weight:400;line-height:normal}.roleListWrapper .keyFill .placeholder[data-v-d4dce6b2]{color:#86909c;font-size:16px;font-weight:400;line-height:19px;letter-spacing:0em}.roleListWrapper .keyFill .showPassword[data-v-d4dce6b2]{width:50px;color:#86909c;font-size:16px;font-weight:400;line-height:19px;letter-spacing:0em;cursor:pointer;display:flex;justify-content:flex-end}.roleListWrapper .keyFilled[data-v-d4dce6b2]{border-radius:10px;border:1px solid var(--light-line-color-border-2, #e5e6eb);background:linear-gradient(90deg,#e8f3ff 0%,#e2e8ff 100%)}.roleListWrapper .shake[data-v-d4dce6b2]{animation:shake-d4dce6b2 .5s 1}@keyframes shake-d4dce6b2{0%,to{transform:translate(0)}10%,30%,50%,70%,90%{transform:translate(-10px)}20%,40%,60%,80%{transform:translate(10px)}}.roleListWrapper .roleList[data-v-d4dce6b2]{width:100%;overflow:hidden;display:flex;flex-direction:column;gap:14px;padding:3px 32px 32px;box-sizing:border-box}.roleListWrapper .roleList .role[data-v-d4dce6b2]{width:100%;height:92px;padding:0 0 16px;box-sizing:border-box;border-radius:4.8px;gap:12px;display:flex;flex-direction:row}.roleListWrapper .roleList .role .avatar[data-v-d4dce6b2]{width:54px;height:54px;border-radius:50%;border:3px solid #c9cdd4;position:relative}.roleListWrapper .roleList .role .avatar .innerPie[data-v-d4dce6b2]{margin:3px;border-radius:50%;position:absolute;width:calc(100% - 6px);height:calc(100% - 6px);background:linear-gradient(0deg,#e5e6eb,#e5e6eb),linear-gradient(0deg,#ffffff,#ffffff)}.roleListWrapper .roleList .role .avatar .rightPoint[data-v-d4dce6b2]{position:absolute;content:"";width:10px;height:10px;top:40px;left:40px;border:2px;border-radius:50%;background:linear-gradient(0deg,#c9cdd4,#c9cdd4),linear-gradient(0deg,#ffffff,#ffffff);border:2px solid #ffffff}.roleListWrapper .roleList .role .avatar .pointActive[data-v-d4dce6b2]{background:#0fd267}.roleListWrapper .roleList .role .avatar img[data-v-d4dce6b2]{width:32px;margin:8px 12px;position:absolute}.roleListWrapper .roleList .role .infomation[data-v-d4dce6b2]{flex:1;display:flex;flex-direction:column;justify-content:space-between;overflow:hidden}.roleListWrapper .roleList .role .infomation .job[data-v-d4dce6b2]{flex:1;display:flex;flex-direction:row;justify-content:space-between;overflow:hidden}.roleListWrapper .roleList .role .infomation .job .jobName[data-v-d4dce6b2]{font-size:16px;font-weight:500;line-height:20px;letter-spacing:.01em;text-align:left;color:#1d2129;margin-top:5px}.roleListWrapper .roleList .role .infomation .job .jobStatus[data-v-d4dce6b2]{font-size:16px;font-weight:400;letter-spacing:0em;text-align:right;color:#86909c}.roleListWrapper .roleList .role .infomation .tags[data-v-d4dce6b2]{flex:1;display:flex;flex-direction:row;justify-content:space-between}.roleListWrapper .roleList .role .infomation .tags .tagItem[data-v-d4dce6b2]{width:auto;height:21px;padding:2px 10px;box-sizing:border-box;border-radius:5px;gap:4px;background:#f2f3f5;font-family:Helvetica Neue;font-size:14px;font-weight:500;line-height:17px;letter-spacing:0em;text-align:left}.roleListWrapper .roleList .role .infomation .tags .action[data-v-d4dce6b2]{font-family:Helvetica 
Neue;font-size:16px;font-weight:500;line-height:20px;letter-spacing:0em;text-align:left;color:#165dff;cursor:pointer;user-select:none}.roleListWrapper .roleList .role .infomation .tags .action[data-v-d4dce6b2]:hover{text-decoration:underline}.loading_wrap[data-v-491f84be]{display:inline-flex;align-items:center}.loading[data-v-491f84be],.loading>span[data-v-491f84be]{position:relative;box-sizing:border-box}.loading[data-v-491f84be]{display:inline-block;font-size:0;color:inherit}.loading>span[data-v-491f84be]{display:inline-block;float:none;background-color:currentColor;border:0 solid inherit}.loading[data-v-491f84be]{width:27px;height:9px}.loading>span[data-v-491f84be]{width:5px;height:5px;margin:2px;border-radius:100%;animation:ball-beat-491f84be .7s -.15s infinite linear}.loading>span[data-v-491f84be]:nth-child(2n-1){animation-delay:-.5s}@keyframes ball-beat-491f84be{50%{opacity:.2;transform:scale(.75)}to{opacity:1;transform:scale(1)}}.message_info[data-v-de77c762]{display:flex;padding:16px;gap:16px}.message_info .avatar[data-v-de77c762]{width:40px;height:40px;position:relative;flex-shrink:0}.message_info .avatar[data-v-de77c762]:after{content:"";position:absolute;width:8px;height:8px;border-radius:50%;background-color:currentColor;right:6px;bottom:0}.message_info[data-v-de77c762] .avatar .arco-avatar-text{color:#fff}.message_info .info_box[data-v-de77c762]{display:flex;flex-direction:column;gap:8px;width:100%;overflow:hidden}.message_info .info_box .item_info[data-v-de77c762]{display:flex;gap:16px;font-size:14px;font-weight:400;color:#4e5969}.message_info .info_box .item_info .name[data-v-de77c762]{font-weight:500;color:var(--light-text-color-text-1, #1d2129)}.message_info .info_box .item_info .time[data-v-de77c762]{color:#86909c;font-size:14px;font-weight:400}.message_info .info_box .item_info .responseSwitcher[data-v-de77c762]{display:flex;align-items:center;column-gap:4px;color:#4e5969;user-select:none;font-size:14px;font-weight:400;margin-left:-8px;margin-right:-8px}.message_info .info_box .item_info .responseSwitcher>svg[data-v-de77c762]{cursor:pointer}.message_info .info_box .item_info .responseSwitcher .disabled[data-v-de77c762]{cursor:not-allowed;color:#c9cdd4}.message_info .info_box .item_info .rate_wrap[data-v-de77c762]{position:relative;display:none;align-items:center;height:22px;gap:8px}.message_info .info_box .item_info .rate_wrap[data-v-de77c762] .rate_box{background-color:#fff;border-radius:4px;padding:8px 16px;height:32px;font-size:12px;box-sizing:border-box}.message_info .info_box .item_info .rate_wrap[data-v-de77c762] .rate_box .arco-rate{font-size:16px;min-height:16px}.message_info .info_box:hover .rate_wrap[data-v-de77c762]{display:flex}.message_info .info_box .message_wrap[data-v-de77c762]{width:100%}.message_info .info_box .answer_feedback[data-v-de77c762]{position:relative;min-width:440px;max-width:min(700px,70%);display:flex;flex-direction:row;justify-content:flex-start;align-items:center;column-gap:32px;padding:12px 16px;box-sizing:border-box;color:var(--color-text-1);background-color:#fff;font-size:14px;font-weight:400;line-height:22px}.message_info .info_box .answer_feedback .icon_close[data-v-de77c762]{position:absolute;top:5px;right:5px;font-size:12px;font-weight:300}.message_info .info_box .answer_feedback .feedback[data-v-de77c762]{display:flex;align-items:center;column-gap:6px;cursor:pointer}.message_info .info_box .answer_feedback .feedback.active[data-v-de77c762],.message_info .info_box .answer_feedback 
.feedback[data-v-de77c762]:hover{color:#165dff}.message_info .right_pos[data-v-de77c762]{justify-content:flex-end;align-items:flex-end}.message_container[data-v-6f899d6f]{width:100%;display:flex;flex-direction:column;align-items:flex-end}.message_container .user_message[data-v-6f899d6f]{min-width:440px;max-width:min(700px,70%);position:relative;background:#eaf3ff;padding:16px 64px 16px 16px;border-radius:4px;box-sizing:border-box;display:flex;flex-direction:column;row-gap:30px}.message_container .user_message .msg_wrap[data-v-6f899d6f]{display:flex;align-items:center}.message_container .user_message[data-v-6f899d6f] .msg_wrap .arco-textarea-wrapper{background-color:transparent;border-color:transparent;box-shadow:none;padding:0}.message_container .user_message[data-v-6f899d6f] .msg_wrap .arco-textarea-wrapper .arco-textarea{padding:0}.message_container .user_message .icon_more_wrap[data-v-6f899d6f]{position:absolute;right:16px;top:16px;cursor:pointer}.message_container .user_message .icon_more_wrap .icon_more[data-v-6f899d6f]{display:none}.message_container .user_message .btn_group[data-v-6f899d6f]{align-self:flex-end;display:flex;column-gap:8px;margin-right:-48px}.message_container:hover .icon_more_wrap .icon_more[data-v-6f899d6f]{display:block}.step_skill[data-v-17bf8a16]{display:flex;align-items:center;margin-left:54px;font-weight:400;color:#1d2129;font-size:14px;line-height:22px;column-gap:8px}.step_skill .trigger[data-v-17bf8a16]{color:#4e5969;margin-right:16px;display:flex;align-items:center;column-gap:8px}.step_skill .link_group[data-v-17bf8a16]{margin-left:16px;display:flex;column-gap:8px}.step_skill .link_group>a[data-v-17bf8a16]{display:flex;align-items:center;column-gap:4px}.step_item[data-v-690b1166]{width:100%;height:100%}.step_item+.step_item[data-v-690b1166]{margin-top:16px}.step_item .step[data-v-690b1166]{width:100%;display:flex;align-items:center;min-height:initial!important}.step_item .step .step_title_wrap[data-v-690b1166]{width:100%;display:flex;flex-direction:row;align-items:center;font-weight:400}.step_item .step .step_title_wrap .title[data-v-690b1166]{color:#1d2129;font-size:16px;line-height:24px}.step_item .step .step_title_wrap .icon_loading[data-v-690b1166]{display:inline-flex;align-items:center;margin:0 8px}.step_item .step .step_title_wrap .description[data-v-690b1166]{margin-left:auto;color:#4e5969;font-size:14px;line-height:22px;text-wrap:wrap;max-width:500px}.step_item[data-v-690b1166] .step .arco-steps-item-content{flex:1}.step_item[data-v-690b1166] .step .arco-steps-item-content .arco-steps-item-title{width:100%;height:100%}.step_item .step_info[data-v-690b1166]{height:100%;margin-top:4px;display:flex;flex-direction:column;row-gap:8px}.step_item .step_info .step_content_wrap[data-v-690b1166]{display:flex;column-gap:28px;min-height:50px}.step_item .step_info .step_content_wrap .divider[data-v-690b1166]{flex-shrink:0;height:inherit;background:#165dff}.step_item .step_info .step_content_wrap .divider.active[data-v-690b1166]{background:#e5e6eb}.step_item .step_info .step_content_wrap .step_content[data-v-690b1166]{width:calc(100% - 54px);padding-top:3.5px;box-sizing:border-box}pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}.hljs{color:#abb2bf;background:#282c34}.hljs-comment,.hljs-quote{color:#5c6370;font-style:italic}.hljs-doctag,.hljs-formula,.hljs-keyword{color:#c678dd}.hljs-deletion,.hljs-name,.hljs-section,.hljs-selector-tag,.hljs-subst{color:#e06c75}.hljs-literal{color:#56b6c2}.hljs-addition,.hljs-attribute,.hljs-meta 
.hljs-string,.hljs-regexp,.hljs-string{color:#98c379}.hljs-attr,.hljs-number,.hljs-selector-attr,.hljs-selector-class,.hljs-selector-pseudo,.hljs-template-variable,.hljs-type,.hljs-variable{color:#d19a66}.hljs-bullet,.hljs-link,.hljs-meta,.hljs-selector-id,.hljs-symbol,.hljs-title{color:#61aeee}.hljs-built_in,.hljs-class .hljs-title,.hljs-title.class_{color:#e6c07b}.hljs-emphasis{font-style:italic}.hljs-strong{font-weight:700}.hljs-link{text-decoration:underline}.operation_wrap[data-v-ea9cc5f9]{width:100%;position:relative}.operation_wrap .operate_icon[data-v-ea9cc5f9]{display:inline-block;position:absolute;left:calc(100% + 8px);top:0px;width:32px;height:32px;text-align:center;line-height:32px;box-sizing:border-box;border-radius:4px;background-color:#fff;color:#4e5969;cursor:pointer;visibility:hidden;transition:all .2s ease}.operation_wrap .operate_icon[data-v-ea9cc5f9]:hover{background-color:var(--color-fill-2)}.operation_wrap .operate_icon:hover svg[data-v-ea9cc5f9]{transform:scale(1.1)}.operation_wrap:hover .operate_icon[data-v-ea9cc5f9]{visibility:visible}.code_container[data-v-4f00a864]{display:flex;flex-direction:column;border-radius:6px;overflow:hidden}.code_container .tool_wrap[data-v-4f00a864]{font-size:10px;line-height:24px;display:flex;color:#d9d9e3;background:rgb(52,53,65);padding:8px 16px}.code_container .tool_wrap .copy_btn[data-v-4f00a864]{font-size:12px;color:#d9d9e3;background-color:transparent;margin-left:auto;cursor:pointer;outline:none;border:none;display:flex;padding:0;gap:6px;align-items:center}.code_container .tool_wrap .copy_btn .copy_icon[data-v-4f00a864]{width:16px;height:16px;display:block}.markdown_wrap{padding:16px;border-radius:4px;box-sizing:border-box;font-size:14px;font-weight:400;color:#1d2129;line-height:22px;background-color:#fff}.markdown_wrap>p:first-child{margin-top:0}.markdown_wrap>p:last-child{margin-bottom:0}.markdown_wrap pre{margin:0;padding:0}.markdown_wrap .hljs_code{width:100%;box-sizing:border-box;padding:15px;overflow-x:auto}.chatHistoryImageItem{background-color:#fff;display:inline-flex;flex-wrap:wrap;max-width:324px;padding:8px;gap:4px}.chatHistoryImageItem .imageItem{width:160px;height:160px;position:relative;display:flex;justify-content:center;align-items:center}.chatHistoryImageItem .imageItem .n-image{height:100%;width:100%}.chatHistoryImageItem .imageItem img{width:100%;height:100%}.chatHistoryImageItem .imageItem .maxCover{height:100%;width:100%;position:absolute;left:0;top:0;background:rgba(0,0,0,.4);display:flex;justify-content:center;align-items:center}.chatHistoryAudioItem{width:574px;display:inline-block;padding:4px 16px;background-color:#fff}.chatHistoryAudioItem .audio{display:flex;align-items:center;gap:16px;color:#4e5969}.chatHistoryAudioItem .audio .control{font-size:32px;color:#165dff;cursor:pointer}.chatHistoryAudioItem audio{display:none}.error_msg[data-v-84a7773a]{border:1px solid #f53f3f;background-color:#f53f3f1a;padding:16px;border-radius:4px;font-size:14px;font-weight:400;color:#1d2129;line-height:22px;box-sizing:border-box;white-space:normal;overflow-wrap:break-word}.agent_message_wrap[data-v-898355de]{display:flex;flex-direction:column;row-gap:8px}.steps_container[data-v-77479ba1]{width:100%;margin-top:12px}.steps_container .steps_wrap[data-v-77479ba1]{min-width:440px;max-width:min(700px,70%)}.steps_container .steps_wrap[data-v-77479ba1] .arco-steps-icon{background-color:#fff0!important}.status_btn[data-v-240aae5d]{position:absolute;top:0;right:0;transform:translateY(calc(-100% - 16px))}.status_btn 
.error_msg[data-v-240aae5d]{font-size:14px;font-weight:400;line-height:22px;color:#86909c}.chatRoomWrapper[data-v-ae197aef]{flex:1;height:100%;background-color:#f8faff;padding:40px 100px;box-sizing:border-box;display:flex;flex-direction:column;align-items:center;font-family:Helvetica Neue;overflow:hidden}.chatRoomWrapper .visionWrapper[data-v-ae197aef]{width:100%;flex:1;display:flex;overflow:hidden}.chatRoomWrapper .visionWrapper .emptyWrapper[data-v-ae197aef]{padding:53px 0;box-sizing:border-box;display:flex;flex-direction:column;align-items:center}.chatRoomWrapper .visionWrapper .emptyWrapper .descWrapper[data-v-ae197aef]{flex:1;width:100%;display:flex;flex-direction:column;align-items:center;justify-content:center;gap:16px}.chatRoomWrapper .visionWrapper .emptyWrapper .descWrapper .title[data-v-ae197aef]{font-size:40px;font-weight:500;line-height:49px;letter-spacing:0em;text-align:center;color:#86909c}.chatRoomWrapper .visionWrapper .emptyWrapper .descWrapper .desc[data-v-ae197aef]{text-align:center;font-size:20px;line-height:32px}.chatRoomWrapper .visionWrapper .emptyWrapper .descWrapper .desc .text2[data-v-ae197aef]{color:#86909c;font-weight:500}.chatRoomWrapper .visionWrapper .emptyWrapper .descWrapper .desc .text3[data-v-ae197aef]{font-weight:400;color:#c9cdd4}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper[data-v-ae197aef]{width:900px;height:136px;display:flex;flex-direction:row;flex-wrap:wrap;gap:20px;align-items:center;justify-content:center}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper .button[data-v-ae197aef]{width:440px;height:58px;padding:18px 24px;box-sizing:border-box;border-radius:10px;gap:10px;background:linear-gradient(180deg,#ffffff 0%,#f4f4f4 100%);border:1.32px solid #e5e6eb;cursor:pointer;transition:all .3s;display:flex;align-items:center;justify-content:center}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper .button span[data-v-ae197aef]{color:#1d2129;font-size:16px;font-style:normal;font-weight:500;line-height:normal;word-wrap:normal;word-break:keep-all;white-space:nowrap}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper .button[data-v-ae197aef]:hover{border-radius:10px;background:linear-gradient(0deg,#2c67f7 0%,#5486ff 100%)!important}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper .button:hover span[data-v-ae197aef]{color:#fff!important}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper .button[data-v-ae197aef]:active{transform:scale(.98)}.chatRoomWrapper .visionWrapper .chatWrapper[data-v-ae197aef]{width:100%;flex:1;display:flex;overflow:hidden;position:relative}.chatRoomWrapper .visionWrapper .chatWrapper .msg_history_area[data-v-ae197aef]{flex:1;display:flex;flex-direction:column;overflow:scroll;position:relative;padding-top:10px}.chatRoomWrapper .visionWrapper .chatWrapper .msg_history_area .scroll_wrap[data-v-ae197aef]{width:100%;height:100%;overflow-y:auto}.chatRoomWrapper .visionWrapper .chatWrapper .msg_history_area .msg_text[data-v-ae197aef]{min-width:440px;max-width:min(700px,70%);padding:16px;border-radius:4px;background-color:#fff;font-size:14px;font-weight:400;color:#1d2129;line-height:22px;box-sizing:border-box;white-space:normal;overflow-wrap:break-word}.chatRoomWrapper .visionWrapper .chatWrapper .msg_history_area .bottom_trigger[data-v-ae197aef]{width:200px;position:absolute;bottom:0px;left:calc(50% - 150px)}.chatRoomWrapper .inputWrapper[data-v-ae197aef]{width:100%;height:59px}.chatRoomWrapper .inputWrapper 
.inputInner[data-v-ae197aef]{display:flex;width:100%;height:100%;align-items:center;gap:10px;box-sizing:border-box;padding:12px;border-radius:10px;border:2px solid #e1e3e8;background:#fff;box-shadow:2.6px 2.6px 8px #00000014 inset}.chatRoomWrapper .inputWrapper .inputInner input[data-v-ae197aef]{flex:1;height:100%;color:#1d2129;font-size:16px;font-style:normal;font-weight:400;line-height:0%;border:none;outline:none}.chatRoomWrapper .inputWrapper .inputInner input[data-v-ae197aef]:is(:disabled){background:none;cursor:not-allowed}.chatRoomWrapper .inputWrapper .inputInner input[data-v-ae197aef]:is(:disabled)::placeholder{color:#86909c}.chatRoomWrapper .inputWrapper .inputInner .sendBtn[data-v-ae197aef]{width:42px;height:42px;flex-shrink:0;background:#165dff;border-radius:8px;margin-right:-4px;display:flex;align-items:center;justify-content:center;user-select:none;cursor:pointer;transition:transform .3s;border:none}.chatRoomWrapper .inputWrapper .inputInner .sendBtn[data-v-ae197aef]:is(:not(:disabled)):hover{transform:scale(1.1)}.chatRoomWrapper .inputWrapper .inputInner .sendBtn[data-v-ae197aef]:is(:not(:disabled)):active{transform:scale(.98)}.chatRoomWrapper .emptyWrapper[data-v-ae197aef]{width:100%;height:100%;display:flex}.chatWrapper[data-v-7d0d8d24]{width:100%;height:100vh;display:flex;flex-direction:row;overflow:hidden}.hfHomeWrapper[data-v-7444cb54]{width:100%;height:100vh} diff --git a/spaces/sunshineatnoon/TextureScraping/libs/data_coco_stuff_geo_pho.py b/spaces/sunshineatnoon/TextureScraping/libs/data_coco_stuff_geo_pho.py deleted file mode 100644 index 53344c1fe090e79f0350649f30acbf5b9a6988b9..0000000000000000000000000000000000000000 --- a/spaces/sunshineatnoon/TextureScraping/libs/data_coco_stuff_geo_pho.py +++ /dev/null @@ -1,145 +0,0 @@ -import cv2 -import torch -from PIL import Image -import os.path as osp -import numpy as np -from torch.utils import data -import torchvision.transforms as transforms -import torchvision.transforms.functional as TF -import torchvision.transforms.functional as TF -from .custom_transform import * - -class _Coco164kCuratedFew(data.Dataset): - """Base class - This contains fields and methods common to all COCO 164k curated few datasets: - - (curated) Coco164kFew_Stuff - (curated) Coco164kFew_Stuff_People - (curated) Coco164kFew_Stuff_Animals - (curated) Coco164kFew_Stuff_People_Animals - - """ - def __init__(self, root, img_size, crop_size, split = "train2017"): - super(_Coco164kCuratedFew, self).__init__() - - # work out name - self.split = split - self.root = root - self.include_things_labels = False # people - self.incl_animal_things = False # animals - - version = 6 - - name = "Coco164kFew_Stuff" - if self.include_things_labels and self.incl_animal_things: - name += "_People_Animals" - elif self.include_things_labels: - name += "_People" - elif self.incl_animal_things: - name += "_Animals" - - self.name = (name + "_%d" % version) - - print("Specific type of _Coco164kCuratedFew dataset: %s" % self.name) - - self._set_files() - - self.transform = transforms.Compose([ - transforms.Resize(int(img_size)), - transforms.RandomCrop(crop_size)]) - - N = len(self.files) - # eqv transform - self.random_horizontal_flip = RandomHorizontalTensorFlip(N=N) - self.random_vertical_flip = RandomVerticalFlip(N=N) - self.random_resized_crop = RandomResizedCrop(N=N, res=288) - - # photometric transform - self.random_color_brightness = [RandomColorBrightness(x=0.3, p=0.8, N=N) for _ in range(2)] # Control this later (NOTE)] - self.random_color_contrast = 
[RandomColorContrast(x=0.3, p=0.8, N=N) for _ in range(2)] # Control this later (NOTE) - self.random_color_saturation = [RandomColorSaturation(x=0.3, p=0.8, N=N) for _ in range(2)] # Control this later (NOTE) - self.random_color_hue = [RandomColorHue(x=0.1, p=0.8, N=N) for _ in range(2)] # Control this later (NOTE) - self.random_gray_scale = [RandomGrayScale(p=0.2, N=N) for _ in range(2)] - self.random_gaussian_blur = [RandomGaussianBlur(sigma=[.1, 2.], p=0.5, N=N) for _ in range(2)] - - self.eqv_list = ['random_crop', 'h_flip'] - self.inv_list = ['brightness', 'contrast', 'saturation', 'hue', 'gray', 'blur'] - - self.transform_tensor = TensorTransform() - - - def _set_files(self): - # Create data list by parsing the "images" folder - if self.split in ["train2017", "val2017"]: - file_list = osp.join(self.root, "curated", self.split, self.name + ".txt") - file_list = tuple(open(file_list, "r")) - file_list = [id_.rstrip() for id_ in file_list] - - self.files = file_list - print("In total {} images.".format(len(self.files))) - else: - raise ValueError("Invalid split name: {}".format(self.split)) - - def transform_eqv(self, indice, image): - if 'random_crop' in self.eqv_list: - image = self.random_resized_crop(indice, image) - if 'h_flip' in self.eqv_list: - image = self.random_horizontal_flip(indice, image) - if 'v_flip' in self.eqv_list: - image = self.random_vertical_flip(indice, image) - - return image - - def transform_inv(self, index, image, ver): - """ - Hyperparameters same as MoCo v2. - (https://github.com/facebookresearch/moco/blob/master/main_moco.py) - """ - if 'brightness' in self.inv_list: - image = self.random_color_brightness[ver](index, image) - if 'contrast' in self.inv_list: - image = self.random_color_contrast[ver](index, image) - if 'saturation' in self.inv_list: - image = self.random_color_saturation[ver](index, image) - if 'hue' in self.inv_list: - image = self.random_color_hue[ver](index, image) - if 'gray' in self.inv_list: - image = self.random_gray_scale[ver](index, image) - if 'blur' in self.inv_list: - image = self.random_gaussian_blur[ver](index, image) - - return image - - def transform_image(self, index, image): - image1 = self.transform_inv(index, image, 0) - image1 = self.transform_tensor(image) - - image2 = self.transform_inv(index, image, 1) - #image2 = TF.resize(image2, self.crop_size, Image.BILINEAR) - image2 = self.transform_tensor(image2) - return image1, image2 - - def __getitem__(self, index): - # same as _Coco164k - # Set paths - image_id = self.files[index] - image_path = osp.join(self.root, "images", self.split, image_id + ".jpg") - # Load an image - ori_img = Image.open(image_path) - ori_img = self.transform(ori_img) - - image1, image2 = self.transform_image(index, ori_img) - if image1.shape[0] < 3: - image1 = image1.repeat(3, 1, 1) - if image2.shape[0] < 3: - image2 = image2.repeat(3, 1, 1) - - rets = [] - rets.append(image1) - rets.append(image2) - rets.append(index) - - return rets - - def __len__(self): - return len(self.files) diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Model Contract Prestari Servicii Lucrari Constructii VERIFIED.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Model Contract Prestari Servicii Lucrari Constructii VERIFIED.md deleted file mode 100644 index e80fdb6051e6b12854ae84e9e2e52ca5ef43fa5b..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Model Contract Prestari Servicii Lucrari Constructii VERIFIED.md 
+++ /dev/null @@ -1,12 +0,0 @@ -

Model Contract Prestari Servicii Lucrari Constructii


Download ✸✸✸ https://cinurl.com/2uEYtw



-
-. Definitia lui, conform unui drept international care aparut in jurisprudenta Greciei, a fost de o lucrare constructiva". - -Iata o fotografie dintr-un interviu-retract din 2005 a fostului primar liberal cu nimeni altul decat primarul modernist pro-occidental. Acesta sustine ca a fost cel care a dezvoltat principiul parerii constructiviste la numarul cateva mii de sferturi din Alexandria. La fel de ciudat este ceea ce s-a prezentat la "Jurnalul National" ca fiind un interviu-retract, de la luna decembrie 2014, cu liderul PSD, Liviu Dragnea. - -Cu alte cuvinte, un politician care s-a aflat, cu tot cu primarul Alexandriei, in liniste constructiviste a fost ales, s-a supus imediat, si a castigat, presedintia unei institutii centrale. Iata ce declara cu multi ani in urma prezentului primar: - -„Alexandria oficial si institutionat din 1952 este o cuvinta bunica. Cine mai de care, nu numai ca si-a dat dreptate, dar si se pricepe la, si se indeparteaza de sistemul de drept, nu numai ca si-a dus sub ultima tara interesul si nici sub interesul lui Dumnezeu, sunt spuse foarte multe de acolo. Dar ce nu ar fi de vazut, o noua Alexandrie, o Alexandrie moderna, una in care vedem si materialism, si in care am putea schimba tara. Dar Alexandria a fost un proiect care a fost depus si realizat in fata tuturor panteilor, un proiect care avea ca obiectiv primul principiu, care a fost primarul nostru, primarul modernist, G.Ionescu, care a construit Alexandria, a avut o at 4fefd39f24
-
-
-

diff --git a/spaces/tammm/vits-models/models.py b/spaces/tammm/vits-models/models.py deleted file mode 100644 index 8353b867f441de7e4d05aef980e672899c3a8889..0000000000000000000000000000000000000000 --- a/spaces/tammm/vits-models/models.py +++ /dev/null @@ -1,533 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, 
x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = 
out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - 
# 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, 
hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/tang155/bingo/src/lib/storage.ts b/spaces/tang155/bingo/src/lib/storage.ts deleted file mode 100644 index a5b7825c4f76a28c704da512ae39e8bb45addd09..0000000000000000000000000000000000000000 --- a/spaces/tang155/bingo/src/lib/storage.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { getMany, set, del, clear } from 'idb-keyval'; - -export const Storage = { - async get(key: string | string[] | null): Promise { - if (key === null) return null; - if (typeof key === 'string') { - key = [key] - } - const returnData: Record = {} - const values = await getMany(key) - key.forEach((k, idx)=> { - returnData[k] = values[idx] - }) - return returnData; - }, - async set(object: any) { - for (let key of Object.keys(object)) { - await set(key, object[key]) - } - }, - async remove(key: string) { - return del(key); - }, - async clear() { - return clear(); - } -} diff --git a/spaces/terfces0erbo/CollegeProjectV2/D16 Phoscyon VSTi V1 6 0 Incl HOT! Keygen AiRl.md b/spaces/terfces0erbo/CollegeProjectV2/D16 Phoscyon VSTi V1 6 0 Incl HOT! Keygen AiRl.md deleted file mode 100644 index 194766d988a49ed3accb2d1b05ee0f5cad29e636..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/D16 Phoscyon VSTi V1 6 0 Incl HOT! Keygen AiRl.md +++ /dev/null @@ -1,6 +0,0 @@ -

D16 Phoscyon VSTi V1 6 0 Incl Keygen AiRl


Download https://bytlly.com/2uGiL5



-
-D16 Phoscyon VSTi V1 6 0 Incl Keygen AiRl ->>> DOWNLOAD Angular Momentum Chromium VA v1.0 i-NEMESiS ... AIR.rar. Arturia.CS-80V.VSTi.RTAS.v1.6. 4d29de3e1b
-
-
-

diff --git a/spaces/terfces0erbo/CollegeProjectV2/Eat Pray Love Movie Download In Hindi.md b/spaces/terfces0erbo/CollegeProjectV2/Eat Pray Love Movie Download In Hindi.md deleted file mode 100644 index 74c361ef19b169260f9a4ff9b95559ed94b04cc2..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Eat Pray Love Movie Download In Hindi.md +++ /dev/null @@ -1,6 +0,0 @@ -

Eat Pray Love Movie Download In Hindi


Download Zip https://bytlly.com/2uGkN2



- -The actress, who is in Pataudi, Haryana, shooting for her film Eat, Pray, Love, sought the blessings of Swami Dharmdev, in whose Hari Mandir ... 1fdad05405
-
-
-

diff --git a/spaces/test-org-q/stable-diffusion/app.py b/spaces/test-org-q/stable-diffusion/app.py deleted file mode 100644 index dd19894386e65d34898ce30088f686ec56840485..0000000000000000000000000000000000000000 --- a/spaces/test-org-q/stable-diffusion/app.py +++ /dev/null @@ -1,371 +0,0 @@ -import gradio as gr -#import torch -#from torch import autocast -#from diffusers import StableDiffusionPipeline -from datasets import load_dataset -from PIL import Image -#from io import BytesIO -#import base64 -import re -import os -import requests - -from share_btn import community_icon_html, loading_icon_html, share_js - -model_id = "CompVis/stable-diffusion-v1-4" -device = "cuda" - -#If you are running this code locally, you need to either do a 'huggingface-cli login` or paste your User Access Token from here https://huggingface.co/settings/tokens into the use_auth_token field below. -#pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True, revision="fp16", torch_dtype=torch.float16) -#pipe = pipe.to(device) -#torch.backends.cudnn.benchmark = True - -#When running locally, you won`t have access to this, so you can remove this part -word_list_dataset = load_dataset("stabilityai/word-list", data_files="list.txt", use_auth_token=True) -word_list = word_list_dataset["train"]['text'] - -is_gpu_busy = False -def infer(prompt): - global is_gpu_busy - samples = 4 - steps = 50 - scale = 7.5 - #When running locally you can also remove this filter - for filter in word_list: - if re.search(rf"\b{filter}\b", prompt): - raise gr.Error("Unsafe content found. Please try again with different prompts.") - - #generator = torch.Generator(device=device).manual_seed(seed) - #print("Is GPU busy? ", is_gpu_busy) - images = [] - #if(not is_gpu_busy): - # is_gpu_busy = True - # images_list = pipe( - # [prompt] * samples, - # num_inference_steps=steps, - # guidance_scale=scale, - #generator=generator, - # ) - # is_gpu_busy = False - # safe_image = Image.open(r"unsafe.png") - # for i, image in enumerate(images_list["sample"]): - # if(images_list["nsfw_content_detected"][i]): - # images.append(safe_image) - # else: - # images.append(image) - #else: - url = os.getenv('JAX_BACKEND_URL') - payload = {'prompt': prompt} - images_request = requests.post(url, json = payload) - for image in images_request.json()["images"]: - image_b64 = (f"data:image/png;base64,{image}") - images.append(image_b64) - - return images, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True) - - -css = """ - .gradio-container { - font-family: 'IBM Plex Sans', sans-serif; - } - .gr-button { - color: white; - border-color: black; - background: black; - } - input[type='range'] { - accent-color: black; - } - .dark input[type='range'] { - accent-color: #dfdfdf; - } - .container { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; - } - #gallery { - min-height: 22rem; - margin-bottom: 15px; - margin-left: auto; - margin-right: auto; - border-bottom-right-radius: .5rem !important; - border-bottom-left-radius: .5rem !important; - } - #gallery>div>.h-full { - min-height: 20rem; - } - .details:hover { - text-decoration: underline; - } - .gr-button { - white-space: nowrap; - } - .gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - 
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; - } - #advanced-btn { - font-size: .7rem !important; - line-height: 19px; - margin-top: 12px; - margin-bottom: 12px; - padding: 2px 8px; - border-radius: 14px !important; - } - #advanced-options { - display: none; - margin-bottom: 20px; - } - .footer { - margin-bottom: 45px; - margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } - .acknowledgments h4{ - margin: 1.25em 0 .25em 0; - font-weight: bold; - font-size: 115%; - } - #container-advanced-btns{ - display: flex; - flex-wrap: wrap; - justify-content: space-between; - align-items: center; - } - .animate-spin { - animation: spin 1s linear infinite; - } - @keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } - } - #share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; - } - #share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; - } - #share-btn * { - all: unset; - } - .gr-form{ - flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0; - } - #prompt-container{ - gap: 0; - } -""" - -block = gr.Blocks(css=css) - -examples = [ - [ - 'A high tech solarpunk utopia in the Amazon rainforest', -# 4, -# 45, -# 7.5, -# 1024, - ], - [ - 'A pikachu fine dining with a view to the Eiffel Tower', -# 4, -# 45, -# 7, -# 1024, - ], - [ - 'A mecha robot in a favela in expressionist style', -# 4, -# 45, -# 7, -# 1024, - ], - [ - 'an insect robot preparing a delicious meal', -# 4, -# 45, -# 7, -# 1024, - ], - [ - "A small cabin on top of a snowy mountain in the style of Disney, artstation", -# 4, -# 45, -# 7, -# 1024, - ], -] - - -with block: - gr.HTML( - """ -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - -

- Stable Diffusion Demo -

-
-

- Stable Diffusion is a state of the art text-to-image model that generates - images from text.
For faster generation and forthcoming API - access you can try - DreamStudio Beta -

-
- """ - ) - with gr.Group(): - with gr.Box(): - with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True): - text = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=1, - placeholder="Enter your prompt", - elem_id="prompt-text-input", - ).style( - border=(True, False, True, True), - rounded=(True, False, False, True), - container=False, - ) - btn = gr.Button("Generate image").style( - margin=False, - rounded=(False, True, True, False), - full_width=False, - ) - - gallery = gr.Gallery( - label="Generated images", show_label=False, elem_id="gallery" - ).style(grid=[2], height="auto") - - with gr.Group(elem_id="container-advanced-btns"): - advanced_button = gr.Button("Advanced options", elem_id="advanced-btn") - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html, visible=False) - loading_icon = gr.HTML(loading_icon_html, visible=False) - share_button = gr.Button("Share to community", elem_id="share-btn", visible=False) - - with gr.Row(elem_id="advanced-options"): - gr.Markdown("Advanced settings are temporarily unavailable") - samples = gr.Slider(label="Images", minimum=1, maximum=4, value=4, step=1) - steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=45, step=1) - scale = gr.Slider( - label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1 - ) - seed = gr.Slider( - label="Seed", - minimum=0, - maximum=2147483647, - step=1, - randomize=True, - ) - - ex = gr.Examples(examples=examples, fn=infer, inputs=text, outputs=[gallery, community_icon, loading_icon, share_button], cache_examples=False) - ex.dataset.headers = [""] - - text.submit(infer, inputs=text, outputs=[gallery, community_icon, loading_icon, share_button], postprocess=False) - btn.click(infer, inputs=text, outputs=[gallery, community_icon, loading_icon, share_button], postprocess=False) - - advanced_button.click( - None, - [], - text, - _js=""" - () => { - const options = document.querySelector("body > gradio-app").querySelector("#advanced-options"); - options.style.display = ["none", ""].includes(options.style.display) ? "flex" : "none"; - }""", - ) - share_button.click( - None, - [], - [], - _js=share_js, - ) - gr.HTML( - """ - -
-

LICENSE

-The model is licensed with a CreativeML Open RAIL-M license. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please read the license

-

Biases and content acknowledgment

-Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the LAION-5B dataset, which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the model card

-
- """ - ) - -block.queue(max_size=5, concurrency_count=2).launch() \ No newline at end of file diff --git a/spaces/thelou1s/stabilityai-stable-diffusion-2/README.md b/spaces/thelou1s/stabilityai-stable-diffusion-2/README.md deleted file mode 100644 index 90841ef44f7e64dc7e6d15aa3e8c6dfdc59de248..0000000000000000000000000000000000000000 --- a/spaces/thelou1s/stabilityai-stable-diffusion-2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stabilityai Stable Diffusion 2 -emoji: 📚 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/CrossManager The Ultimate Solution for CAD File Conversion.md b/spaces/tialenAdioni/chat-gpt-api/logs/CrossManager The Ultimate Solution for CAD File Conversion.md deleted file mode 100644 index ab4d0721c95475c9e1e241586391444ef4bb1a75..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/CrossManager The Ultimate Solution for CAD File Conversion.md +++ /dev/null @@ -1,30 +0,0 @@ - -

CrossManager: A Standalone Software for Converting CAD Files

-

CrossManager is a software developed by DATAKIT that allows you to convert files from most CAD formats. With CrossManager, you can easily translate one or several CAD files into the format you want, without needing the original CAD software.

-

cross manager crack download


Download Zip 🌟 https://urlcod.com/2uK1tg



-

CrossManager supports a wide range of input and output formats, including 3DXML, 3MF, ACIS, CATIA V4/V5/V6, CGR, COLLADA, FBX, glTF, IFC, IGES, JT, OBJ, Open CASCADE, Parasolid, PDF 3D, PRC, Robcad, SOLIDWORKS 3D, STEP, STL, UG NX 3D, Unisurf and VDA. You can also convert 2D files such as DWG/DXF and CATIA V4/V5.

-

CrossManager is available in two types of licenses: single licenses for working on a single workstation and floating licenses for using the software on several workstations. You can order your custom configuration according to your needs and only buy the formats that you need.

-

To download CrossManager, you can visit the official website of DATAKIT and choose the version that suits your system. The latest version is 2023.2 and it was updated on 2023-04-03. You can also download the documentation for more technical information.

-

If you are looking for an alternative software for converting CAD files, you can also check out SIMATIC Version Cross Manager, a product from Siemens that allows you to import and export data between different versions of SIMATIC PCS 7 engineering systems. Another option is Motrix, a cross-platform, open-source download manager that supports HTTP/FTP/SFTP/Bittorrent/Magnet protocols.

-

How to Use CrossManager

-

CrossManager is easy to use and has a user-friendly interface. To convert a CAD file, you just need to follow these steps:

-
    -
  1. Select the input format from the drop-down list or drag and drop the file into the software.
  2. -
  3. Select the output format from the drop-down list or choose one of the predefined configurations.
  4. -
  5. Click on the "Convert" button and wait for the process to finish.
  6. -
  7. Open the converted file with your preferred software or viewer.
  8. -
-

You can also batch convert multiple files at once by selecting them in the file explorer and clicking on the "Batch" button. You can also customize the conversion settings by clicking on the "Options" button and adjusting the parameters according to your needs.

-

CrossManager is compatible with Windows, Linux and Mac OS operating systems. It requires a minimum of 2 GB of RAM and 100 MB of disk space. It also supports multi-core processors and 64-bit architectures for faster conversions.

Why Choose CrossManager

-

CrossManager is a reliable and efficient software for converting CAD files. It has many advantages over other similar products, such as:

-
    -
  • It supports a large number of input and output formats, covering most of the CAD software on the market.
  • -
  • It does not require the installation of the original CAD software, saving you time and money.
  • -
  • It preserves the quality and integrity of the original CAD file, ensuring a faithful conversion.
  • -
  • It allows you to customize the conversion settings, giving you more control and flexibility.
  • -
  • It offers a fast and easy conversion process, with a simple and intuitive interface.
  • -
  • It is compatible with different operating systems and platforms, making it accessible to a wide range of users.
  • -
-

CrossManager is a powerful tool for anyone who needs to convert CAD files for different purposes, such as data exchange, collaboration, visualization, analysis or simulation. It is also a useful solution for CAD professionals who want to work with multiple formats without compromising the quality of their work.

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/F1 2010 Crack Only Razor 1911 11 Learn How to Overcome the Steam and Xlive Protection.md b/spaces/tialenAdioni/chat-gpt-api/logs/F1 2010 Crack Only Razor 1911 11 Learn How to Overcome the Steam and Xlive Protection.md deleted file mode 100644 index 2f08ff2c6d0e5a64aac4f8ddd0c80a683e81fa7b..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/F1 2010 Crack Only Razor 1911 11 Learn How to Overcome the Steam and Xlive Protection.md +++ /dev/null @@ -1,88 +0,0 @@ -
-

What is F1 2010 Crack Only Razor 1911 11?

-

If you are a fan of racing games, you might have heard of F1 2010, a simulation game based on the 2010 Formula One season. The game was developed by Codemasters and released in September 2010 for PC, PlayStation 3 and Xbox 360. It features all the official drivers, teams and circuits from the season, as well as a realistic damage model, dynamic weather system and competitive multiplayer modes.

-

However, if you want to play F1 2010 on your PC, you might also have heard of F1 2010 Crack Only Razor 1911 11, a file that allows you to run the game without having to buy it or activate it online. This file was created by Razor 1911, a group of hackers who have been cracking games since 1985. They claim that their crack is a way to protest against the digital rights management (DRM) system that Codemasters used to protect their game from piracy.

-

F1 2010 Crack Only Razor 1911 11


Download Filehttps://urlcod.com/2uKaEq



-

In this article, we will explain what F1 2010 Crack Only Razor 1911 11 is, why people use it, how to use it, what are the risks of using it and what are some alternatives to it. We hope that this article will help you make an informed decision about whether you want to use F1 2010 Crack Only Razor 1911 11 or not.

-

Why do people use F1 2010 Crack Only Razor 1911 11?

-

There are several reasons why some people choose to use F1 2010 Crack Only Razor 1911 11 instead of buying or activating the game legally. Here are some of them:

-
    -
  • They want to save money. Buying a new game can be expensive, especially if you live in a country where games are not easily available or affordable. By using F1 2010 Crack Only Razor 1911 11, they can play the game for free without spending any money.
  • -
  • They want to bypass DRM. DRM is a system that prevents unauthorized copying or distribution of digital content. However, some people argue that DRM also restricts the rights of legitimate customers who have paid for the content. For example, DRM may require online activation or registration, limit the number of installations or devices that can run the content, or prevent offline or modded gameplay. By using F1 2010 Crack Only Razor 1911 11, they can avoid these limitations and play the game as they wish.
  • -
  • They want to try before they buy. Some people may not be sure if they like a game or not before they buy it. They may want to test the game's performance, graphics, gameplay or features before they decide whether to purchase it or not. By using F1 2010 Crack Only Razor 1911 11, they can play a demo version of the game without committing to buying it.
  • -
-

How to use F1 2010 Crack Only Razor 1911 11?

-

If you decide to use F1 2010 Crack Only Razor 1911 11, you will need to follow these steps:

-
    -
  1. Download F1 2010 Crack Only Razor 1911 11. You can find this file on various websites that offer cracked games or torrents. However, be careful when downloading files from unknown sources, as they may contain viruses or malware that can harm your computer.
  2. -
  3. Extract RARs. After downloading F1 2010 Crack Only Razor 1911 11, you will need to extract its contents using a program like WinRAR or WinZip. You should see a folder named "F12010" with an ISO file inside.
  4. -
  5. Mount or Burn ISO. An ISO file is an image of a CD or DVD that contains all the data needed to run a program. You can either mount it on a virtual drive using a program like Daemon Tools or PowerISO, or burn it on a physical disc using a program like Nero or ImgBurn.
  6. -
  7. Install Game. After mounting or burning the ISO file, you can run the setup.exe file inside it and follow the instructions to install F1 2010 on your computer.
  8. -
  9. Copy Crack over original files. After installing F1 2010, you will need to copy the crack files from the folder named "Razor191111" inside F1 2010 Crack Only Razor 1911 11 and paste them over the original files in your installation folder (usually C:\Program Files\Codemasters\F12010). This will overwrite them and make them work without DRM.
  10. -
  11. Enjoy Game. After copying the crack files, you can launch F1 2010 by clicking on F12010_Launcher.exe in your installation folder. You can play the game without having to activate it online or insert any disc.
  12. -
-

What are the risks of using F1 2010 Crack Only Razor 1911 11?

-

While using F1 2010 Crack Only Razor 1911 11 may seem tempting, it also comes with some risks that you should be aware of. Here are some of them:

-
    -
  • You may face legal issues. Using cracked games is illegal in most countries, as it violates the intellectual property rights of the developers and publishers. You may face fines, lawsuits or even jail time if you are caught using or distributing cracked games. You may also lose your access to online services or platforms that detect cracked games, such as Steam or xlive.
  • -
  • You may get viruses or malware. As mentioned earlier, downloading files from unknown sources can be dangerous for your computer's security. You may unknowingly download viruses or malware that can damage your system, steal your personal information, or hijack your online accounts. You may also expose yourself to hackers or scammers who may try to trick you into giving them money or access to your computer.
  • -
  • You may experience bugs or compatibility problems. Cracked games may not work properly on your computer, as they may not be compatible with your hardware, software or operating system. You may encounter errors, crashes, glitches or performance issues that can ruin your gaming experience. You may also miss out on updates, patches or fixes that can improve the game's quality or security.
  • -
-

What are some alternatives to F1 2010 Crack Only Razor 1911 11?

-

If you want to play F1 2010 without using F1 2010 Crack Only Razor 1911 11, you have some alternatives that are legal and safe. Here are some of them:

-

Buying The Game

-

The best way to play F1 2010 is to buy it from official sources, such as Codemasters' website or online stores like Amazon or Steam. By buying the game, you will support the developers and publishers who worked hard to create it. You will also get access to updates, patches or fixes that can improve the game's quality or security. You will also be able to play the game online without any problems.

-


-

Using Steam

-

Another way to play F1 2010 is to use Steam, a digital distribution platform that offers thousands of games for PC, Mac and Linux. You can buy F1 2010 on Steam for a reasonable price and download it to your computer. You will need to create a free Steam account and install the Steam client on your computer. By playing F1 2010 on Steam, you will enjoy some benefits, such as cloud saving, achievements and multiplayer. You will also be able to use Steam's features, such as chat, forums and community guides.

-

Using xlive

-

A third way to play F1 2010 is to use xlive, a service that provides online gaming and social networking for Windows games. You can buy F1 2010 on xlive for a similar price as Steam and download it to your computer. You will need to create a free xlive account and install the xlive client on your computer. By playing F1 2010 on xlive, you will be able to use some features, such as online profile, leaderboards and chat. You will also be able to play with other xlive users or friends.

-

Conclusion

-

In conclusion, F1 2010 Crack Only Razor 1911 11 is a file that allows you to play F1 2010 without buying or activating it online. It was created by Razor 1911, a group of hackers who oppose DRM. Some people use F1 2010 Crack Only Razor 1911 11 because they want to save money, bypass DRM or try before they buy. However, using F1 2010 Crack Only Razor 1911 11 also has some risks, such as legal issues, viruses, bugs and compatibility problems. Therefore, we recommend that you use some alternatives to F1 2010 Crack Only Razor 1911 11, such as buying the game, using Steam or xlive. These alternatives are legal and safe, and they offer more benefits and features than using F1 2010 Crack Only Razor 1911 11.

-

FAQs

-

Here are some common questions and answers about F1 2010 Crack Only Razor 1911 11:

-
    -
  • Q: Is F1 2010 Crack Only Razor 1911 11 legal?
  • -
  • A: No, it is not legal. Using cracked games is illegal in most countries, as it violates the intellectual property rights of the developers and publishers. You may face fines, lawsuits or even jail time if you are caught using or distributing cracked games.
  • -
  • Q: Is F1 2010 Crack Only Razor 1911 11 safe?
  • -
  • A: No, it is not safe. Downloading files from unknown sources can be dangerous for your computer's security. You may unknowingly download viruses or malware that can damage your system, steal your personal information, or hijack your online accounts. You may also expose yourself to hackers or scammers who may try to trick you into giving them money or access to your computer.
  • -
  • Q: Is F1 2010 Crack Only Razor 1911 11 working?
  • -
  • A: Maybe, maybe not. Cracked games may not work properly on your computer, as they may not be compatible with your hardware, software or operating system. You may encounter errors, crashes, glitches or performance issues that can ruin your gaming experience. You may also miss out on updates, patches or fixes that can improve the game's quality or security.
  • -
  • Q: Where can I get F1 2010 Crack Only Razor 1911 11?
  • -
  • A: We do not recommend that you get F1 2010 Crack Only Razor 1911 11. We do not support or condone piracy or illegal activities. We advise you to buy the game, use Steam or xlive instead of using F1 2010 Crack Only Razor 1911 11. These alternatives are legal and safe, and they offer more benefits and features than using F1 2010 Crack Only Razor 1911 11.
  • -
  • Q: How can I play F1 2010 online?
  • -
  • A: You can play F1 2010 online by using Steam or xlive. These services provide online gaming and social networking for Windows games. You can buy F1 2010 on Steam or xlive and download it to your computer. You will need to create a free account and install the client on your computer. By playing F1 2010 on Steam or xlive, you will be able to use some features, such as online profile, leaderboards, chat and multiplayer. You will also be able to play with other users or friends.
  • -
-

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/O2Mania (Offline O2Jam - All 556 Songs Included) _TOP_.md b/spaces/tioseFevbu/cartoon-converter/O2Mania (Offline O2Jam - All 556 Songs Included) _TOP_.md deleted file mode 100644 index 87e70e28a62d61c0e90c6be54b9d31352ff6128b..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/O2Mania (Offline O2Jam - All 556 Songs Included) _TOP_.md +++ /dev/null @@ -1,80 +0,0 @@ -## O2Mania (Offline O2Jam - All 556 Songs Included) - - - - - - ![O2Mania (Offline O2Jam - All 556 Songs Included) _TOP_](https://encrypted-tbn1.gstatic.com/images?q=tbn:ANd9GcSCMp7p_xf5fmVbxRLFux0QKQ1wvRws5tb_BnToQ7ckWNhIYclcke9_8qz9) - - - - - -**LINK » [https://tinourl.com/2txBqT](https://tinourl.com/2txBqT)** - - - - - - - - - - - - - -# O2Mania: Enjoy Offline O2Jam with All 556 Songs Included - - - -O2Jam is a popular online rhythm game that lets you play various songs with different difficulty levels. However, if you want to enjoy O2Jam without an internet connection, you can try O2Mania, an offline version of O2Jam with all 556 songs included. - - - -O2Mania is a utility that allows you to view and play the single player content of O2Jam. You can choose from a wide range of songs, from pop to rock, from classical to techno. You can also adjust the speed, volume, and keys of each song according to your preference. - - - -To play O2Mania, you need to download the program and the song pack from the links below. The song pack contains all 556 songs that are available in O2Jam, so you can enjoy them offline. You can also add your own songs by placing them in the Music folder of O2Mania. - - - -O2Mania is a great way to practice your rhythm skills and have fun with O2Jam songs. Whether you are a beginner or an expert, you can find a song that suits your level and challenge yourself. O2Mania is also compatible with various devices, such as PC, PS3, PSP, and PDA. - - - -If you are a fan of O2Jam or rhythm games in general, you should definitely try O2Mania. It is free, easy to use, and offers a lot of content. Download O2Mania and enjoy offline O2Jam with all 556 songs included! - - - -- Download O2Mania (Offline O2Jam - All 556 Songs Included) from [SoundCloud](https://soundcloud.com/joe-jimale/o2mania-offline-o2jam-all-556-songs-included) [^1^] - -- Download O2Mania (Offline O2Jam - All 556 Songs Included) Game from [Peatix](https://peatix.com/group/10310933/view) [^3^] - - - -O2Mania is not the only rhythm game that you can play offline. There are many other rhythm games that you can download and enjoy on your computer or mobile device. Here are some of them: - - - -- **Lo-Fi Room**: A relaxing rhythm game where you have to find and play musical instruments hidden in a cozy room. You can create your own lo-fi beats and listen to them while you chill. You can play Lo-Fi Room on your browser or download it for Windows, Mac, and Linux from [itch.io](https://bearmask.itch.io/lo-fi-room) [^1^] [^2^]. - -- **Keylimba**: A virtual keyboard kalimba that you can play with your keyboard or mouse. You can learn how to play different songs or improvise your own melodies. You can also record and share your music with others. You can play Keylimba on your browser or download it for Windows from [itch.io](https://dvdfu.itch.io/keylimba) [^3^]. - -- **Sequence8**: A music sequencing toy made with PICO-8, a fantasy console that lets you create and play retro-style games. 
You can use different sound effects and patterns to make your own tunes. You can also export and import songs as QR codes. You can play Sequence8 on your browser or download it for Windows, Mac, Linux, and Android from [itch.io](https://billiam.itch.io/sequence8) . - -- **Coffee maker**: A simple rhythm game where you have to make coffee by pressing the right keys at the right time. You can choose from different types of coffee and enjoy the soothing sounds of brewing. You can play Coffee maker on your browser or download it for Windows from [itch.io](https://shinpansen.itch.io/coffee-maker) . - -- **SpaceTone Demo**: A rhythm game combined with story, where you play as Reaper, who judges dying lives through music. You have to follow the rhythm and press the right keys to decide the fate of each soul. You can also explore different worlds and interact with various characters. You can download SpaceTone Demo for Windows from [itch.io](https://spacetone.itch.io/spacetone-demo) . - - - -These are just some of the many rhythm games that you can play offline. Whether you want to relax, challenge yourself, or express yourself, there is a rhythm game for you. Try them out and have fun! - - 1b8d091108 - - - - - diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Accounting Software For Free Download With Full Version LINK.md b/spaces/tioseFevbu/cartoon-converter/scripts/Accounting Software For Free Download With Full Version LINK.md deleted file mode 100644 index a282012904b109bb392ee911f4c2b8cd0d3ce927..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Accounting Software For Free Download With Full Version LINK.md +++ /dev/null @@ -1,110 +0,0 @@ - -

Best Accounting Software for Free Download with Full Version

-

Accounting software is a computer program that helps you record, manage, analyze, and report your business finances. It can save you time and money by automating tasks such as invoicing, expense tracking, financial reporting, tax compliance, and more. Accounting software can also help you make better decisions by providing real-time insights into your cash flow, profitability, and performance.

-

But not all accounting software is created equal. Some products may offer more features, functionality, and customization than others. Some may be more suitable for your business size, industry, and needs. And some may be more affordable than others.

-

accounting software for free download with full version


Download File ✔✔✔ https://urlcod.com/2uHxD2



-

So how do you choose the best accounting software for your business? And where can you find accounting software for free download with full version? In this article, we will answer these questions and more. We will explain the benefits and features of accounting software, and compare the top 5 accounting software products that you can download for free with full version. We will also provide some tips and recommendations for choosing the best accounting software for your business.

-

What are the benefits of accounting software?

-

Accounting software can provide many benefits for your business, such as:

-
    -
  • Saves time and money: Accounting software can automate manual and repetitive tasks, such as data entry, invoice creation, payment processing, bank reconciliation, and more. This can reduce errors, increase efficiency, and save you time and money.
  • -
  • Generates reports and insights: Accounting software can generate various financial reports and statements, such as income statement, balance sheet, cash flow statement, budget vs actuals, etc. These reports can help you monitor your business performance, identify trends and issues, and make informed decisions.
  • -
  • Organizes your records: Accounting software can store all your financial data and transactions in one place, making it easy to access, update, and backup. You can also attach documents, receipts, invoices, etc. to your transactions for better record-keeping.
  • -
  • Streamlines tax filing: Accounting software can help you comply with tax laws and regulations by calculating and tracking your taxes, such as income tax, sales tax, GST/VAT, etc. You can also generate tax reports and forms, and export or file them online.
  • -
  • Integrates with other applications: Accounting software can integrate with other business applications, such as CRM, inventory management, payroll, ecommerce, etc. This can help you streamline your workflows, share data across platforms, and avoid duplication.
  • -
-

What are the features of accounting software?

-

Accounting software can vary in complexity and functionality depending on the product and vendor. However, most accounting software products will offer similar core features such as:

-
    -
  • Invoicing and billing: This feature allows you to create and send invoices to your customers, track payments and due dates, send reminders and receipts, accept online payments, apply discounts and taxes, etc. (A minimal calculation sketch follows after this list.)
  • -
  • Expense tracking: This feature allows you to record and categorize your expenses, attach receipts or documents to your transactions, track mileage and reimbursements, etc.
  • -
  • Financial reporting: This feature allows you to generate various financial reports and statements based on your data. You can also customize your reports by adding filters or columns or changing formats or layouts.
  • -
  • Banking integration: This feature allows you to connect your bank accounts or credit cards to your accounting software so that you can automatically import your transactions, reconcile your accounts, and view your balances and transactions.
  • -
  • Tax compliance: This feature allows you to calculate and track your taxes, such as income tax, sales tax, GST/VAT, etc. You can also generate tax reports and forms, and export or file them online.
  • -
  • Multi-currency: This feature allows you to deal with foreign currencies and exchange rates. You can create invoices and bills in different currencies, convert transactions to your base currency, and update exchange rates automatically or manually.
  • -
  • Inventory management: This feature allows you to manage your inventory levels, costs, and movements. You can track your stock quantities, set reorder points, adjust inventory values, etc.
  • -
  • Payroll: This feature allows you to manage your employees' salaries, wages, benefits, deductions, taxes, etc. You can also generate payslips and reports, and pay your employees online or offline.
  • -
  • Project management: This feature allows you to manage your projects and tasks, track time and expenses, assign resources, set budgets and deadlines, etc.
  • -
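To make the invoicing and tax item above concrete, here is a minimal, hypothetical sketch of the kind of calculation an invoicing feature automates. The line items, discount, and tax rate are invented purely for illustration and do not correspond to any particular product's API.

```python
from decimal import Decimal, ROUND_HALF_UP

# Hypothetical invoice line items: (description, quantity, unit price)
line_items = [
    ("Consulting hours", 10, Decimal("75.00")),
    ("Software licence", 1, Decimal("250.00")),
]
discount_rate = Decimal("0.10")  # assumed 10% customer discount
tax_rate = Decimal("0.20")       # assumed 20% VAT/GST rate

subtotal = sum(qty * price for _, qty, price in line_items)              # 1000.00
discount = (subtotal * discount_rate).quantize(Decimal("0.01"), ROUND_HALF_UP)
taxable = subtotal - discount
tax = (taxable * tax_rate).quantize(Decimal("0.01"), ROUND_HALF_UP)
total = taxable + tax

print(f"Subtotal {subtotal}, discount {discount}, tax {tax}, total due {total}")
```

A real product would also persist these amounts, apply the correct jurisdictional tax rules, and generate the invoice document, but the arithmetic being automated is essentially this.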
-

Best Accounting Software for Free Download with Full Version

-

Now that you know what are the benefits and features of accounting software, you may be wondering where to find accounting software for free download with full version. There are many accounting software products available in the market, but not all of them offer a free download with full version. Some may only offer a free trial or a limited version with restricted features or users.

-

However, there are some accounting software products that do offer a free download with full version. These products may have different terms and conditions for their free versions, such as the number of users, transactions, clients, or features allowed. Some may also have ads or require registration or subscription.

-

To help you choose the best accounting software for your business, we have compared the top 5 accounting software products that you can download for free with full version. We have used the following criteria to evaluate them:

-
    -
  • Features and functionality: How many features and functions does the product offer? How well does it perform the core accounting tasks? How easy is it to use and customize?
  • -
  • Support and updates: How reliable and responsive is the product's customer support? How often does the product receive updates and improvements? How secure is the product's data protection?
  • -
  • Reviews and ratings: What are the feedbacks and opinions of the product's users? How satisfied are they with the product's performance and service? How high are the product's ratings and reviews?
  • -
  • Price and value: How much does the product cost? What are the benefits and drawbacks of the product's pricing plan? How much value does the product provide for its price?
  • -
-

Comparison of top 5 accounting software products

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Product | Features and functionality | Support and updates | Reviews and ratings | Price and value |
| --- | --- | --- | --- | --- |
| FreshBooks | FreshBooks is a cloud-based accounting software that offers features such as invoicing, expense tracking, time tracking, project management, payments, reporting, etc. It is designed for small businesses and freelancers who need a simple and intuitive solution for their accounting needs. It has a user-friendly interface and a mobile app that allows you to access your data anytime and anywhere. | FreshBooks offers 24/7 customer support via phone, email, chat, or social media. It also provides online resources such as help center, blog, webinars, podcasts, etc. FreshBooks updates its software regularly with new features and enhancements. It also uses bank-level encryption and backups to protect your data. | FreshBooks has received positive reviews from its users who praise its ease of use, functionality, design, and support. It has also received high ratings and awards from various platforms and publications, such as Capterra, Trustpilot, PCMag, etc. | FreshBooks offers a free download with full version for 30 days. After that, you can choose from four pricing plans: Lite ($6/month), Plus ($10/month), Premium ($20/month), and Select (customized). The plans differ in the number of clients, features, and users allowed. You can also add additional features such as advanced payments, payroll, etc. for an extra fee. |
| QuickBooks Online | QuickBooks Online is a cloud-based accounting software that offers features such as invoicing, expense tracking, banking integration, tax compliance, reporting, inventory management, payroll, etc. It is designed for small and medium-sized businesses who need a comprehensive and powerful solution for their accounting needs. It has a customizable dashboard and a mobile app that allows you to manage your finances on the go. | QuickBooks Online offers customer support via phone, chat, or email. It also provides online resources such as help center, community forum, blog, tutorials, etc. QuickBooks Online updates its software regularly with new features and improvements. It also uses advanced encryption and backups to protect your data. | QuickBooks Online has received mixed reviews from its users who appreciate its functionality, integration, and reporting, but complain about its price, complexity, and bugs. It has also received moderate ratings and awards from various platforms and publications, such as Capterra, Trustpilot, PCMag, etc. | QuickBooks Online offers a free download with full version for 30 days. After that, you can choose from four pricing plans: Simple Start ($12.50/month), Essentials ($20/month), Plus ($35/month), and Advanced ($75/month). The plans differ in the number of features and users allowed. You can also add additional features such as payroll, payments, etc. for an extra fee. |
| Wave Accounting | Wave Accounting is a cloud-based accounting software that offers features such as invoicing, expense tracking, banking integration, tax compliance, reporting, etc. It is designed for small businesses and freelancers who need a simple and free solution for their accounting needs. It has a user-friendly interface and a mobile app that allows you to scan receipts and create invoices. | Wave Accounting offers customer support via email or chat. It also provides online resources such as help center, blog, webinars, guides, etc. Wave Accounting updates its software regularly with new features and enhancements. It also uses 256-bit encryption and backups to protect your data. | Wave Accounting has received positive reviews from its users who love its simplicity, free price, and functionality, but wish it had more features, integration, and support. It has also received high ratings and awards from various platforms and publications, such as Capterra, Trustpilot, PCMag, etc. | Wave Accounting offers a free download with full version for unlimited users and features. However, you have to pay for additional services such as payroll ($20-$35/month plus $4-$6/employee), payments (2.9% + 30¢ per transaction), and professional services (customized). |
| Zoho Books | Zoho Books is a cloud-based accounting software that offers features such as invoicing, expense tracking, banking integration, tax compliance, reporting, inventory management, project management, etc. It is designed for small and medium-sized businesses who need a flexible and scalable solution for their accounting needs. It has a customizable interface and a mobile app that allows you to access your data from anywhere. | Zoho Books offers customer support via phone, email, chat, or social media. It also provides online resources such as help center, community forum, blog, webinars, etc. Zoho Books updates its software regularly with new features and enhancements. It also uses SSL encryption and backups to protect your data. | Zoho Books has received positive reviews from its users who like its functionality, integration, and design, but dislike its price, learning curve, and bugs. It has also received high ratings and awards from various platforms and publications, such as Capterra, Trustpilot, PCMag, etc. | Zoho Books offers a free download with full version for 14 days. After that, you can choose from three pricing plans: Basic ($9/month), Standard ($19/month), and Professional ($29/month). The plans differ in the number of features, contacts, users, and workflows allowed. You can also add additional features such as payroll ($1/employee/month), payments (2.9% + 30¢ per transaction), etc. for an extra fee. |
| GnuCash | GnuCash is a desktop-based accounting software that offers features such as invoicing, expense tracking, banking integration, tax compliance, reporting, etc. It is designed for individuals and small businesses who need a simple and open-source solution for their accounting needs. It has a basic interface and a mobile app that allows you to sync your data with your computer. | GnuCash offers customer support via email or mailing list. It also provides online resources such as help center, wiki, blog, tutorials, etc. GnuCash updates its software regularly with new features and bug fixes. It also uses encryption and backups to protect your data. | GnuCash has received positive reviews from its users who appreciate its simplicity, free price, and functionality, but criticize its interface, compatibility, and support. It has also received moderate ratings and awards from various platforms and publications, such as Capterra, Trustpilot, PCMag, etc. | GnuCash offers a free download with full version for unlimited users and features. However, you have to install it on your computer and update it manually. You may also encounter some issues with compatibility and performance. |
-

Conclusion

-

Accounting software is a useful tool that can help you manage your business finances more efficiently and effectively. It can provide you with many benefits, such as saving time and money, generating reports and insights, organizing your records, streamlining tax filing, and integrating with other applications.

-

However, not all accounting software is the same. You have to consider various factors, such as features, functionality, support, updates, reviews, ratings, price, and value, when choosing the best accounting software for your business. You also have to find accounting software that offers a free download with full version.

-

-

In this article, we have compared the top 5 accounting software products that you can download for free with full version: FreshBooks, QuickBooks Online, Wave Accounting, Zoho Books, and GnuCash. We have evaluated them based on the criteria mentioned above. We hope that this article has helped you make an informed decision and find the best accounting software for your business.

-

FAQs

-

Here are some frequently asked questions about accounting software:

-

What is the difference between cloud-based and desktop accounting software?

-

Cloud-based accounting software is hosted on the internet and can be accessed from any device with a web browser. Desktop accounting software is installed on your computer and can only be accessed from that device. Cloud-based accounting software has some advantages over desktop accounting software, such as:

-
    -
  • It does not require installation or updates.
  • -
  • It can be accessed from anywhere and anytime.
  • -
  • It can be integrated with other cloud-based applications.
  • -
  • It can be scaled up or down according to your needs.
  • -
  • It has better security and backup features.
  • -
-

However, cloud-based accounting software may also have some disadvantages, such as:

-
    -
  • It may require a stable internet connection.
  • -
  • It may have higher subscription fees.
  • -
  • It may have less customization options.
  • -
  • It may have less control over your data.
  • -
-

How much does accounting software cost?

-

The cost of accounting software depends on various factors, such as the product, vendor, feature set, number of users, and pricing plan. Some products, such as Wave Accounting and GnuCash, are free to download and use, while the subscription-based products in the comparison above range from roughly $6 to $75 per month, with optional paid add-ons such as payroll or payment processing.

b2dd77e56b
-
-
\ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Cnc Usb Controller Software Keygen [Extra Quality]. Filmes Roxio Bataill.md b/spaces/tioseFevbu/cartoon-converter/scripts/Cnc Usb Controller Software Keygen [Extra Quality]. Filmes Roxio Bataill.md deleted file mode 100644 index 033d987fb2237753567f5ac1467b3807c8c9a1aa..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Cnc Usb Controller Software Keygen [Extra Quality]. Filmes Roxio Bataill.md +++ /dev/null @@ -1,42 +0,0 @@ -
-

CNC USB Controller Software: A Guide to Choosing and Installing the Best Option for Your CNC Machine

-

CNC USB controller software is a program that allows you to control your CNC machine from your computer via a USB connection. It can also provide features such as jogging, homing, setting work coordinates, running G-code files, and monitoring the status of your machine.

-

Cnc Usb Controller Software Keygen. filmes roxio bataill


Downloadhttps://urlcod.com/2uHvn2



-

There are many options for CNC USB controller software, but not all of them are compatible with every CNC machine or controller. Some of the factors that you need to consider when choosing CNC USB controller software are:

-
    -
  • The type and model of your CNC machine and controller
  • -
  • The operating system of your computer
  • -
  • The features and functions that you need for your CNC projects
  • -
  • The price and license of the software
  • -
  • The support and updates available from the software developer
  • -
-

In this article, we will review some of the most popular and reliable CNC USB controller software options available on the market, and provide a step-by-step guide on how to install and use them.

-

PlanetCNC TNG

-

PlanetCNC TNG is CNC USB controller software developed by PlanetCNC, a company that specializes in CNC controllers and accessories. PlanetCNC TNG is designed to work with PlanetCNC controllers, such as Mk3/4, Mk3ECO, Mk3/4ETH, and Mk2/4.

-

-

PlanetCNC TNG has a user-friendly interface that allows you to easily configure and control your CNC machine. It supports various operating systems, such as Windows, Linux, Raspberry Pi, and MacOS. It also has a wide range of features, such as:

-
    -
  • Support for up to 9 axes
  • -
  • Support for various motion modes, such as linear, circular, helical, and spline interpolation
  • -
  • Support for various spindle types, such as PWM, analog, relay, and servo
  • -
  • Support for various probe types, such as touch plate, tool length sensor, edge finder, and 3D probe
  • -
  • Support for various tool change methods, such as manual, semi-automatic, and automatic
  • -
  • Support for various G-code formats, such as ISO 6983, Fanuc, Haas, Siemens, Heidenhain, etc.
  • -
  • Support for macros, plugins, and custom commands
  • -
  • Built-in CAM module for generating G-code from DXF files
  • -
  • Built-in simulator for previewing and testing G-code files
  • -
  • Built-in oscilloscope for monitoring signals and troubleshooting
  • -
-

PlanetCNC TNG is free to use with controllers that have a valid license. You can download the latest version of the software from their website[^2^]. To install PlanetCNC TNG on your computer, follow these steps:

-
    -
  1. Download the appropriate version of PlanetCNC TNG for your operating system from their website[^2^].
  2. -
  3. Extract the downloaded file to a folder on your computer.
  4. -
  5. Connect your PlanetCNC controller to your computer via a USB cable.
  6. -
  7. Run the PlanetCNC TNG executable file from the extracted folder.
  8. -
  9. The software will automatically detect your controller and ask you to activate it. Enter your license key or request a trial license.
  10. -
  11. The software will then ask you to update the firmware of your controller. Follow the instructions on the screen to complete the firmware update.
  12. -
  13. The software will then ask you to configure your machine settings. Follow the instructions on the screen to enter the parameters of your machine, such as axis configuration, motor tuning, spindle settings, etc.
  14. -
  15. The software will then ask you to calibrate your machine. Follow the instructions on the screen to perform homing, setting work coordinates, probing, etc.
  16. -
  17. You are now ready to use PlanetCNC TNG to control your CNC machine. You can load a G-code file from the File menu or create one using the CAM module. You can also use the buttons and sliders on the interface to jog, start/stop/pause/resume/rewind/forward/skip/abort the program execution.
  18. 7b8c122e87
    -
    -
    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Globesurfer X 1 Firmware Download __HOT__rar.md b/spaces/tioseFevbu/cartoon-converter/scripts/Globesurfer X 1 Firmware Download __HOT__rar.md deleted file mode 100644 index 32bf90e69b8fceea2b63e5668b537b9aee47a95e..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Globesurfer X 1 Firmware Download __HOT__rar.md +++ /dev/null @@ -1,38 +0,0 @@ -
    -

    How to Download and Update Firmware for Globesurfer X 1

    -

    Globesurfer X 1 is a high-performance Wi-Fi device that allows you to connect to the internet using a single USB modem. It is compatible with various mobile networks and USB modems, and it has features such as wireless encryption, shared printers, and file server. However, to enjoy the best performance and functionality of your device, you need to download and update its firmware regularly.

    -

    Globesurfer X 1 Firmware Downloadrar


    Downloadhttps://urlcod.com/2uHyja



    -

    Firmware is the software that controls the hardware of your device. It can fix bugs, improve stability, add new features, or enhance security. Updating firmware can also solve compatibility issues with different USB modems or mobile networks. Therefore, it is recommended that you check for firmware updates periodically and install them as soon as possible.

    -

    There are two ways to download and update firmware for Globesurfer X 1: from a local computer or from the internet. In this article, we will explain both methods step by step.

    - -

    Downloading and Updating Firmware from a Local Computer

    -

    If you have already downloaded the firmware file from the official website or another source, you can use this method to update your device. The firmware file should have a .rar extension, which means it is compressed and needs to be extracted before installation. You will also need a computer that is connected to your device via Ethernet or Wi-Fi.
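If you prefer to script the extraction step instead of using WinRAR or 7-Zip, the short sketch below shows one way to unpack the archive with the third-party Python package rarfile. This is only an illustration: it assumes rarfile and an unrar/bsdtar backend are installed, and the archive name is a placeholder for whatever the downloaded file is actually called.

```python
import rarfile  # third-party package; also requires an unrar or bsdtar backend on the system

ARCHIVE = "globesurfer_x1_firmware.rar"  # placeholder name for the downloaded archive

# Extract everything into a local "firmware" folder so the firmware image is easy to find.
with rarfile.RarFile(ARCHIVE) as rf:
    rf.extractall(path="firmware")
    print("Extracted:", rf.namelist())
```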

    -

    Here are the steps to follow:

    -

    -
      -
    1. Extract the firmware file using a program such as WinRAR or 7-Zip. You should get a file with a .bin extension, which is the actual firmware image.
    2. -
    3. Open your web browser and type http://192.168.1.1 in the address bar. This is the default IP address of your device. You will be prompted to enter your user name and password. The default ones are admin and admin.
    4. -
    5. Once you log in, click on Administration on the left menu, then click on Firmware Upgrade.
    6. -
    7. Click on Browse and locate the firmware file that you extracted earlier. Then click on Upload.
    8. -
    9. The device will start uploading and installing the firmware. Do not turn off or disconnect your device during this process. Wait until you see a message that says "Firmware upgrade successful".
    10. -
    11. Reboot your device by clicking on Reboot on the left menu.
    12. -
    13. Your device is now updated with the latest firmware.
    14. -
    - -

    Downloading and Updating Firmware from the Internet

    -

    If you do not have the firmware file on your computer, you can use this method to download it directly from the internet and update your device. You will need a computer that is connected to your device via Ethernet or Wi-Fi, and an active internet connection on your USB modem.

    -

    Here are the steps to follow:

    -
      -
    1. Open your web browser and type http://192.168.1.1 in the address bar. This is the default IP address of your device. You will be prompted to enter your user name and password. The default ones are admin and admin.
    2. -
    3. Once you log in, click on Administration on the left menu, then click on Firmware Upgrade.
    4. -
    5. Click on Check for Updates. The device will connect to the internet and check if there is a newer firmware version available.
    6. -
    7. If there is an update available, you will see a message that says "New firmware version found". Click on Download to start downloading the firmware file.
    8. -
    9. The device will start downloading and installing the firmware. Do not turn off or disconnect your device during this process. Wait until you see a message that says "Firmware upgrade successful".
    10. -
    11. Reboot your device by clicking on Reboot on the left menu.
    12. -
    13. Your device is now updated with the latest firmware.
    14. -
    - -

    Conclusion

    -

    Globesurfer X 1 is a versatile and powerful Wi-Fi device that can provide you with fast and reliable internet access using a USB modem.

    e93f5a0c3f
    -
    -
    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Huawei Echolife Hg520b Firmware Rapidshare Carlohall.md b/spaces/tioseFevbu/cartoon-converter/scripts/Huawei Echolife Hg520b Firmware Rapidshare Carlohall.md deleted file mode 100644 index f5872afc868271e75f7474b6eb63686a84b48259..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Huawei Echolife Hg520b Firmware Rapidshare Carlohall.md +++ /dev/null @@ -1,24 +0,0 @@ -
    -

    Huawei Echolife Hg520b Firmware Rapidshare carlohall

    -

    If you are looking for a way to download the firmware for your Huawei Echolife Hg520b router, you may have come across some links that claim to offer it for free on Rapidshare. However, these links are not reliable and may contain malware or viruses that can harm your device. In this article, we will show you how to get the official firmware from Huawei's website and how to install it on your router.

    -

    What is Huawei Echolife Hg520b?

    -

    Huawei Echolife Hg520b is a wireless router that supports ADSL2+ technology and provides high-speed internet access for home and small office users. It has four LAN ports, one WAN port, one USB port and two external antennas. It also supports WPS, QoS, firewall, NAT, DHCP and UPnP features. The router has a web-based interface that allows users to configure and manage the network settings.

    -

    Huawei Echolife Hg520b Firmware Rapidshare carlohall


    Download Zip ✓✓✓ https://urlcod.com/2uHvpS



    -

    Why do you need to update the firmware?

    -

    The firmware is the software that runs on your router and controls its functions. Updating the firmware can improve the performance, stability and security of your router. It can also fix some bugs and add new features. Huawei regularly releases new firmware versions for its routers to enhance their functionality and compatibility with different devices and applications.

    -

    How to download the official firmware from Huawei?

    -

    To download the official firmware from Huawei, you need to visit their support website and enter your product model number in the search box. For example, if your router model is HG520b, you can type it in the search box and click on the search icon. You will see a list of results related to your product. Click on the one that says "Software & Firmware Download of Enterprise Products - Huawei" [^3^]. You will be redirected to a page where you can select your product category, sub-category and version. For example, if your product category is "Routing & Switching", your sub-category is "Access Routers" and your version is "V100R001", you can select them from the drop-down menus. You will see a list of available firmware files for your router. Choose the one that matches your current firmware version or the latest one if you want to upgrade. Click on the download icon next to the file name and agree to the terms and conditions. You will need to log in with your Huawei account or register one if you don't have one. After logging in, you can download the firmware file to your computer.

    -

    -

    How to install the firmware on your router?

    -

    To install the firmware on your router, you need to follow these steps:

    -
      -
    1. Connect your computer to your router using an Ethernet cable. Make sure your computer has a static IP address in the same subnet as your router. For example, if your router's IP address is 192.168.1.1, you can set your computer's IP address to 192.168.1.100.
    2. -
  3. Open a web browser and type http://192.168.1.1 in the address bar. Enter your username and password to log in to your router's web interface. The default username and password are both admin. (A quick reachability check is sketched after this list.)
    4. -
    5. Go to System Tools > Firmware Upgrade. Click on Browse and locate the firmware file that you downloaded from Huawei's website.
    6. -
    7. Click on Upgrade and wait for the process to complete. Do not turn off or disconnect your router during the upgrade.
    8. -
    9. After the upgrade is done, reboot your router and check if everything is working properly.
    10. -
    -
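Before uploading anything in step 5, it can be worth confirming that the router's web interface is actually reachable from your computer. The sketch below is a simple TCP connection test; the address and port are the defaults quoted in this article and are assumptions, so adjust them if your router is configured differently.

```python
import socket

ROUTER_IP = "192.168.1.1"  # default address quoted in this article
HTTP_PORT = 80             # assumed port of the router's web interface

def router_reachable(host=ROUTER_IP, port=HTTP_PORT, timeout=3.0):
    """Return True if a TCP connection to the router's web interface succeeds."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

if router_reachable():
    print(f"http://{ROUTER_IP} is reachable - log in and proceed with the upgrade.")
else:
    print("Router not reachable - check the cable and your static IP (e.g. 192.168.1.100).")
```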

    Conclusion

    -

    In this article, we have shown you how to download and install the official firmware for your Huawei Echolife Hg520b router from Huawei's website. We have also explained what is Huawei Echolife Hg520b, why do you need to update the firmware and how to do it safely. We hope this article has been helpful for you and solved your problem.

    81aa517590
    -
    -
    \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/provider.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/provider.py deleted file mode 100644 index 6300dfc57f051e461776b82591471c7dc7fc486d..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/provider.py +++ /dev/null @@ -1,248 +0,0 @@ -import collections -import math -from typing import ( - TYPE_CHECKING, - Dict, - Iterable, - Iterator, - Mapping, - Sequence, - TypeVar, - Union, -) - -from pip._vendor.resolvelib.providers import AbstractProvider - -from .base import Candidate, Constraint, Requirement -from .candidates import REQUIRES_PYTHON_IDENTIFIER -from .factory import Factory - -if TYPE_CHECKING: - from pip._vendor.resolvelib.providers import Preference - from pip._vendor.resolvelib.resolvers import RequirementInformation - - PreferenceInformation = RequirementInformation[Requirement, Candidate] - - _ProviderBase = AbstractProvider[Requirement, Candidate, str] -else: - _ProviderBase = AbstractProvider - -# Notes on the relationship between the provider, the factory, and the -# candidate and requirement classes. -# -# The provider is a direct implementation of the resolvelib class. Its role -# is to deliver the API that resolvelib expects. -# -# Rather than work with completely abstract "requirement" and "candidate" -# concepts as resolvelib does, pip has concrete classes implementing these two -# ideas. The API of Requirement and Candidate objects are defined in the base -# classes, but essentially map fairly directly to the equivalent provider -# methods. In particular, `find_matches` and `is_satisfied_by` are -# requirement methods, and `get_dependencies` is a candidate method. -# -# The factory is the interface to pip's internal mechanisms. It is stateless, -# and is created by the resolver and held as a property of the provider. It is -# responsible for creating Requirement and Candidate objects, and provides -# services to those objects (access to pip's finder and preparer). - - -D = TypeVar("D") -V = TypeVar("V") - - -def _get_with_identifier( - mapping: Mapping[str, V], - identifier: str, - default: D, -) -> Union[D, V]: - """Get item from a package name lookup mapping with a resolver identifier. - - This extra logic is needed when the target mapping is keyed by package - name, which cannot be directly looked up with an identifier (which may - contain requested extras). Additional logic is added to also look up a value - by "cleaning up" the extras from the identifier. - """ - if identifier in mapping: - return mapping[identifier] - # HACK: Theoretically we should check whether this identifier is a valid - # "NAME[EXTRAS]" format, and parse out the name part with packaging or - # some regular expression. But since pip's resolver only spits out three - # kinds of identifiers: normalized PEP 503 names, normalized names plus - # extras, and Requires-Python, we can cheat a bit here. - name, open_bracket, _ = identifier.partition("[") - if open_bracket and name in mapping: - return mapping[name] - return default - - -class PipProvider(_ProviderBase): - """Pip's provider implementation for resolvelib. - - :params constraints: A mapping of constraints specified by the user. Keys - are canonicalized project names. 
- :params ignore_dependencies: Whether the user specified ``--no-deps``. - :params upgrade_strategy: The user-specified upgrade strategy. - :params user_requested: A set of canonicalized package names that the user - supplied for pip to install/upgrade. - """ - - def __init__( - self, - factory: Factory, - constraints: Dict[str, Constraint], - ignore_dependencies: bool, - upgrade_strategy: str, - user_requested: Dict[str, int], - ) -> None: - self._factory = factory - self._constraints = constraints - self._ignore_dependencies = ignore_dependencies - self._upgrade_strategy = upgrade_strategy - self._user_requested = user_requested - self._known_depths: Dict[str, float] = collections.defaultdict(lambda: math.inf) - - def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str: - return requirement_or_candidate.name - - def get_preference( # type: ignore - self, - identifier: str, - resolutions: Mapping[str, Candidate], - candidates: Mapping[str, Iterator[Candidate]], - information: Mapping[str, Iterable["PreferenceInformation"]], - backtrack_causes: Sequence["PreferenceInformation"], - ) -> "Preference": - """Produce a sort key for given requirement based on preference. - - The lower the return value is, the more preferred this group of - arguments is. - - Currently pip considers the following in order: - - * Prefer if any of the known requirements is "direct", e.g. points to an - explicit URL. - * If equal, prefer if any requirement is "pinned", i.e. contains - operator ``===`` or ``==``. - * If equal, calculate an approximate "depth" and resolve requirements - closer to the user-specified requirements first. - * Order user-specified requirements by the order they are specified. - * If equal, prefers "non-free" requirements, i.e. contains at least one - operator, such as ``>=`` or ``<``. - * If equal, order alphabetically for consistency (helps debuggability). - """ - lookups = (r.get_candidate_lookup() for r, _ in information[identifier]) - candidate, ireqs = zip(*lookups) - operators = [ - specifier.operator - for specifier_set in (ireq.specifier for ireq in ireqs if ireq) - for specifier in specifier_set - ] - - direct = candidate is not None - pinned = any(op[:2] == "==" for op in operators) - unfree = bool(operators) - - try: - requested_order: Union[int, float] = self._user_requested[identifier] - except KeyError: - requested_order = math.inf - parent_depths = ( - self._known_depths[parent.name] if parent is not None else 0.0 - for _, parent in information[identifier] - ) - inferred_depth = min(d for d in parent_depths) + 1.0 - else: - inferred_depth = 1.0 - self._known_depths[identifier] = inferred_depth - - requested_order = self._user_requested.get(identifier, math.inf) - - # Requires-Python has only one candidate and the check is basically - # free, so we always do it first to avoid needless work if it fails. - requires_python = identifier == REQUIRES_PYTHON_IDENTIFIER - - # HACK: Setuptools have a very long and solid backward compatibility - # track record, and extremely few projects would request a narrow, - # non-recent version range of it since that would break a lot things. - # (Most projects specify it only to request for an installer feature, - # which does not work, but that's another topic.) Intentionally - # delaying Setuptools helps reduce branches the resolver has to check. - # This serves as a temporary fix for issues like "apache-airflow[all]" - # while we work on "proper" branch pruning techniques. 
- delay_this = identifier == "setuptools" - - # Prefer the causes of backtracking on the assumption that the problem - # resolving the dependency tree is related to the failures that caused - # the backtracking - backtrack_cause = self.is_backtrack_cause(identifier, backtrack_causes) - - return ( - not requires_python, - delay_this, - not direct, - not pinned, - not backtrack_cause, - inferred_depth, - requested_order, - not unfree, - identifier, - ) - - def find_matches( - self, - identifier: str, - requirements: Mapping[str, Iterator[Requirement]], - incompatibilities: Mapping[str, Iterator[Candidate]], - ) -> Iterable[Candidate]: - def _eligible_for_upgrade(identifier: str) -> bool: - """Are upgrades allowed for this project? - - This checks the upgrade strategy, and whether the project was one - that the user specified in the command line, in order to decide - whether we should upgrade if there's a newer version available. - - (Note that we don't need access to the `--upgrade` flag, because - an upgrade strategy of "to-satisfy-only" means that `--upgrade` - was not specified). - """ - if self._upgrade_strategy == "eager": - return True - elif self._upgrade_strategy == "only-if-needed": - user_order = _get_with_identifier( - self._user_requested, - identifier, - default=None, - ) - return user_order is not None - return False - - constraint = _get_with_identifier( - self._constraints, - identifier, - default=Constraint.empty(), - ) - return self._factory.find_candidates( - identifier=identifier, - requirements=requirements, - constraint=constraint, - prefers_installed=(not _eligible_for_upgrade(identifier)), - incompatibilities=incompatibilities, - ) - - def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool: - return requirement.is_satisfied_by(candidate) - - def get_dependencies(self, candidate: Candidate) -> Sequence[Requirement]: - with_requires = not self._ignore_dependencies - return [r for r in candidate.iter_dependencies(with_requires) if r is not None] - - @staticmethod - def is_backtrack_cause( - identifier: str, backtrack_causes: Sequence["PreferenceInformation"] - ) -> bool: - for backtrack_cause in backtrack_causes: - if identifier == backtrack_cause.requirement.name: - return True - if backtrack_cause.parent and identifier == backtrack_cause.parent.name: - return True - return False diff --git a/spaces/tomofi/MMOCR/docs/en/Makefile b/spaces/tomofi/MMOCR/docs/en/Makefile deleted file mode 100644 index d4bb2cbb9eddb1bb1b4f366623044af8e4830919..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/docs/en/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/spaces/tomofi/MMOCR/mmocr/datasets/pipelines/test_time_aug.py b/spaces/tomofi/MMOCR/mmocr/datasets/pipelines/test_time_aug.py deleted file mode 100644 index 773ea14be823e62f1b7bcd1430a75f0697488832..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/datasets/pipelines/test_time_aug.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np -from mmdet.datasets.builder import PIPELINES -from mmdet.datasets.pipelines.compose import Compose - - -@PIPELINES.register_module() -class MultiRotateAugOCR: - """Test-time augmentation with multiple rotations in the case that - img_height > img_width. - - An example configuration is as follows: - - .. code-block:: - - rotate_degrees=[0, 90, 270], - transforms=[ - dict( - type='ResizeOCR', - height=32, - min_width=32, - max_width=160, - keep_aspect_ratio=True), - dict(type='ToTensorOCR'), - dict(type='NormalizeOCR', **img_norm_cfg), - dict( - type='Collect', - keys=['img'], - meta_keys=[ - 'filename', 'ori_shape', 'img_shape', 'valid_ratio' - ]), - ] - - After MultiRotateAugOCR with above configuration, the results are wrapped - into lists of the same length as follows: - - .. code-block:: - - dict( - img=[...], - img_shape=[...] - ... - ) - - Args: - transforms (list[dict]): Transformation applied for each augmentation. - rotate_degrees (list[int] | None): Degrees of anti-clockwise rotation. - force_rotate (bool): If True, rotate image by 'rotate_degrees' - while ignore image aspect ratio. - """ - - def __init__(self, transforms, rotate_degrees=None, force_rotate=False): - self.transforms = Compose(transforms) - self.force_rotate = force_rotate - if rotate_degrees is not None: - self.rotate_degrees = rotate_degrees if isinstance( - rotate_degrees, list) else [rotate_degrees] - assert mmcv.is_list_of(self.rotate_degrees, int) - for degree in self.rotate_degrees: - assert 0 <= degree < 360 - assert degree % 90 == 0 - if 0 not in self.rotate_degrees: - self.rotate_degrees.append(0) - else: - self.rotate_degrees = [0] - - def __call__(self, results): - """Call function to apply test time augment transformation to results. - - Args: - results (dict): Result dict contains the data to be transformed. - - Returns: - dict[str: list]: The augmented data, where each value is wrapped - into a list. 
- """ - img_shape = results['img_shape'] - ori_height, ori_width = img_shape[:2] - if not self.force_rotate and ori_height <= ori_width: - rotate_degrees = [0] - else: - rotate_degrees = self.rotate_degrees - aug_data = [] - for degree in set(rotate_degrees): - _results = results.copy() - if degree == 0: - pass - elif degree == 90: - _results['img'] = np.rot90(_results['img'], 1) - elif degree == 180: - _results['img'] = np.rot90(_results['img'], 2) - elif degree == 270: - _results['img'] = np.rot90(_results['img'], 3) - data = self.transforms(_results) - aug_data.append(data) - # list of dict to dict of list - aug_data_dict = {key: [] for key in aug_data[0]} - for data in aug_data: - for key, val in data.items(): - aug_data_dict[key].append(val) - return aug_data_dict - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(transforms={self.transforms}, ' - repr_str += f'rotate_degrees={self.rotate_degrees})' - return repr_str diff --git a/spaces/tomofi/MMOCR/mmocr/models/textrecog/recognizer/abinet.py b/spaces/tomofi/MMOCR/mmocr/models/textrecog/recognizer/abinet.py deleted file mode 100644 index 43cd9d8c3d7df5d51d2b4585063fa3d95c2280f6..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/textrecog/recognizer/abinet.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch - -from mmocr.models.builder import (RECOGNIZERS, build_backbone, build_convertor, - build_decoder, build_encoder, build_fuser, - build_loss, build_preprocessor) -from .encode_decode_recognizer import EncodeDecodeRecognizer - - -@RECOGNIZERS.register_module() -class ABINet(EncodeDecodeRecognizer): - """Implementation of `Read Like Humans: Autonomous, Bidirectional and - Iterative LanguageModeling for Scene Text Recognition. 
- - `_ - """ - - def __init__(self, - preprocessor=None, - backbone=None, - encoder=None, - decoder=None, - iter_size=1, - fuser=None, - loss=None, - label_convertor=None, - train_cfg=None, - test_cfg=None, - max_seq_len=40, - pretrained=None, - init_cfg=None): - super(EncodeDecodeRecognizer, self).__init__(init_cfg=init_cfg) - - # Label convertor (str2tensor, tensor2str) - assert label_convertor is not None - label_convertor.update(max_seq_len=max_seq_len) - self.label_convertor = build_convertor(label_convertor) - - # Preprocessor module, e.g., TPS - self.preprocessor = None - if preprocessor is not None: - self.preprocessor = build_preprocessor(preprocessor) - - # Backbone - assert backbone is not None - self.backbone = build_backbone(backbone) - - # Encoder module - self.encoder = None - if encoder is not None: - self.encoder = build_encoder(encoder) - - # Decoder module - self.decoder = None - if decoder is not None: - decoder.update(num_classes=self.label_convertor.num_classes()) - decoder.update(start_idx=self.label_convertor.start_idx) - decoder.update(padding_idx=self.label_convertor.padding_idx) - decoder.update(max_seq_len=max_seq_len) - self.decoder = build_decoder(decoder) - - # Loss - assert loss is not None - self.loss = build_loss(loss) - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - self.max_seq_len = max_seq_len - - if pretrained is not None: - warnings.warn('DeprecationWarning: pretrained is a deprecated \ - key, please consider using init_cfg') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - - self.iter_size = iter_size - - self.fuser = None - if fuser is not None: - self.fuser = build_fuser(fuser) - - def forward_train(self, img, img_metas): - """ - Args: - img (tensor): Input images of shape (N, C, H, W). - Typically these should be mean centered and std scaled. - img_metas (list[dict]): A list of image info dict where each dict - contains: 'img_shape', 'filename', and may also contain - 'ori_shape', and 'img_norm_cfg'. - For details on the values of these keys see - :class:`mmdet.datasets.pipelines.Collect`. - - Returns: - dict[str, tensor]: A dictionary of loss components. - """ - for img_meta in img_metas: - valid_ratio = 1.0 * img_meta['resize_shape'][1] / img.size(-1) - img_meta['valid_ratio'] = valid_ratio - - feat = self.extract_feat(img) - - gt_labels = [img_meta['text'] for img_meta in img_metas] - - targets_dict = self.label_convertor.str2tensor(gt_labels) - - text_logits = None - out_enc = None - if self.encoder is not None: - out_enc = self.encoder(feat) - text_logits = out_enc['logits'] - - out_decs = [] - out_fusers = [] - for _ in range(self.iter_size): - if self.decoder is not None: - out_dec = self.decoder( - feat, - text_logits, - targets_dict, - img_metas, - train_mode=True) - out_decs.append(out_dec) - - if self.fuser is not None: - out_fuser = self.fuser(out_enc['feature'], out_dec['feature']) - text_logits = out_fuser['logits'] - out_fusers.append(out_fuser) - - outputs = dict( - out_enc=out_enc, out_decs=out_decs, out_fusers=out_fusers) - - losses = self.loss(outputs, targets_dict, img_metas) - - return losses - - def simple_test(self, img, img_metas, **kwargs): - """Test function with test time augmentation. - - Args: - imgs (torch.Tensor): Image input tensor. - img_metas (list[dict]): List of image information. - - Returns: - list[str]: Text label result of each image. 
- """ - for img_meta in img_metas: - valid_ratio = 1.0 * img_meta['resize_shape'][1] / img.size(-1) - img_meta['valid_ratio'] = valid_ratio - - feat = self.extract_feat(img) - - text_logits = None - out_enc = None - if self.encoder is not None: - out_enc = self.encoder(feat) - text_logits = out_enc['logits'] - - out_decs = [] - out_fusers = [] - for _ in range(self.iter_size): - if self.decoder is not None: - out_dec = self.decoder( - feat, text_logits, img_metas=img_metas, train_mode=False) - out_decs.append(out_dec) - - if self.fuser is not None: - out_fuser = self.fuser(out_enc['feature'], out_dec['feature']) - text_logits = out_fuser['logits'] - out_fusers.append(out_fuser) - - if len(out_fusers) > 0: - ret = out_fusers[-1] - elif len(out_decs) > 0: - ret = out_decs[-1] - else: - ret = out_enc - - # early return to avoid post processing - if torch.onnx.is_in_onnx_export(): - return ret['logits'] - - label_indexes, label_scores = self.label_convertor.tensor2idx( - ret['logits'], img_metas) - label_strings = self.label_convertor.idx2str(label_indexes) - - # flatten batch results - results = [] - for string, score in zip(label_strings, label_scores): - results.append(dict(text=string, score=score)) - - return results diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/vfnet/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/vfnet/README.md deleted file mode 100644 index 363f1b900498a140a5225f97dba5a8838f82b023..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/vfnet/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# VarifocalNet: An IoU-aware Dense Object Detector - -## Introduction - - - -**VarifocalNet (VFNet)** learns to predict the IoU-aware classification score which mixes the object presence confidence and localization accuracy together as the detection score for a bounding box. The learning is supervised by the proposed Varifocal Loss (VFL), based on a new star-shaped bounding box feature representation (the features at nine yellow sampling points). Given the new representation, the object localization accuracy is further improved by refining the initially regressed bounding box. The full paper is available at: [https://arxiv.org/abs/2008.13367](https://arxiv.org/abs/2008.13367). - -
-  Figure: Learning to Predict the IoU-aware Classification Score.
    - -## Citing VarifocalNet - -```latex -@article{zhang2020varifocalnet, - title={VarifocalNet: An IoU-aware Dense Object Detector}, - author={Zhang, Haoyang and Wang, Ying and Dayoub, Feras and S{\"u}nderhauf, Niko}, - journal={arXiv preprint arXiv:2008.13367}, - year={2020} -} -``` - -## Results and Models - -| Backbone | Style | DCN | MS train | Lr schd |Inf time (fps) | box AP (val) | box AP (test-dev) | Config | Download | -|:------------:|:---------:|:-------:|:--------:|:-------:|:-------------:|:------------:|:-----------------:|:------:|:--------:| -| R-50 | pytorch | N | N | 1x | - | 41.6 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco.json)| -| R-50 | pytorch | N | Y | 2x | - | 44.5 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco.json)| -| R-50 | pytorch | Y | Y | 2x | - | 47.8 | 48.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| -| R-101 | pytorch | N | N | 1x | - | 43.0 | 43.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco.json)| -| R-101 | pytorch | N | Y | 2x | - | 46.2 | 46.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco.json)| -| R-101 | pytorch | Y | Y | 2x | - | 49.0 | 49.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| -| X-101-32x4d | pytorch | Y | Y | 2x | - | 49.7 | 50.0 | 
[config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| -| X-101-64x4d | pytorch | Y | Y | 2x | - | 50.4 | 50.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| - -**Notes:** - -- The MS-train scale range is 1333x[480:960] (`range` mode) and the inference scale keeps 1333x800. -- DCN means using `DCNv2` in both backbone and head. -- Inference time will be updated soon. -- More results and pre-trained models can be found in [VarifocalNet-Github](https://github.com/hyz-xmaster/VarifocalNet) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/centripetal_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/centripetal_head.py deleted file mode 100644 index a9d3ddf5bee1a8b42cedb02d4fcd36cc212e42b6..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/centripetal_head.py +++ /dev/null @@ -1,426 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import ConvModule, normal_init -from mmcv.ops import DeformConv2d - -from mmdet.core import multi_apply -from ..builder import HEADS, build_loss -from .corner_head import CornerHead - - -@HEADS.register_module() -class CentripetalHead(CornerHead): - """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object - Detection. - - CentripetalHead inherits from :class:`CornerHead`. It removes the - embedding branch and adds guiding shift and centripetal shift branches. - More details can be found in the `paper - `_ . - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - num_feat_levels (int): Levels of feature from the previous module. 2 - for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104 - outputs the final feature and intermediate supervision feature and - HourglassNet-52 only outputs the final feature. Default: 2. - corner_emb_channels (int): Channel of embedding vector. Default: 1. - train_cfg (dict | None): Training config. Useless in CornerHead, - but we keep this variable for SingleStageDetector. Default: None. - test_cfg (dict | None): Testing config of CornerHead. Default: None. - loss_heatmap (dict | None): Config of corner heatmap loss. Default: - GaussianFocalLoss. - loss_embedding (dict | None): Config of corner embedding loss. Default: - AssociativeEmbeddingLoss. - loss_offset (dict | None): Config of corner offset loss. Default: - SmoothL1Loss. - loss_guiding_shift (dict): Config of guiding shift loss. Default: - SmoothL1Loss. 
- loss_centripetal_shift (dict): Config of centripetal shift loss. - Default: SmoothL1Loss. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - *args, - centripetal_shift_channels=2, - guiding_shift_channels=2, - feat_adaption_conv_kernel=3, - loss_guiding_shift=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=0.05), - loss_centripetal_shift=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=1), - init_cfg=None, - **kwargs): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - assert centripetal_shift_channels == 2, ( - 'CentripetalHead only support centripetal_shift_channels == 2') - self.centripetal_shift_channels = centripetal_shift_channels - assert guiding_shift_channels == 2, ( - 'CentripetalHead only support guiding_shift_channels == 2') - self.guiding_shift_channels = guiding_shift_channels - self.feat_adaption_conv_kernel = feat_adaption_conv_kernel - super(CentripetalHead, self).__init__( - *args, init_cfg=init_cfg, **kwargs) - self.loss_guiding_shift = build_loss(loss_guiding_shift) - self.loss_centripetal_shift = build_loss(loss_centripetal_shift) - - def _init_centripetal_layers(self): - """Initialize centripetal layers. - - Including feature adaption deform convs (feat_adaption), deform offset - prediction convs (dcn_off), guiding shift (guiding_shift) and - centripetal shift ( centripetal_shift). Each branch has two parts: - prefix `tl_` for top-left and `br_` for bottom-right. - """ - self.tl_feat_adaption = nn.ModuleList() - self.br_feat_adaption = nn.ModuleList() - self.tl_dcn_offset = nn.ModuleList() - self.br_dcn_offset = nn.ModuleList() - self.tl_guiding_shift = nn.ModuleList() - self.br_guiding_shift = nn.ModuleList() - self.tl_centripetal_shift = nn.ModuleList() - self.br_centripetal_shift = nn.ModuleList() - - for _ in range(self.num_feat_levels): - self.tl_feat_adaption.append( - DeformConv2d(self.in_channels, self.in_channels, - self.feat_adaption_conv_kernel, 1, 1)) - self.br_feat_adaption.append( - DeformConv2d(self.in_channels, self.in_channels, - self.feat_adaption_conv_kernel, 1, 1)) - - self.tl_guiding_shift.append( - self._make_layers( - out_channels=self.guiding_shift_channels, - in_channels=self.in_channels)) - self.br_guiding_shift.append( - self._make_layers( - out_channels=self.guiding_shift_channels, - in_channels=self.in_channels)) - - self.tl_dcn_offset.append( - ConvModule( - self.guiding_shift_channels, - self.feat_adaption_conv_kernel**2 * - self.guiding_shift_channels, - 1, - bias=False, - act_cfg=None)) - self.br_dcn_offset.append( - ConvModule( - self.guiding_shift_channels, - self.feat_adaption_conv_kernel**2 * - self.guiding_shift_channels, - 1, - bias=False, - act_cfg=None)) - - self.tl_centripetal_shift.append( - self._make_layers( - out_channels=self.centripetal_shift_channels, - in_channels=self.in_channels)) - self.br_centripetal_shift.append( - self._make_layers( - out_channels=self.centripetal_shift_channels, - in_channels=self.in_channels)) - - def _init_layers(self): - """Initialize layers for CentripetalHead. 
- - Including two parts: CornerHead layers and CentripetalHead layers - """ - super()._init_layers() # using _init_layers in CornerHead - self._init_centripetal_layers() - - def init_weights(self): - super(CentripetalHead, self).init_weights() - for i in range(self.num_feat_levels): - normal_init(self.tl_feat_adaption[i], std=0.01) - normal_init(self.br_feat_adaption[i], std=0.01) - normal_init(self.tl_dcn_offset[i].conv, std=0.1) - normal_init(self.br_dcn_offset[i].conv, std=0.1) - _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]] - _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]] - _ = [ - x.conv.reset_parameters() for x in self.tl_centripetal_shift[i] - ] - _ = [ - x.conv.reset_parameters() for x in self.br_centripetal_shift[i] - ] - - def forward_single(self, x, lvl_ind): - """Forward feature of a single level. - - Args: - x (Tensor): Feature of a single level. - lvl_ind (int): Level index of current feature. - - Returns: - tuple[Tensor]: A tuple of CentripetalHead's output for current - feature level. Containing the following Tensors: - - - tl_heat (Tensor): Predicted top-left corner heatmap. - - br_heat (Tensor): Predicted bottom-right corner heatmap. - - tl_off (Tensor): Predicted top-left offset heatmap. - - br_off (Tensor): Predicted bottom-right offset heatmap. - - tl_guiding_shift (Tensor): Predicted top-left guiding shift - heatmap. - - br_guiding_shift (Tensor): Predicted bottom-right guiding - shift heatmap. - - tl_centripetal_shift (Tensor): Predicted top-left centripetal - shift heatmap. - - br_centripetal_shift (Tensor): Predicted bottom-right - centripetal shift heatmap. - """ - tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super( - ).forward_single( - x, lvl_ind, return_pool=True) - - tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool) - br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool) - - tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach()) - br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach()) - - tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool, - tl_dcn_offset) - br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool, - br_dcn_offset) - - tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind]( - tl_feat_adaption) - br_centripetal_shift = self.br_centripetal_shift[lvl_ind]( - br_feat_adaption) - - result_list = [ - tl_heat, br_heat, tl_off, br_off, tl_guiding_shift, - br_guiding_shift, tl_centripetal_shift, br_centripetal_shift - ] - return result_list - - def loss(self, - tl_heats, - br_heats, - tl_offs, - br_offs, - tl_guiding_shifts, - br_guiding_shifts, - tl_centripetal_shifts, - br_centripetal_shifts, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - tl_heats (list[Tensor]): Top-left corner heatmaps for each level - with shape (N, num_classes, H, W). - br_heats (list[Tensor]): Bottom-right corner heatmaps for each - level with shape (N, num_classes, H, W). - tl_offs (list[Tensor]): Top-left corner offsets for each level - with shape (N, corner_offset_channels, H, W). - br_offs (list[Tensor]): Bottom-right corner offsets for each level - with shape (N, corner_offset_channels, H, W). - tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each - level with shape (N, guiding_shift_channels, H, W). - br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for - each level with shape (N, guiding_shift_channels, H, W). 
- tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts - for each level with shape (N, centripetal_shift_channels, H, - W). - br_centripetal_shifts (list[Tensor]): Bottom-right centripetal - shifts for each level with shape (N, - centripetal_shift_channels, H, W). - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [left, top, right, bottom] format. - gt_labels (list[Tensor]): Class indices corresponding to each box. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): Specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. Containing the - following losses: - - - det_loss (list[Tensor]): Corner keypoint losses of all - feature levels. - - off_loss (list[Tensor]): Corner offset losses of all feature - levels. - - guiding_loss (list[Tensor]): Guiding shift losses of all - feature levels. - - centripetal_loss (list[Tensor]): Centripetal shift losses of - all feature levels. - """ - targets = self.get_targets( - gt_bboxes, - gt_labels, - tl_heats[-1].shape, - img_metas[0]['pad_shape'], - with_corner_emb=self.with_corner_emb, - with_guiding_shift=True, - with_centripetal_shift=True) - mlvl_targets = [targets for _ in range(self.num_feat_levels)] - [det_losses, off_losses, guiding_losses, centripetal_losses - ] = multi_apply(self.loss_single, tl_heats, br_heats, tl_offs, - br_offs, tl_guiding_shifts, br_guiding_shifts, - tl_centripetal_shifts, br_centripetal_shifts, - mlvl_targets) - loss_dict = dict( - det_loss=det_losses, - off_loss=off_losses, - guiding_loss=guiding_losses, - centripetal_loss=centripetal_losses) - return loss_dict - - def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift, - br_guiding_shift, tl_centripetal_shift, - br_centripetal_shift, targets): - """Compute losses for single level. - - Args: - tl_hmp (Tensor): Top-left corner heatmap for current level with - shape (N, num_classes, H, W). - br_hmp (Tensor): Bottom-right corner heatmap for current level with - shape (N, num_classes, H, W). - tl_off (Tensor): Top-left corner offset for current level with - shape (N, corner_offset_channels, H, W). - br_off (Tensor): Bottom-right corner offset for current level with - shape (N, corner_offset_channels, H, W). - tl_guiding_shift (Tensor): Top-left guiding shift for current level - with shape (N, guiding_shift_channels, H, W). - br_guiding_shift (Tensor): Bottom-right guiding shift for current - level with shape (N, guiding_shift_channels, H, W). - tl_centripetal_shift (Tensor): Top-left centripetal shift for - current level with shape (N, centripetal_shift_channels, H, W). - br_centripetal_shift (Tensor): Bottom-right centripetal shift for - current level with shape (N, centripetal_shift_channels, H, W). - targets (dict): Corner target generated by `get_targets`. - - Returns: - tuple[torch.Tensor]: Losses of the head's differnet branches - containing the following losses: - - - det_loss (Tensor): Corner keypoint loss. - - off_loss (Tensor): Corner offset loss. - - guiding_loss (Tensor): Guiding shift loss. - - centripetal_loss (Tensor): Centripetal shift loss. 
- """ - targets['corner_embedding'] = None - - det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None, - None, tl_off, br_off, - targets) - - gt_tl_guiding_shift = targets['topleft_guiding_shift'] - gt_br_guiding_shift = targets['bottomright_guiding_shift'] - gt_tl_centripetal_shift = targets['topleft_centripetal_shift'] - gt_br_centripetal_shift = targets['bottomright_centripetal_shift'] - - gt_tl_heatmap = targets['topleft_heatmap'] - gt_br_heatmap = targets['bottomright_heatmap'] - # We only compute the offset loss at the real corner position. - # The value of real corner would be 1 in heatmap ground truth. - # The mask is computed in class agnostic mode and its shape is - # batch * 1 * width * height. - tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( - gt_tl_heatmap) - br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( - gt_br_heatmap) - - # Guiding shift loss - tl_guiding_loss = self.loss_guiding_shift( - tl_guiding_shift, - gt_tl_guiding_shift, - tl_mask, - avg_factor=tl_mask.sum()) - br_guiding_loss = self.loss_guiding_shift( - br_guiding_shift, - gt_br_guiding_shift, - br_mask, - avg_factor=br_mask.sum()) - guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0 - # Centripetal shift loss - tl_centripetal_loss = self.loss_centripetal_shift( - tl_centripetal_shift, - gt_tl_centripetal_shift, - tl_mask, - avg_factor=tl_mask.sum()) - br_centripetal_loss = self.loss_centripetal_shift( - br_centripetal_shift, - gt_br_centripetal_shift, - br_mask, - avg_factor=br_mask.sum()) - centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0 - - return det_loss, off_loss, guiding_loss, centripetal_loss - - def get_bboxes(self, - tl_heats, - br_heats, - tl_offs, - br_offs, - tl_guiding_shifts, - br_guiding_shifts, - tl_centripetal_shifts, - br_centripetal_shifts, - img_metas, - rescale=False, - with_nms=True): - """Transform network output for a batch into bbox predictions. - - Args: - tl_heats (list[Tensor]): Top-left corner heatmaps for each level - with shape (N, num_classes, H, W). - br_heats (list[Tensor]): Bottom-right corner heatmaps for each - level with shape (N, num_classes, H, W). - tl_offs (list[Tensor]): Top-left corner offsets for each level - with shape (N, corner_offset_channels, H, W). - br_offs (list[Tensor]): Bottom-right corner offsets for each level - with shape (N, corner_offset_channels, H, W). - tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each - level with shape (N, guiding_shift_channels, H, W). Useless in - this function, we keep this arg because it's the raw output - from CentripetalHead. - br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for - each level with shape (N, guiding_shift_channels, H, W). - Useless in this function, we keep this arg because it's the - raw output from CentripetalHead. - tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts - for each level with shape (N, centripetal_shift_channels, H, - W). - br_centripetal_shifts (list[Tensor]): Bottom-right centripetal - shifts for each level with shape (N, - centripetal_shift_channels, H, W). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. 
- """ - assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas) - result_list = [] - for img_id in range(len(img_metas)): - result_list.append( - self._get_bboxes_single( - tl_heats[-1][img_id:img_id + 1, :], - br_heats[-1][img_id:img_id + 1, :], - tl_offs[-1][img_id:img_id + 1, :], - br_offs[-1][img_id:img_id + 1, :], - img_metas[img_id], - tl_emb=None, - br_emb=None, - tl_centripetal_shift=tl_centripetal_shifts[-1][ - img_id:img_id + 1, :], - br_centripetal_shift=br_centripetal_shifts[-1][ - img_id:img_id + 1, :], - rescale=rescale, - with_nms=with_nms)) - - return result_list diff --git a/spaces/training-transformers-together/Dashboard/streamlit_observable/frontend/src/streamlit/streamlit.ts b/spaces/training-transformers-together/Dashboard/streamlit_observable/frontend/src/streamlit/streamlit.ts deleted file mode 100644 index 7e77b4d80fedbe6ff8f23d45e7651e20f7164f4c..0000000000000000000000000000000000000000 --- a/spaces/training-transformers-together/Dashboard/streamlit_observable/frontend/src/streamlit/streamlit.ts +++ /dev/null @@ -1,198 +0,0 @@ -/** - * @license - * Copyright 2018-2020 Streamlit Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Safari doesn't support the EventTarget class, so we use a shim. -import { EventTarget } from "event-target-shim" -import { ArrowDataframeProto, ArrowTable } from "./ArrowTable" - -/** Data sent in the custom Streamlit render event. */ -export interface RenderData { - args: any - disabled: boolean -} - -/** Messages from Component -> Streamlit */ -enum ComponentMessageType { - // A component sends this message when it's ready to receive messages - // from Streamlit. Streamlit won't send any messages until it gets this. - // Data: { apiVersion: number } - COMPONENT_READY = "streamlit:componentReady", - - // The component has a new widget value. Send it back to Streamlit, which - // will then re-run the app. - // Data: { value: any } - SET_COMPONENT_VALUE = "streamlit:setComponentValue", - - // The component has a new height for its iframe. - // Data: { height: number } - SET_FRAME_HEIGHT = "streamlit:setFrameHeight", -} - -/** - * Streamlit communication API. - * - * Components can send data to Streamlit via the functions defined here, - * and receive data from Streamlit via the `events` property. - */ -export class Streamlit { - /** - * The Streamlit component API version we're targetting. - * There's currently only 1! - */ - public static readonly API_VERSION = 1 - - public static readonly RENDER_EVENT = "streamlit:render" - - /** Dispatches events received from Streamlit. */ - public static readonly events = new EventTarget() - - private static registeredMessageListener = false - private static lastFrameHeight?: number - - /** - * Tell Streamlit that the component is ready to start receiving data. - * Streamlit will defer emitting RENDER events until it receives the - * COMPONENT_READY message. 
- */ - public static setComponentReady = (): void => { - if (!Streamlit.registeredMessageListener) { - // Register for message events if we haven't already - window.addEventListener("message", Streamlit.onMessageEvent) - Streamlit.registeredMessageListener = true - } - - Streamlit.sendBackMsg(ComponentMessageType.COMPONENT_READY, { - apiVersion: Streamlit.API_VERSION, - }) - } - - /** - * Report the component's height to Streamlit. - * This should be called every time the component changes its DOM - that is, - * when it's first loaded, and any time it updates. - */ - public static setFrameHeight = (height?: number): void => { - if (height === undefined) { - // `height` is optional. If undefined, it defaults to scrollHeight, - // which is the entire height of the element minus its border, - // scrollbar, and margin. - height = document.body.scrollHeight + 10; - } - - if (height === Streamlit.lastFrameHeight) { - // Don't bother updating if our height hasn't changed. - return - } - - Streamlit.lastFrameHeight = height - Streamlit.sendBackMsg(ComponentMessageType.SET_FRAME_HEIGHT, { height }) - } - - /** - * Set the component's value. This value will be returned to the Python - * script, and the script will be re-run. - * - * For example: - * - * JavaScript: - * Streamlit.setComponentValue("ahoy!") - * - * Python: - * value = st.my_component(...) - * st.write(value) # -> "ahoy!" - * - * The value must be serializable into JSON. - */ - public static setComponentValue = (value: any): void => { - Streamlit.sendBackMsg(ComponentMessageType.SET_COMPONENT_VALUE, { value }) - } - - /** Receive a ForwardMsg from the Streamlit app */ - private static onMessageEvent = (event: MessageEvent): void => { - const type = event.data["type"] - switch (type) { - case Streamlit.RENDER_EVENT: - Streamlit.onRenderMessage(event.data) - break - } - } - - /** - * Handle an untyped Streamlit render event and redispatch it as a - * StreamlitRenderEvent. - */ - private static onRenderMessage = (data: any): void => { - let args = data["args"] - if (args == null) { - console.error( - `Got null args in onRenderMessage. This should never happen` - ) - args = {} - } - - // Parse our dataframe arguments with arrow, and merge them into our args dict - const dataframeArgs = - data["dfs"] && data["dfs"].length > 0 - ? Streamlit.argsDataframeToObject(data["dfs"]) - : {} - - args = { - ...args, - ...dataframeArgs, - } - - const disabled = Boolean(data["disabled"]) - - // Dispatch a render event! - const eventData = { disabled, args } - const event = new CustomEvent(Streamlit.RENDER_EVENT, { - detail: eventData, - }) - Streamlit.events.dispatchEvent(event) - } - - private static argsDataframeToObject = ( - argsDataframe: ArgsDataframe[] - ): object => { - const argsDataframeArrow = argsDataframe.map( - ({ key, value }: ArgsDataframe) => [key, Streamlit.toArrowTable(value)] - ) - return Object.fromEntries(argsDataframeArrow) - } - - private static toArrowTable = (df: ArrowDataframeProto): ArrowTable => { - const { data, index, columns } = df.data - return new ArrowTable(data, index, columns) - } - - /** Post a message to the Streamlit app. 
*/ - private static sendBackMsg = (type: string, data?: any): void => { - window.parent.postMessage( - { - isStreamlitMessage: true, - type: type, - ...data, - }, - "*" - ) - } -} - -interface ArgsDataframe { - key: string - value: ArrowDataframeProto -} diff --git a/spaces/tryolabs/norfair-demo/demo_utils/configuration.py b/spaces/tryolabs/norfair-demo/demo_utils/configuration.py deleted file mode 100644 index 0e57761838ab907b6c91a4a885c95fe6e0bf9cc5..0000000000000000000000000000000000000000 --- a/spaces/tryolabs/norfair-demo/demo_utils/configuration.py +++ /dev/null @@ -1,13 +0,0 @@ -DISTANCE_THRESHOLD_CENTROID: int = 0.8 -DISTANCE_THRESHOLD_BBOX: int = 0.7 -MAX_DISTANCE: int = 10000 - -models_path = {"YOLOv7": "custom_models/yolov7.pt", "YOLOv7 Tiny": "custom_models/yolov7-tiny.pt"} - -style = {"Bounding box": "bbox", "Centroid": "centroid"} - -examples = { - "soccer": {"distance_threshold": 0.7, "absolute_path": True, "classes": [0]}, - "oxford_town_center": {"distance_threshold": 0.7, "absolute_path": False, "classes": [0]}, - "traffic_1_A": {"distance_threshold": 0.7, "absolute_path": False, "classes": [2, 3, 5, 7]}, -} diff --git a/spaces/trysem/image-matting-app/ppmatting/models/ppmatting.py b/spaces/trysem/image-matting-app/ppmatting/models/ppmatting.py deleted file mode 100644 index 2ed14528b5e598eda3a8fd6030a51ecc81dc6e3c..0000000000000000000000000000000000000000 --- a/spaces/trysem/image-matting-app/ppmatting/models/ppmatting.py +++ /dev/null @@ -1,338 +0,0 @@ -# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import defaultdict -import time - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -import paddleseg -from paddleseg.models import layers -from paddleseg import utils -from paddleseg.cvlibs import manager - -from ppmatting.models.losses import MRSD, GradientLoss -from ppmatting.models.backbone import resnet_vd - - -@manager.MODELS.add_component -class PPMatting(nn.Layer): - """ - The PPMattinh implementation based on PaddlePaddle. - - The original article refers to - Guowei Chen, et, al. "PP-Matting: High-Accuracy Natural Image Matting" - (https://arxiv.org/pdf/2204.09433.pdf). - - Args: - backbone: backbone model. - pretrained(str, optional): The path of pretrianed model. Defautl: None. 
- - """ - - def __init__(self, backbone, pretrained=None): - super().__init__() - self.backbone = backbone - self.pretrained = pretrained - self.loss_func_dict = self.get_loss_func_dict() - - self.backbone_channels = backbone.feat_channels - - self.scb = SCB(self.backbone_channels[-1]) - - self.hrdb = HRDB( - self.backbone_channels[0] + self.backbone_channels[1], - scb_channels=self.scb.out_channels, - gf_index=[0, 2, 4]) - - self.init_weight() - - def forward(self, inputs): - x = inputs['img'] - input_shape = paddle.shape(x) - fea_list = self.backbone(x) - - scb_logits = self.scb(fea_list[-1]) - semantic_map = F.softmax(scb_logits[-1], axis=1) - - fea0 = F.interpolate( - fea_list[0], input_shape[2:], mode='bilinear', align_corners=False) - fea1 = F.interpolate( - fea_list[1], input_shape[2:], mode='bilinear', align_corners=False) - hrdb_input = paddle.concat([fea0, fea1], 1) - hrdb_logit = self.hrdb(hrdb_input, scb_logits) - detail_map = F.sigmoid(hrdb_logit) - fusion = self.fusion(semantic_map, detail_map) - - if self.training: - logit_dict = { - 'semantic': semantic_map, - 'detail': detail_map, - 'fusion': fusion - } - loss_dict = self.loss(logit_dict, inputs) - return logit_dict, loss_dict - else: - return fusion - - def get_loss_func_dict(self): - loss_func_dict = defaultdict(list) - loss_func_dict['semantic'].append(nn.NLLLoss()) - loss_func_dict['detail'].append(MRSD()) - loss_func_dict['detail'].append(GradientLoss()) - loss_func_dict['fusion'].append(MRSD()) - loss_func_dict['fusion'].append(MRSD()) - loss_func_dict['fusion'].append(GradientLoss()) - return loss_func_dict - - def loss(self, logit_dict, label_dict): - loss = {} - - # semantic loss computation - # get semantic label - semantic_label = label_dict['trimap'] - semantic_label_trans = (semantic_label == 128).astype('int64') - semantic_label_bg = (semantic_label == 0).astype('int64') - semantic_label = semantic_label_trans + semantic_label_bg * 2 - loss_semantic = self.loss_func_dict['semantic'][0]( - paddle.log(logit_dict['semantic'] + 1e-6), - semantic_label.squeeze(1)) - loss['semantic'] = loss_semantic - - # detail loss computation - transparent = label_dict['trimap'] == 128 - detail_alpha_loss = self.loss_func_dict['detail'][0]( - logit_dict['detail'], label_dict['alpha'], transparent) - # gradient loss - detail_gradient_loss = self.loss_func_dict['detail'][1]( - logit_dict['detail'], label_dict['alpha'], transparent) - loss_detail = detail_alpha_loss + detail_gradient_loss - loss['detail'] = loss_detail - loss['detail_alpha'] = detail_alpha_loss - loss['detail_gradient'] = detail_gradient_loss - - # fusion loss - loss_fusion_func = self.loss_func_dict['fusion'] - # fusion_sigmoid loss - fusion_alpha_loss = loss_fusion_func[0](logit_dict['fusion'], - label_dict['alpha']) - # composion loss - comp_pred = logit_dict['fusion'] * label_dict['fg'] + ( - 1 - logit_dict['fusion']) * label_dict['bg'] - comp_gt = label_dict['alpha'] * label_dict['fg'] + ( - 1 - label_dict['alpha']) * label_dict['bg'] - fusion_composition_loss = loss_fusion_func[1](comp_pred, comp_gt) - # grandient loss - fusion_grad_loss = loss_fusion_func[2](logit_dict['fusion'], - label_dict['alpha']) - # fusion loss - loss_fusion = fusion_alpha_loss + fusion_composition_loss + fusion_grad_loss - loss['fusion'] = loss_fusion - loss['fusion_alpha'] = fusion_alpha_loss - loss['fusion_composition'] = fusion_composition_loss - loss['fusion_gradient'] = fusion_grad_loss - - loss[ - 'all'] = 0.25 * loss_semantic + 0.25 * loss_detail + 0.25 * loss_fusion - - 
return loss - - def fusion(self, semantic_map, detail_map): - # semantic_map [N, 3, H, W] - # In index, 0 is foreground, 1 is transition, 2 is backbone - # After fusion, the foreground is 1, the background is 0, and the transion is between [0, 1] - index = paddle.argmax(semantic_map, axis=1, keepdim=True) - transition_mask = (index == 1).astype('float32') - fg = (index == 0).astype('float32') - alpha = detail_map * transition_mask + fg - return alpha - - def init_weight(self): - if self.pretrained is not None: - utils.load_entire_model(self, self.pretrained) - - -class SCB(nn.Layer): - def __init__(self, in_channels): - super().__init__() - self.in_channels = [512 + in_channels, 512, 256, 128, 128, 64] - self.mid_channels = [512, 256, 128, 128, 64, 64] - self.out_channels = [256, 128, 64, 64, 64, 3] - - self.psp_module = layers.PPModule( - in_channels, - 512, - bin_sizes=(1, 3, 5), - dim_reduction=False, - align_corners=False) - - psp_upsamples = [2, 4, 8, 16] - self.psps = nn.LayerList([ - self.conv_up_psp(512, self.out_channels[i], psp_upsamples[i]) - for i in range(4) - ]) - - scb_list = [ - self._make_stage( - self.in_channels[i], - self.mid_channels[i], - self.out_channels[i], - padding=int(i == 0) + 1, - dilation=int(i == 0) + 1) - for i in range(len(self.in_channels) - 1) - ] - scb_list += [ - nn.Sequential( - layers.ConvBNReLU( - self.in_channels[-1], self.mid_channels[-1], 3, padding=1), - layers.ConvBNReLU( - self.mid_channels[-1], self.mid_channels[-1], 3, padding=1), - nn.Conv2D( - self.mid_channels[-1], self.out_channels[-1], 3, padding=1)) - ] - self.scb_stages = nn.LayerList(scb_list) - - def forward(self, x): - psp_x = self.psp_module(x) - psps = [psp(psp_x) for psp in self.psps] - - scb_logits = [] - for i, scb_stage in enumerate(self.scb_stages): - if i == 0: - x = scb_stage(paddle.concat((psp_x, x), 1)) - elif i <= len(psps): - x = scb_stage(paddle.concat((psps[i - 1], x), 1)) - else: - x = scb_stage(x) - scb_logits.append(x) - return scb_logits - - def conv_up_psp(self, in_channels, out_channels, up_sample): - return nn.Sequential( - layers.ConvBNReLU( - in_channels, out_channels, 3, padding=1), - nn.Upsample( - scale_factor=up_sample, mode='bilinear', align_corners=False)) - - def _make_stage(self, - in_channels, - mid_channels, - out_channels, - padding=1, - dilation=1): - layer_list = [ - layers.ConvBNReLU( - in_channels, mid_channels, 3, padding=1), layers.ConvBNReLU( - mid_channels, - mid_channels, - 3, - padding=padding, - dilation=dilation), layers.ConvBNReLU( - mid_channels, - out_channels, - 3, - padding=padding, - dilation=dilation), nn.Upsample( - scale_factor=2, - mode='bilinear', - align_corners=False) - ] - return nn.Sequential(*layer_list) - - -class HRDB(nn.Layer): - """ - The High-Resolution Detail Branch - - Args: - in_channels(int): The number of input channels. - scb_channels(list|tuple): The channels of scb logits - gf_index(list|tuple, optional): Which logit is selected as guidance flow from scb logits. 
Default: (0, 2, 4) - """ - - def __init__(self, in_channels, scb_channels, gf_index=(0, 2, 4)): - super().__init__() - self.gf_index = gf_index - self.gf_list = nn.LayerList( - [nn.Conv2D(scb_channels[i], 1, 1) for i in gf_index]) - - channels = [64, 32, 16, 8] - self.res_list = [ - resnet_vd.BasicBlock( - in_channels, channels[0], stride=1, shortcut=False) - ] - self.res_list += [ - resnet_vd.BasicBlock( - i, i, stride=1) for i in channels[1:-1] - ] - self.res_list = nn.LayerList(self.res_list) - - self.convs = nn.LayerList([ - nn.Conv2D( - channels[i], channels[i + 1], kernel_size=1) - for i in range(len(channels) - 1) - ]) - self.gates = nn.LayerList( - [GatedSpatailConv2d(i, i) for i in channels[1:]]) - - self.detail_conv = nn.Conv2D(channels[-1], 1, 1, bias_attr=False) - - def forward(self, x, scb_logits): - for i in range(len(self.res_list)): - x = self.res_list[i](x) - x = self.convs[i](x) - gf = self.gf_list[i](scb_logits[self.gf_index[i]]) - gf = F.interpolate( - gf, paddle.shape(x)[-2:], mode='bilinear', align_corners=False) - x = self.gates[i](x, gf) - return self.detail_conv(x) - - -class GatedSpatailConv2d(nn.Layer): - def __init__(self, - in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0, - dilation=1, - groups=1, - bias_attr=False): - super().__init__() - self._gate_conv = nn.Sequential( - layers.SyncBatchNorm(in_channels + 1), - nn.Conv2D( - in_channels + 1, in_channels + 1, kernel_size=1), - nn.ReLU(), - nn.Conv2D( - in_channels + 1, 1, kernel_size=1), - layers.SyncBatchNorm(1), - nn.Sigmoid()) - self.conv = nn.Conv2D( - in_channels, - out_channels, - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - bias_attr=bias_attr) - - def forward(self, input_features, gating_features): - cat = paddle.concat([input_features, gating_features], axis=1) - alphas = self._gate_conv(cat) - x = input_features * (alphas + 1) - x = self.conv(x) - return x diff --git a/spaces/tumuyan/speaker-verification/samples/readme.md b/spaces/tumuyan/speaker-verification/samples/readme.md deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/tusharust/darkstorm2150-Protogen_x5.8_Official_Release/README.md b/spaces/tusharust/darkstorm2150-Protogen_x5.8_Official_Release/README.md deleted file mode 100644 index 31e9ddb5463f89bffd72c61c6d7e6920b57d4e2d..0000000000000000000000000000000000000000 --- a/spaces/tusharust/darkstorm2150-Protogen_x5.8_Official_Release/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Darkstorm2150-Protogen X5.8 Official Release -emoji: 🚀 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ulysses115/diffsvc_test/preprocessing/SVCpre.py b/spaces/ulysses115/diffsvc_test/preprocessing/SVCpre.py deleted file mode 100644 index 2faa0737fb5d61f6bdb4ac1fb959711c50311d0e..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/diffsvc_test/preprocessing/SVCpre.py +++ /dev/null @@ -1,63 +0,0 @@ -''' - - item: one piece of data - item_name: data id - wavfn: wave file path - txt: lyrics - ph: phoneme - tgfn: text grid file path (unused) - spk: dataset name - wdb: word boundary - ph_durs: phoneme durations - midi: pitch as midi notes - midi_dur: midi duration - is_slur: keep singing upon note changes -''' - - -from copy import deepcopy - -import logging - -from 
preprocessing.process_pipeline import File2Batch -from utils.hparams import hparams -from preprocessing.base_binarizer import BaseBinarizer - -SVCSINGING_ITEM_ATTRIBUTES = ['wav_fn', 'spk_id'] -class SVCBinarizer(BaseBinarizer): - def __init__(self, item_attributes=SVCSINGING_ITEM_ATTRIBUTES): - super().__init__(item_attributes) - print('spkers: ', set(item['spk_id'] for item in self.items.values())) - self.item_names = sorted(list(self.items.keys())) - self._train_item_names, self._test_item_names = self.split_train_test_set(self.item_names) - # self._valid_item_names=[] - - def split_train_test_set(self, item_names): - item_names = deepcopy(item_names) - if hparams['choose_test_manually']: - test_item_names = [x for x in item_names if any([x.startswith(ts) for ts in hparams['test_prefixes']])] - else: - test_item_names = item_names[-5:] - train_item_names = [x for x in item_names if x not in set(test_item_names)] - logging.info("train {}".format(len(train_item_names))) - logging.info("test {}".format(len(test_item_names))) - return train_item_names, test_item_names - - @property - def train_item_names(self): - return self._train_item_names - - @property - def valid_item_names(self): - return self._test_item_names - - @property - def test_item_names(self): - return self._test_item_names - - def load_meta_data(self): - self.items = File2Batch.file2temporary_dict() - - def _phone_encoder(self): - from preprocessing.hubertinfer import Hubertencoder - return Hubertencoder(hparams['hubert_path']) \ No newline at end of file diff --git a/spaces/ulysses115/diffsvc_test/training/from huggingface_hub import Repository.py b/spaces/ulysses115/diffsvc_test/training/from huggingface_hub import Repository.py deleted file mode 100644 index 5c3c8ded414d2af662eb49404e608a8a15462e9a..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/diffsvc_test/training/from huggingface_hub import Repository.py +++ /dev/null @@ -1,2 +0,0 @@ -from huggingface_hub import Repository -repo = Repository(local_dir="w2v2", clone_from="facebook/wav2vec2-large-960h-lv60") \ No newline at end of file diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/data/dataset_wrappers.py b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/data/dataset_wrappers.py deleted file mode 100644 index 72a6fb57a373cb9fbfd2a4facde7dfb427452a64..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/data/dataset_wrappers.py +++ /dev/null @@ -1,53 +0,0 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license - -import collections -from copy import deepcopy - -from .augment import LetterBox - - -class MixAndRectDataset: - """ - A dataset class that applies mosaic and mixup transformations as well as rectangular training. - - Attributes: - dataset: The base dataset. - imgsz: The size of the images in the dataset. - """ - - def __init__(self, dataset): - """ - Args: - dataset (BaseDataset): The base dataset to apply transformations to. - """ - self.dataset = dataset - self.imgsz = dataset.imgsz - - def __len__(self): - """Returns the number of items in the dataset.""" - return len(self.dataset) - - def __getitem__(self, index): - """ - Applies mosaic, mixup and rectangular training transformations to an item in the dataset. - - Args: - index (int): Index of the item in the dataset. - - Returns: - (dict): A dictionary containing the transformed item data. 
- """ - labels = deepcopy(self.dataset[index]) - for transform in self.dataset.transforms.tolist(): - # Mosaic and mixup - if hasattr(transform, 'get_indexes'): - indexes = transform.get_indexes(self.dataset) - if not isinstance(indexes, collections.abc.Sequence): - indexes = [indexes] - labels['mix_labels'] = [deepcopy(self.dataset[index]) for index in indexes] - if self.dataset.rect and isinstance(transform, LetterBox): - transform.new_shape = self.dataset.batch_shapes[self.dataset.batch[index]] - labels = transform(labels) - if 'mix_labels' in labels: - labels.pop('mix_labels') - return labels diff --git a/spaces/videfikri/aicover/vc_infer_pipeline.py b/spaces/videfikri/aicover/vc_infer_pipeline.py deleted file mode 100644 index 7ff98b2c812f4e74afe92048fb26009fb008479d..0000000000000000000000000000000000000000 --- a/spaces/videfikri/aicover/vc_infer_pipeline.py +++ /dev/null @@ -1,320 +0,0 @@ -import numpy as np, parselmouth, torch, pdb -from time import time as ttime -import torch.nn.functional as F -import scipy.signal as signal -import pyworld, os, traceback, faiss -from scipy import signal - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - - -class VC(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * self.x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # 查询切点前后查询时间 - self.t_center = self.sr * self.x_center # 查询切点位置 - self.t_max = self.sr * self.x_max # 免查询时长阈值 - self.device = config.device - - def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None): - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - 
self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9, # layer 9 - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) - - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - f0_file=None, - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = 
f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/vivym/image-matting-app/ppmatting/ml/__init__.py b/spaces/vivym/image-matting-app/ppmatting/ml/__init__.py deleted file mode 100644 index 612dff101f358f74db3eca601f0b9573ca6d93cb..0000000000000000000000000000000000000000 --- a/spaces/vivym/image-matting-app/ppmatting/ml/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .methods import CloseFormMatting, KNNMatting, LearningBasedMatting, FastMatting, RandomWalksMatting diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/resnet.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/resnet.py deleted file mode 100644 index 1cb3ac057ee2d52c46fc94685b5d4e698aad8d5f..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/resnet.py +++ /dev/null @@ -1,316 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import logging - -import torch.nn as nn -import torch.utils.checkpoint as cp - -from .utils import constant_init, kaiming_init - - -def conv3x3(in_planes, out_planes, stride=1, dilation=1): - """3x3 convolution with padding.""" - return nn.Conv2d( - in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - dilation=dilation, - bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False): - super(BasicBlock, self).__init__() - assert style in ['pytorch', 'caffe'] - self.conv1 = conv3x3(inplanes, planes, stride, dilation) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - assert not with_cp - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False): - """Bottleneck block. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. - """ - super(Bottleneck, self).__init__() - assert style in ['pytorch', 'caffe'] - if style == 'pytorch': - conv1_stride = 1 - conv2_stride = stride - else: - conv1_stride = stride - conv2_stride = 1 - self.conv1 = nn.Conv2d( - inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False) - self.conv2 = nn.Conv2d( - planes, - planes, - kernel_size=3, - stride=conv2_stride, - padding=dilation, - dilation=dilation, - bias=False) - - self.bn1 = nn.BatchNorm2d(planes) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d( - planes, planes * self.expansion, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - self.with_cp = with_cp - - def forward(self, x): - - def _inner_forward(x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -def make_res_layer(block, - inplanes, - planes, - blocks, - stride=1, - dilation=1, - style='pytorch', - with_cp=False): - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d( - inplanes, - planes * block.expansion, - kernel_size=1, - stride=stride, - bias=False), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append( - block( - inplanes, - planes, - stride, - dilation, - downsample, - style=style, - with_cp=with_cp)) - inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp)) - - return 
nn.Sequential(*layers) - - -class ResNet(nn.Module): - """ResNet backbone. - - Args: - depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. - num_stages (int): Resnet stages, normally 4. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. - bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze - running stats (mean and var). - bn_frozen (bool): Whether to freeze weight and bias of BN layers. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - """ - - arch_settings = { - 18: (BasicBlock, (2, 2, 2, 2)), - 34: (BasicBlock, (3, 4, 6, 3)), - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, - depth, - num_stages=4, - strides=(1, 2, 2, 2), - dilations=(1, 1, 1, 1), - out_indices=(0, 1, 2, 3), - style='pytorch', - frozen_stages=-1, - bn_eval=True, - bn_frozen=False, - with_cp=False): - super(ResNet, self).__init__() - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for resnet') - assert num_stages >= 1 and num_stages <= 4 - block, stage_blocks = self.arch_settings[depth] - stage_blocks = stage_blocks[:num_stages] - assert len(strides) == len(dilations) == num_stages - assert max(out_indices) < num_stages - - self.out_indices = out_indices - self.style = style - self.frozen_stages = frozen_stages - self.bn_eval = bn_eval - self.bn_frozen = bn_frozen - self.with_cp = with_cp - - self.inplanes = 64 - self.conv1 = nn.Conv2d( - 3, 64, kernel_size=7, stride=2, padding=3, bias=False) - self.bn1 = nn.BatchNorm2d(64) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.res_layers = [] - for i, num_blocks in enumerate(stage_blocks): - stride = strides[i] - dilation = dilations[i] - planes = 64 * 2**i - res_layer = make_res_layer( - block, - self.inplanes, - planes, - num_blocks, - stride=stride, - dilation=dilation, - style=self.style, - with_cp=with_cp) - self.inplanes = planes * block.expansion - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1) - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() - from ..runner import load_checkpoint - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, nn.BatchNorm2d): - constant_init(m, 1) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - x = res_layer(x) - if i in self.out_indices: - outs.append(x) - if len(outs) == 1: - return outs[0] - else: - return tuple(outs) - - def train(self, mode=True): - super(ResNet, self).train(mode) - if self.bn_eval: - for m in self.modules(): - if isinstance(m, 
nn.BatchNorm2d): - m.eval() - if self.bn_frozen: - for params in m.parameters(): - params.requires_grad = False - if mode and self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for param in self.bn1.parameters(): - param.requires_grad = False - self.bn1.eval() - self.bn1.weight.requires_grad = False - self.bn1.bias.requires_grad = False - for i in range(1, self.frozen_stages + 1): - mod = getattr(self, f'layer{i}') - mod.eval() - for param in mod.parameters(): - param.requires_grad = False diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/fileio/file_client.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/fileio/file_client.py deleted file mode 100644 index 950f0c1aeab14b8e308a7455ccd64a95b5d98add..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/fileio/file_client.py +++ /dev/null @@ -1,1148 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import inspect -import os -import os.path as osp -import re -import tempfile -import warnings -from abc import ABCMeta, abstractmethod -from contextlib import contextmanager -from pathlib import Path -from typing import Iterable, Iterator, Optional, Tuple, Union -from urllib.request import urlopen - -import annotator.uniformer.mmcv as mmcv -from annotator.uniformer.mmcv.utils.misc import has_method -from annotator.uniformer.mmcv.utils.path import is_filepath - - -class BaseStorageBackend(metaclass=ABCMeta): - """Abstract class of storage backends. - - All backends need to implement two apis: ``get()`` and ``get_text()``. - ``get()`` reads the file as a byte stream and ``get_text()`` reads the file - as texts. - """ - - # a flag to indicate whether the backend can create a symlink for a file - _allow_symlink = False - - @property - def name(self): - return self.__class__.__name__ - - @property - def allow_symlink(self): - return self._allow_symlink - - @abstractmethod - def get(self, filepath): - pass - - @abstractmethod - def get_text(self, filepath): - pass - - -class CephBackend(BaseStorageBackend): - """Ceph storage backend (for internal use). - - Args: - path_mapping (dict|None): path mapping dict from local path to Petrel - path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath`` - will be replaced by ``dst``. Default: None. - - .. warning:: - :class:`mmcv.fileio.file_client.CephBackend` will be deprecated, - please use :class:`mmcv.fileio.file_client.PetrelBackend` instead. - """ - - def __init__(self, path_mapping=None): - try: - import ceph - except ImportError: - raise ImportError('Please install ceph to enable CephBackend.') - - warnings.warn( - 'CephBackend will be deprecated, please use PetrelBackend instead') - self._client = ceph.S3Client() - assert isinstance(path_mapping, dict) or path_mapping is None - self.path_mapping = path_mapping - - def get(self, filepath): - filepath = str(filepath) - if self.path_mapping is not None: - for k, v in self.path_mapping.items(): - filepath = filepath.replace(k, v) - value = self._client.Get(filepath) - value_buf = memoryview(value) - return value_buf - - def get_text(self, filepath, encoding=None): - raise NotImplementedError - - -class PetrelBackend(BaseStorageBackend): - """Petrel storage backend (for internal use). - - PetrelBackend supports reading and writing data to multiple clusters. - If the file path contains the cluster name, PetrelBackend will read data - from specified cluster or write data to it. 
Otherwise, PetrelBackend will - access the default cluster. - - Args: - path_mapping (dict, optional): Path mapping dict from local path to - Petrel path. When ``path_mapping={'src': 'dst'}``, ``src`` in - ``filepath`` will be replaced by ``dst``. Default: None. - enable_mc (bool, optional): Whether to enable memcached support. - Default: True. - - Examples: - >>> filepath1 = 's3://path/of/file' - >>> filepath2 = 'cluster-name:s3://path/of/file' - >>> client = PetrelBackend() - >>> client.get(filepath1) # get data from default cluster - >>> client.get(filepath2) # get data from 'cluster-name' cluster - """ - - def __init__(self, - path_mapping: Optional[dict] = None, - enable_mc: bool = True): - try: - from petrel_client import client - except ImportError: - raise ImportError('Please install petrel_client to enable ' - 'PetrelBackend.') - - self._client = client.Client(enable_mc=enable_mc) - assert isinstance(path_mapping, dict) or path_mapping is None - self.path_mapping = path_mapping - - def _map_path(self, filepath: Union[str, Path]) -> str: - """Map ``filepath`` to a string path whose prefix will be replaced by - :attr:`self.path_mapping`. - - Args: - filepath (str): Path to be mapped. - """ - filepath = str(filepath) - if self.path_mapping is not None: - for k, v in self.path_mapping.items(): - filepath = filepath.replace(k, v) - return filepath - - def _format_path(self, filepath: str) -> str: - """Convert a ``filepath`` to standard format of petrel oss. - - If the ``filepath`` is concatenated by ``os.path.join``, in a Windows - environment, the ``filepath`` will be the format of - 's3://bucket_name\\image.jpg'. By invoking :meth:`_format_path`, the - above ``filepath`` will be converted to 's3://bucket_name/image.jpg'. - - Args: - filepath (str): Path to be formatted. - """ - return re.sub(r'\\+', '/', filepath) - - def get(self, filepath: Union[str, Path]) -> memoryview: - """Read data from a given ``filepath`` with 'rb' mode. - - Args: - filepath (str or Path): Path to read data. - - Returns: - memoryview: A memory view of expected bytes object to avoid - copying. The memoryview object can be converted to bytes by - ``value_buf.tobytes()``. - """ - filepath = self._map_path(filepath) - filepath = self._format_path(filepath) - value = self._client.Get(filepath) - value_buf = memoryview(value) - return value_buf - - def get_text(self, - filepath: Union[str, Path], - encoding: str = 'utf-8') -> str: - """Read data from a given ``filepath`` with 'r' mode. - - Args: - filepath (str or Path): Path to read data. - encoding (str): The encoding format used to open the ``filepath``. - Default: 'utf-8'. - - Returns: - str: Expected text reading from ``filepath``. - """ - return str(self.get(filepath), encoding=encoding) - - def put(self, obj: bytes, filepath: Union[str, Path]) -> None: - """Save data to a given ``filepath``. - - Args: - obj (bytes): Data to be saved. - filepath (str or Path): Path to write data. - """ - filepath = self._map_path(filepath) - filepath = self._format_path(filepath) - self._client.put(filepath, obj) - - def put_text(self, - obj: str, - filepath: Union[str, Path], - encoding: str = 'utf-8') -> None: - """Save data to a given ``filepath``. - - Args: - obj (str): Data to be written. - filepath (str or Path): Path to write data. - encoding (str): The encoding format used to encode the ``obj``. - Default: 'utf-8'. - """ - self.put(bytes(obj, encoding=encoding), filepath) - - def remove(self, filepath: Union[str, Path]) -> None: - """Remove a file. 
- - Args: - filepath (str or Path): Path to be removed. - """ - if not has_method(self._client, 'delete'): - raise NotImplementedError( - ('Current version of Petrel Python SDK has not supported ' - 'the `delete` method, please use a higher version or dev' - ' branch instead.')) - - filepath = self._map_path(filepath) - filepath = self._format_path(filepath) - self._client.delete(filepath) - - def exists(self, filepath: Union[str, Path]) -> bool: - """Check whether a file path exists. - - Args: - filepath (str or Path): Path to be checked whether exists. - - Returns: - bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise. - """ - if not (has_method(self._client, 'contains') - and has_method(self._client, 'isdir')): - raise NotImplementedError( - ('Current version of Petrel Python SDK has not supported ' - 'the `contains` and `isdir` methods, please use a higher' - 'version or dev branch instead.')) - - filepath = self._map_path(filepath) - filepath = self._format_path(filepath) - return self._client.contains(filepath) or self._client.isdir(filepath) - - def isdir(self, filepath: Union[str, Path]) -> bool: - """Check whether a file path is a directory. - - Args: - filepath (str or Path): Path to be checked whether it is a - directory. - - Returns: - bool: Return ``True`` if ``filepath`` points to a directory, - ``False`` otherwise. - """ - if not has_method(self._client, 'isdir'): - raise NotImplementedError( - ('Current version of Petrel Python SDK has not supported ' - 'the `isdir` method, please use a higher version or dev' - ' branch instead.')) - - filepath = self._map_path(filepath) - filepath = self._format_path(filepath) - return self._client.isdir(filepath) - - def isfile(self, filepath: Union[str, Path]) -> bool: - """Check whether a file path is a file. - - Args: - filepath (str or Path): Path to be checked whether it is a file. - - Returns: - bool: Return ``True`` if ``filepath`` points to a file, ``False`` - otherwise. - """ - if not has_method(self._client, 'contains'): - raise NotImplementedError( - ('Current version of Petrel Python SDK has not supported ' - 'the `contains` method, please use a higher version or ' - 'dev branch instead.')) - - filepath = self._map_path(filepath) - filepath = self._format_path(filepath) - return self._client.contains(filepath) - - def join_path(self, filepath: Union[str, Path], - *filepaths: Union[str, Path]) -> str: - """Concatenate all file paths. - - Args: - filepath (str or Path): Path to be concatenated. - - Returns: - str: The result after concatenation. - """ - filepath = self._format_path(self._map_path(filepath)) - if filepath.endswith('/'): - filepath = filepath[:-1] - formatted_paths = [filepath] - for path in filepaths: - formatted_paths.append(self._format_path(self._map_path(path))) - return '/'.join(formatted_paths) - - @contextmanager - def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]: - """Download a file from ``filepath`` and return a temporary path. - - ``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`. It - can be called with ``with`` statement, and when exists from the - ``with`` statement, the temporary path will be released. - - Args: - filepath (str | Path): Download a file from ``filepath``. - - Examples: - >>> client = PetrelBackend() - >>> # After existing from the ``with`` clause, - >>> # the path will be removed - >>> with client.get_local_path('s3://path/of/your/file') as path: - ... # do something here - - Yields: - Iterable[str]: Only yield one temporary path. 
- """ - filepath = self._map_path(filepath) - filepath = self._format_path(filepath) - assert self.isfile(filepath) - try: - f = tempfile.NamedTemporaryFile(delete=False) - f.write(self.get(filepath)) - f.close() - yield f.name - finally: - os.remove(f.name) - - def list_dir_or_file(self, - dir_path: Union[str, Path], - list_dir: bool = True, - list_file: bool = True, - suffix: Optional[Union[str, Tuple[str]]] = None, - recursive: bool = False) -> Iterator[str]: - """Scan a directory to find the interested directories or files in - arbitrary order. - - Note: - Petrel has no concept of directories but it simulates the directory - hierarchy in the filesystem through public prefixes. In addition, - if the returned path ends with '/', it means the path is a public - prefix which is a logical directory. - - Note: - :meth:`list_dir_or_file` returns the path relative to ``dir_path``. - In addition, the returned path of directory will not contains the - suffix '/' which is consistent with other backends. - - Args: - dir_path (str | Path): Path of the directory. - list_dir (bool): List the directories. Default: True. - list_file (bool): List the path of files. Default: True. - suffix (str or tuple[str], optional): File suffix - that we are interested in. Default: None. - recursive (bool): If set to True, recursively scan the - directory. Default: False. - - Yields: - Iterable[str]: A relative path to ``dir_path``. - """ - if not has_method(self._client, 'list'): - raise NotImplementedError( - ('Current version of Petrel Python SDK has not supported ' - 'the `list` method, please use a higher version or dev' - ' branch instead.')) - - dir_path = self._map_path(dir_path) - dir_path = self._format_path(dir_path) - if list_dir and suffix is not None: - raise TypeError( - '`list_dir` should be False when `suffix` is not None') - - if (suffix is not None) and not isinstance(suffix, (str, tuple)): - raise TypeError('`suffix` must be a string or tuple of strings') - - # Petrel's simulated directory hierarchy assumes that directory paths - # should end with `/` - if not dir_path.endswith('/'): - dir_path += '/' - - root = dir_path - - def _list_dir_or_file(dir_path, list_dir, list_file, suffix, - recursive): - for path in self._client.list(dir_path): - # the `self.isdir` is not used here to determine whether path - # is a directory, because `self.isdir` relies on - # `self._client.list` - if path.endswith('/'): # a directory path - next_dir_path = self.join_path(dir_path, path) - if list_dir: - # get the relative path and exclude the last - # character '/' - rel_dir = next_dir_path[len(root):-1] - yield rel_dir - if recursive: - yield from _list_dir_or_file(next_dir_path, list_dir, - list_file, suffix, - recursive) - else: # a file path - absolute_path = self.join_path(dir_path, path) - rel_path = absolute_path[len(root):] - if (suffix is None - or rel_path.endswith(suffix)) and list_file: - yield rel_path - - return _list_dir_or_file(dir_path, list_dir, list_file, suffix, - recursive) - - -class MemcachedBackend(BaseStorageBackend): - """Memcached storage backend. - - Attributes: - server_list_cfg (str): Config file for memcached server list. - client_cfg (str): Config file for memcached client. - sys_path (str | None): Additional path to be appended to `sys.path`. - Default: None. 
- """ - - def __init__(self, server_list_cfg, client_cfg, sys_path=None): - if sys_path is not None: - import sys - sys.path.append(sys_path) - try: - import mc - except ImportError: - raise ImportError( - 'Please install memcached to enable MemcachedBackend.') - - self.server_list_cfg = server_list_cfg - self.client_cfg = client_cfg - self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, - self.client_cfg) - # mc.pyvector servers as a point which points to a memory cache - self._mc_buffer = mc.pyvector() - - def get(self, filepath): - filepath = str(filepath) - import mc - self._client.Get(filepath, self._mc_buffer) - value_buf = mc.ConvertBuffer(self._mc_buffer) - return value_buf - - def get_text(self, filepath, encoding=None): - raise NotImplementedError - - -class LmdbBackend(BaseStorageBackend): - """Lmdb storage backend. - - Args: - db_path (str): Lmdb database path. - readonly (bool, optional): Lmdb environment parameter. If True, - disallow any write operations. Default: True. - lock (bool, optional): Lmdb environment parameter. If False, when - concurrent access occurs, do not lock the database. Default: False. - readahead (bool, optional): Lmdb environment parameter. If False, - disable the OS filesystem readahead mechanism, which may improve - random read performance when a database is larger than RAM. - Default: False. - - Attributes: - db_path (str): Lmdb database path. - """ - - def __init__(self, - db_path, - readonly=True, - lock=False, - readahead=False, - **kwargs): - try: - import lmdb - except ImportError: - raise ImportError('Please install lmdb to enable LmdbBackend.') - - self.db_path = str(db_path) - self._client = lmdb.open( - self.db_path, - readonly=readonly, - lock=lock, - readahead=readahead, - **kwargs) - - def get(self, filepath): - """Get values according to the filepath. - - Args: - filepath (str | obj:`Path`): Here, filepath is the lmdb key. - """ - filepath = str(filepath) - with self._client.begin(write=False) as txn: - value_buf = txn.get(filepath.encode('ascii')) - return value_buf - - def get_text(self, filepath, encoding=None): - raise NotImplementedError - - -class HardDiskBackend(BaseStorageBackend): - """Raw hard disks storage backend.""" - - _allow_symlink = True - - def get(self, filepath: Union[str, Path]) -> bytes: - """Read data from a given ``filepath`` with 'rb' mode. - - Args: - filepath (str or Path): Path to read data. - - Returns: - bytes: Expected bytes object. - """ - with open(filepath, 'rb') as f: - value_buf = f.read() - return value_buf - - def get_text(self, - filepath: Union[str, Path], - encoding: str = 'utf-8') -> str: - """Read data from a given ``filepath`` with 'r' mode. - - Args: - filepath (str or Path): Path to read data. - encoding (str): The encoding format used to open the ``filepath``. - Default: 'utf-8'. - - Returns: - str: Expected text reading from ``filepath``. - """ - with open(filepath, 'r', encoding=encoding) as f: - value_buf = f.read() - return value_buf - - def put(self, obj: bytes, filepath: Union[str, Path]) -> None: - """Write data to a given ``filepath`` with 'wb' mode. - - Note: - ``put`` will create a directory if the directory of ``filepath`` - does not exist. - - Args: - obj (bytes): Data to be written. - filepath (str or Path): Path to write data. 
- """ - mmcv.mkdir_or_exist(osp.dirname(filepath)) - with open(filepath, 'wb') as f: - f.write(obj) - - def put_text(self, - obj: str, - filepath: Union[str, Path], - encoding: str = 'utf-8') -> None: - """Write data to a given ``filepath`` with 'w' mode. - - Note: - ``put_text`` will create a directory if the directory of - ``filepath`` does not exist. - - Args: - obj (str): Data to be written. - filepath (str or Path): Path to write data. - encoding (str): The encoding format used to open the ``filepath``. - Default: 'utf-8'. - """ - mmcv.mkdir_or_exist(osp.dirname(filepath)) - with open(filepath, 'w', encoding=encoding) as f: - f.write(obj) - - def remove(self, filepath: Union[str, Path]) -> None: - """Remove a file. - - Args: - filepath (str or Path): Path to be removed. - """ - os.remove(filepath) - - def exists(self, filepath: Union[str, Path]) -> bool: - """Check whether a file path exists. - - Args: - filepath (str or Path): Path to be checked whether exists. - - Returns: - bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise. - """ - return osp.exists(filepath) - - def isdir(self, filepath: Union[str, Path]) -> bool: - """Check whether a file path is a directory. - - Args: - filepath (str or Path): Path to be checked whether it is a - directory. - - Returns: - bool: Return ``True`` if ``filepath`` points to a directory, - ``False`` otherwise. - """ - return osp.isdir(filepath) - - def isfile(self, filepath: Union[str, Path]) -> bool: - """Check whether a file path is a file. - - Args: - filepath (str or Path): Path to be checked whether it is a file. - - Returns: - bool: Return ``True`` if ``filepath`` points to a file, ``False`` - otherwise. - """ - return osp.isfile(filepath) - - def join_path(self, filepath: Union[str, Path], - *filepaths: Union[str, Path]) -> str: - """Concatenate all file paths. - - Join one or more filepath components intelligently. The return value - is the concatenation of filepath and any members of *filepaths. - - Args: - filepath (str or Path): Path to be concatenated. - - Returns: - str: The result of concatenation. - """ - return osp.join(filepath, *filepaths) - - @contextmanager - def get_local_path( - self, filepath: Union[str, Path]) -> Iterable[Union[str, Path]]: - """Only for unified API and do nothing.""" - yield filepath - - def list_dir_or_file(self, - dir_path: Union[str, Path], - list_dir: bool = True, - list_file: bool = True, - suffix: Optional[Union[str, Tuple[str]]] = None, - recursive: bool = False) -> Iterator[str]: - """Scan a directory to find the interested directories or files in - arbitrary order. - - Note: - :meth:`list_dir_or_file` returns the path relative to ``dir_path``. - - Args: - dir_path (str | Path): Path of the directory. - list_dir (bool): List the directories. Default: True. - list_file (bool): List the path of files. Default: True. - suffix (str or tuple[str], optional): File suffix - that we are interested in. Default: None. - recursive (bool): If set to True, recursively scan the - directory. Default: False. - - Yields: - Iterable[str]: A relative path to ``dir_path``. 
- """ - if list_dir and suffix is not None: - raise TypeError('`suffix` should be None when `list_dir` is True') - - if (suffix is not None) and not isinstance(suffix, (str, tuple)): - raise TypeError('`suffix` must be a string or tuple of strings') - - root = dir_path - - def _list_dir_or_file(dir_path, list_dir, list_file, suffix, - recursive): - for entry in os.scandir(dir_path): - if not entry.name.startswith('.') and entry.is_file(): - rel_path = osp.relpath(entry.path, root) - if (suffix is None - or rel_path.endswith(suffix)) and list_file: - yield rel_path - elif osp.isdir(entry.path): - if list_dir: - rel_dir = osp.relpath(entry.path, root) - yield rel_dir - if recursive: - yield from _list_dir_or_file(entry.path, list_dir, - list_file, suffix, - recursive) - - return _list_dir_or_file(dir_path, list_dir, list_file, suffix, - recursive) - - -class HTTPBackend(BaseStorageBackend): - """HTTP and HTTPS storage bachend.""" - - def get(self, filepath): - value_buf = urlopen(filepath).read() - return value_buf - - def get_text(self, filepath, encoding='utf-8'): - value_buf = urlopen(filepath).read() - return value_buf.decode(encoding) - - @contextmanager - def get_local_path(self, filepath: str) -> Iterable[str]: - """Download a file from ``filepath``. - - ``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`. It - can be called with ``with`` statement, and when exists from the - ``with`` statement, the temporary path will be released. - - Args: - filepath (str): Download a file from ``filepath``. - - Examples: - >>> client = HTTPBackend() - >>> # After existing from the ``with`` clause, - >>> # the path will be removed - >>> with client.get_local_path('http://path/of/your/file') as path: - ... # do something here - """ - try: - f = tempfile.NamedTemporaryFile(delete=False) - f.write(self.get(filepath)) - f.close() - yield f.name - finally: - os.remove(f.name) - - -class FileClient: - """A general file client to access files in different backends. - - The client loads a file or text in a specified backend from its path - and returns it as a binary or text file. There are two ways to choose a - backend, the name of backend and the prefix of path. Although both of them - can be used to choose a storage backend, ``backend`` has a higher priority - that is if they are all set, the storage backend will be chosen by the - backend argument. If they are all `None`, the disk backend will be chosen. - Note that It can also register other backend accessor with a given name, - prefixes, and backend class. In addition, We use the singleton pattern to - avoid repeated object creation. If the arguments are the same, the same - object will be returned. - - Args: - backend (str, optional): The storage backend type. Options are "disk", - "ceph", "memcached", "lmdb", "http" and "petrel". Default: None. - prefix (str, optional): The prefix of the registered storage backend. - Options are "s3", "http", "https". Default: None. - - Examples: - >>> # only set backend - >>> file_client = FileClient(backend='petrel') - >>> # only set prefix - >>> file_client = FileClient(prefix='s3') - >>> # set both backend and prefix but use backend to choose client - >>> file_client = FileClient(backend='petrel', prefix='s3') - >>> # if the arguments are the same, the same object is returned - >>> file_client1 = FileClient(backend='petrel') - >>> file_client1 is file_client - True - - Attributes: - client (:obj:`BaseStorageBackend`): The backend object. 
- """ - - _backends = { - 'disk': HardDiskBackend, - 'ceph': CephBackend, - 'memcached': MemcachedBackend, - 'lmdb': LmdbBackend, - 'petrel': PetrelBackend, - 'http': HTTPBackend, - } - # This collection is used to record the overridden backends, and when a - # backend appears in the collection, the singleton pattern is disabled for - # that backend, because if the singleton pattern is used, then the object - # returned will be the backend before overwriting - _overridden_backends = set() - _prefix_to_backends = { - 's3': PetrelBackend, - 'http': HTTPBackend, - 'https': HTTPBackend, - } - _overridden_prefixes = set() - - _instances = {} - - def __new__(cls, backend=None, prefix=None, **kwargs): - if backend is None and prefix is None: - backend = 'disk' - if backend is not None and backend not in cls._backends: - raise ValueError( - f'Backend {backend} is not supported. Currently supported ones' - f' are {list(cls._backends.keys())}') - if prefix is not None and prefix not in cls._prefix_to_backends: - raise ValueError( - f'prefix {prefix} is not supported. Currently supported ones ' - f'are {list(cls._prefix_to_backends.keys())}') - - # concatenate the arguments to a unique key for determining whether - # objects with the same arguments were created - arg_key = f'{backend}:{prefix}' - for key, value in kwargs.items(): - arg_key += f':{key}:{value}' - - # if a backend was overridden, it will create a new object - if (arg_key in cls._instances - and backend not in cls._overridden_backends - and prefix not in cls._overridden_prefixes): - _instance = cls._instances[arg_key] - else: - # create a new object and put it to _instance - _instance = super().__new__(cls) - if backend is not None: - _instance.client = cls._backends[backend](**kwargs) - else: - _instance.client = cls._prefix_to_backends[prefix](**kwargs) - - cls._instances[arg_key] = _instance - - return _instance - - @property - def name(self): - return self.client.name - - @property - def allow_symlink(self): - return self.client.allow_symlink - - @staticmethod - def parse_uri_prefix(uri: Union[str, Path]) -> Optional[str]: - """Parse the prefix of a uri. - - Args: - uri (str | Path): Uri to be parsed that contains the file prefix. - - Examples: - >>> FileClient.parse_uri_prefix('s3://path/of/your/file') - 's3' - - Returns: - str | None: Return the prefix of uri if the uri contains '://' - else ``None``. - """ - assert is_filepath(uri) - uri = str(uri) - if '://' not in uri: - return None - else: - prefix, _ = uri.split('://') - # In the case of PetrelBackend, the prefix may contains the cluster - # name like clusterName:s3 - if ':' in prefix: - _, prefix = prefix.split(':') - return prefix - - @classmethod - def infer_client(cls, - file_client_args: Optional[dict] = None, - uri: Optional[Union[str, Path]] = None) -> 'FileClient': - """Infer a suitable file client based on the URI and arguments. - - Args: - file_client_args (dict, optional): Arguments to instantiate a - FileClient. Default: None. - uri (str | Path, optional): Uri to be parsed that contains the file - prefix. Default: None. - - Examples: - >>> uri = 's3://path/of/your/file' - >>> file_client = FileClient.infer_client(uri=uri) - >>> file_client_args = {'backend': 'petrel'} - >>> file_client = FileClient.infer_client(file_client_args) - - Returns: - FileClient: Instantiated FileClient object. 
- """ - assert file_client_args is not None or uri is not None - if file_client_args is None: - file_prefix = cls.parse_uri_prefix(uri) # type: ignore - return cls(prefix=file_prefix) - else: - return cls(**file_client_args) - - @classmethod - def _register_backend(cls, name, backend, force=False, prefixes=None): - if not isinstance(name, str): - raise TypeError('the backend name should be a string, ' - f'but got {type(name)}') - if not inspect.isclass(backend): - raise TypeError( - f'backend should be a class but got {type(backend)}') - if not issubclass(backend, BaseStorageBackend): - raise TypeError( - f'backend {backend} is not a subclass of BaseStorageBackend') - if not force and name in cls._backends: - raise KeyError( - f'{name} is already registered as a storage backend, ' - 'add "force=True" if you want to override it') - - if name in cls._backends and force: - cls._overridden_backends.add(name) - cls._backends[name] = backend - - if prefixes is not None: - if isinstance(prefixes, str): - prefixes = [prefixes] - else: - assert isinstance(prefixes, (list, tuple)) - for prefix in prefixes: - if prefix not in cls._prefix_to_backends: - cls._prefix_to_backends[prefix] = backend - elif (prefix in cls._prefix_to_backends) and force: - cls._overridden_prefixes.add(prefix) - cls._prefix_to_backends[prefix] = backend - else: - raise KeyError( - f'{prefix} is already registered as a storage backend,' - ' add "force=True" if you want to override it') - - @classmethod - def register_backend(cls, name, backend=None, force=False, prefixes=None): - """Register a backend to FileClient. - - This method can be used as a normal class method or a decorator. - - .. code-block:: python - - class NewBackend(BaseStorageBackend): - - def get(self, filepath): - return filepath - - def get_text(self, filepath): - return filepath - - FileClient.register_backend('new', NewBackend) - - or - - .. code-block:: python - - @FileClient.register_backend('new') - class NewBackend(BaseStorageBackend): - - def get(self, filepath): - return filepath - - def get_text(self, filepath): - return filepath - - Args: - name (str): The name of the registered backend. - backend (class, optional): The backend class to be registered, - which must be a subclass of :class:`BaseStorageBackend`. - When this method is used as a decorator, backend is None. - Defaults to None. - force (bool, optional): Whether to override the backend if the name - has already been registered. Defaults to False. - prefixes (str or list[str] or tuple[str], optional): The prefixes - of the registered storage backend. Default: None. - `New in version 1.3.15.` - """ - if backend is not None: - cls._register_backend( - name, backend, force=force, prefixes=prefixes) - return - - def _register(backend_cls): - cls._register_backend( - name, backend_cls, force=force, prefixes=prefixes) - return backend_cls - - return _register - - def get(self, filepath: Union[str, Path]) -> Union[bytes, memoryview]: - """Read data from a given ``filepath`` with 'rb' mode. - - Note: - There are two types of return values for ``get``, one is ``bytes`` - and the other is ``memoryview``. The advantage of using memoryview - is that you can avoid copying, and if you want to convert it to - ``bytes``, you can use ``.tobytes()``. - - Args: - filepath (str or Path): Path to read data. - - Returns: - bytes | memoryview: Expected bytes object or a memory view of the - bytes object. 
- """ - return self.client.get(filepath) - - def get_text(self, filepath: Union[str, Path], encoding='utf-8') -> str: - """Read data from a given ``filepath`` with 'r' mode. - - Args: - filepath (str or Path): Path to read data. - encoding (str): The encoding format used to open the ``filepath``. - Default: 'utf-8'. - - Returns: - str: Expected text reading from ``filepath``. - """ - return self.client.get_text(filepath, encoding) - - def put(self, obj: bytes, filepath: Union[str, Path]) -> None: - """Write data to a given ``filepath`` with 'wb' mode. - - Note: - ``put`` should create a directory if the directory of ``filepath`` - does not exist. - - Args: - obj (bytes): Data to be written. - filepath (str or Path): Path to write data. - """ - self.client.put(obj, filepath) - - def put_text(self, obj: str, filepath: Union[str, Path]) -> None: - """Write data to a given ``filepath`` with 'w' mode. - - Note: - ``put_text`` should create a directory if the directory of - ``filepath`` does not exist. - - Args: - obj (str): Data to be written. - filepath (str or Path): Path to write data. - encoding (str, optional): The encoding format used to open the - `filepath`. Default: 'utf-8'. - """ - self.client.put_text(obj, filepath) - - def remove(self, filepath: Union[str, Path]) -> None: - """Remove a file. - - Args: - filepath (str, Path): Path to be removed. - """ - self.client.remove(filepath) - - def exists(self, filepath: Union[str, Path]) -> bool: - """Check whether a file path exists. - - Args: - filepath (str or Path): Path to be checked whether exists. - - Returns: - bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise. - """ - return self.client.exists(filepath) - - def isdir(self, filepath: Union[str, Path]) -> bool: - """Check whether a file path is a directory. - - Args: - filepath (str or Path): Path to be checked whether it is a - directory. - - Returns: - bool: Return ``True`` if ``filepath`` points to a directory, - ``False`` otherwise. - """ - return self.client.isdir(filepath) - - def isfile(self, filepath: Union[str, Path]) -> bool: - """Check whether a file path is a file. - - Args: - filepath (str or Path): Path to be checked whether it is a file. - - Returns: - bool: Return ``True`` if ``filepath`` points to a file, ``False`` - otherwise. - """ - return self.client.isfile(filepath) - - def join_path(self, filepath: Union[str, Path], - *filepaths: Union[str, Path]) -> str: - """Concatenate all file paths. - - Join one or more filepath components intelligently. The return value - is the concatenation of filepath and any members of *filepaths. - - Args: - filepath (str or Path): Path to be concatenated. - - Returns: - str: The result of concatenation. - """ - return self.client.join_path(filepath, *filepaths) - - @contextmanager - def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]: - """Download data from ``filepath`` and write the data to local path. - - ``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`. It - can be called with ``with`` statement, and when exists from the - ``with`` statement, the temporary path will be released. - - Note: - If the ``filepath`` is a local path, just return itself. - - .. warning:: - ``get_local_path`` is an experimental interface that may change in - the future. - - Args: - filepath (str or Path): Path to be read data. - - Examples: - >>> file_client = FileClient(prefix='s3') - >>> with file_client.get_local_path('s3://bucket/abc.jpg') as path: - ... 
# do something here - - Yields: - Iterable[str]: Only yield one path. - """ - with self.client.get_local_path(str(filepath)) as local_path: - yield local_path - - def list_dir_or_file(self, - dir_path: Union[str, Path], - list_dir: bool = True, - list_file: bool = True, - suffix: Optional[Union[str, Tuple[str]]] = None, - recursive: bool = False) -> Iterator[str]: - """Scan a directory to find the interested directories or files in - arbitrary order. - - Note: - :meth:`list_dir_or_file` returns the path relative to ``dir_path``. - - Args: - dir_path (str | Path): Path of the directory. - list_dir (bool): List the directories. Default: True. - list_file (bool): List the path of files. Default: True. - suffix (str or tuple[str], optional): File suffix - that we are interested in. Default: None. - recursive (bool): If set to True, recursively scan the - directory. Default: False. - - Yields: - Iterable[str]: A relative path to ``dir_path``. - """ - yield from self.client.list_dir_or_file(dir_path, list_dir, list_file, - suffix, recursive) diff --git a/spaces/wanghuoto/gogoai/src/app/layout.tsx b/spaces/wanghuoto/gogoai/src/app/layout.tsx deleted file mode 100644 index 8b5122759987177b8dc4e4356d1d06cea25c15ea..0000000000000000000000000000000000000000 --- a/spaces/wanghuoto/gogoai/src/app/layout.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import { Metadata } from 'next' -import { Toaster } from 'react-hot-toast' -import { TailwindIndicator } from '@/components/tailwind-indicator' -import { Providers } from '@/components/providers' -import { Header } from '@/components/header' - -import '@/app/globals.scss' - - -export const metadata: Metadata = { - title: { - default: 'Bing AI Chatbot', - template: `%s - Bing AI Chatbot` - }, - description: 'Bing AI Chatbot Web App.', - themeColor: [ - { media: '(prefers-color-scheme: light)', color: 'white' }, - { media: '(prefers-color-scheme: dark)', color: 'dark' } - ], - icons: { - icon: '/favicon.ico', - shortcut: '../assets/images/logo.svg', - apple: '../assets/images/logo.svg' - } -} - -interface RootLayoutProps { - children: React.ReactNode -} - -export default function RootLayout({ children }: RootLayoutProps) { - return ( - - - - -
    - {/* @ts-ignore */} -
    -
    {children}
    -
    - -
    - - - ) -} diff --git a/spaces/warmazzzzz/bing-ai/Dockerfile b/spaces/warmazzzzz/bing-ai/Dockerfile deleted file mode 100644 index 3698c7cb7938e025afc53b18a571ae2961fbdffe..0000000000000000000000000000000000000000 --- a/spaces/warmazzzzz/bing-ai/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -# 添加 git,以便之后能从GitHub克隆项目 -RUN apk --no-cache add git - -# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -# 设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app - -# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# 使用轻量级的 alpine 镜像作为运行时的基础镜像 -FROM alpine - -# 设置工作目录 -WORKDIR /workspace/app - -# 从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder /workspace/app/go-proxy-bingai . - -# 设置环境变量,此处为随机字符 -ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO" - -# 暴露8080端口 -EXPOSE 8080 - -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/wenpeng/Sod_Inpaint/inpaint/saicinpainting/training/trainers/default.py b/spaces/wenpeng/Sod_Inpaint/inpaint/saicinpainting/training/trainers/default.py deleted file mode 100644 index 771eab50436734873619e4c9902ca73421fa2fa3..0000000000000000000000000000000000000000 --- a/spaces/wenpeng/Sod_Inpaint/inpaint/saicinpainting/training/trainers/default.py +++ /dev/null @@ -1,53 +0,0 @@ - -import torch - -from saicinpainting.training.trainers.base import BaseInpaintingTrainingModule - - - - -class DefaultInpaintingTrainingModule(BaseInpaintingTrainingModule): - def __init__(self, *args, concat_mask=True, rescale_scheduler_kwargs=None, image_to_discriminator='predicted_image', - add_noise_kwargs=None, noise_fill_hole=False, const_area_crop_kwargs=None, - distance_weighter_kwargs=None, distance_weighted_mask_for_discr=False, - fake_fakes_proba=0, fake_fakes_generator_kwargs=None, - **kwargs): - super().__init__(*args, **kwargs) - self.concat_mask = concat_mask - self.image_to_discriminator = image_to_discriminator - self.add_noise_kwargs = add_noise_kwargs - self.noise_fill_hole = noise_fill_hole - self.const_area_crop_kwargs = const_area_crop_kwargs - # print(distance_weighter_kwargs) - self.refine_mask_for_losses = None - self.distance_weighted_mask_for_discr = distance_weighted_mask_for_discr - - self.fake_fakes_proba = fake_fakes_proba - - def forward(self, batch): - - img = batch['image'] - mask = batch['mask'] - - masked_img = img * (1 - mask) - if self.concat_mask: - masked_img = torch.cat([masked_img, mask], dim=1) - - batch['predicted_image'] = self.generator(masked_img) - batch['inpainted'] = mask * batch['predicted_image'] + (1 - mask) * batch['image'] - if self.fake_fakes_proba > 1e-3: - if self.training and torch.rand(1).item() < self.fake_fakes_proba: - batch['fake_fakes'], batch['fake_fakes_masks'] = self.fake_fakes_gen(img, mask) - batch['use_fake_fakes'] = True - else: - batch['fake_fakes'] = torch.zeros_like(img) - batch['fake_fakes_masks'] = torch.zeros_like(mask) - batch['use_fake_fakes'] = False - - batch['mask_for_losses'] = self.refine_mask_for_losses(img, batch['predicted_image'], mask) \ - if self.refine_mask_for_losses is not None and self.training \ - else mask - - return batch - - \ No newline at end of file diff --git a/spaces/whisper-event/winners/README.md b/spaces/whisper-event/winners/README.md deleted file mode 100644 index 
acf1d6dcb3dcbd4978041c64a980ac86a072227e..0000000000000000000000000000000000000000 --- a/spaces/whisper-event/winners/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Winners -emoji: 🏆 -colorFrom: purple -colorTo: blue -sdk: streamlit -sdk_version: 1.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/whitphx/gradio-static-test/dist/assets/Empty-91947ea3.js b/spaces/whitphx/gradio-static-test/dist/assets/Empty-91947ea3.js deleted file mode 100644 index bb43a2cf8d4370c86a2b3ef5f71e00b9cd3a4cbf..0000000000000000000000000000000000000000 --- a/spaces/whitphx/gradio-static-test/dist/assets/Empty-91947ea3.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as _,i as r,s as m,W as c,H as d,D as f,N as u,h as g,F as p,Y as b,Z as v,$ as z,q as h,t as j,r as q}from"../lite.js";import"./Button-0391b19a.js";function E(n){let s,i,t;const o=n[3].default,a=c(o,n,n[2],null);return{c(){s=d("div"),i=d("div"),a&&a.c(),f(i,"class","icon svelte-1u5vjgs"),f(s,"class","empty svelte-1u5vjgs"),u(s,"small",n[0]==="small"),u(s,"large",n[0]==="large"),u(s,"unpadded_box",n[1])},m(e,l){g(e,s,l),p(s,i),a&&a.m(i,null),t=!0},p(e,[l]){a&&a.p&&(!t||l&4)&&b(a,o,e,e[2],t?z(o,e[2],l,null):v(e[2]),null),(!t||l&1)&&u(s,"small",e[0]==="small"),(!t||l&1)&&u(s,"large",e[0]==="large"),(!t||l&2)&&u(s,"unpadded_box",e[1])},i(e){t||(h(a,e),t=!0)},o(e){j(a,e),t=!1},d(e){e&&q(s),a&&a.d(e)}}}function S(n,s,i){let{$$slots:t={},$$scope:o}=s,{size:a="small"}=s,{unpadded_box:e=!1}=s;return n.$$set=l=>{"size"in l&&i(0,a=l.size),"unpadded_box"in l&&i(1,e=l.unpadded_box),"$$scope"in l&&i(2,o=l.$$scope)},[a,e,o,t]}class F extends _{constructor(s){super(),r(this,s,S,E,m,{size:0,unpadded_box:1})}}export{F as E}; -//# sourceMappingURL=Empty-91947ea3.js.map diff --git a/spaces/wu981526092/Optimal_Cluster_Analysis_with_PCA_Visualization/README.md b/spaces/wu981526092/Optimal_Cluster_Analysis_with_PCA_Visualization/README.md deleted file mode 100644 index 9f0ff0e3906ee1a55464fad79a0a22b5d3015a10..0000000000000000000000000000000000000000 --- a/spaces/wu981526092/Optimal_Cluster_Analysis_with_PCA_Visualization/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Optimal Cluster Analysis With PCA Visualization -emoji: 📚 -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/wuhuik/bingo/src/app/loading.css b/spaces/wuhuik/bingo/src/app/loading.css deleted file mode 100644 index eaaab6a86a228334c4eca3c5368ae6f0f593d405..0000000000000000000000000000000000000000 --- a/spaces/wuhuik/bingo/src/app/loading.css +++ /dev/null @@ -1,68 +0,0 @@ -::-webkit-scrollbar { - width: 10px; - height: 10px; - display: none; -} - -::-webkit-scrollbar-button:start:decrement, -::-webkit-scrollbar-button:end:increment { - height: 30px; - background-color: transparent; -} - -::-webkit-scrollbar-track-piece { - background-color: #3b3b3b; - -webkit-border-radius: 16px; -} - -::-webkit-scrollbar-thumb:vertical { - height: 50px; - background-color: #666; - border: 1px solid #eee; - -webkit-border-radius: 6px; -} - -/* loading start */ -.loading-spinner { - display: flex; - justify-content: center; - align-items: center; - height: 100vh; - opacity: 1; - transition: opacity .8s ease-out; -} - -.loading-spinner.hidden { - opacity: 0; -} - -.loading-spinner>div { - width: 30px; - height: 30px; - 
background: linear-gradient(90deg, #2870EA 10.79%, #1B4AEF 87.08%); - - border-radius: 100%; - display: inline-block; - animation: sk-bouncedelay 1.4s infinite ease-in-out both; -} - -.loading-spinner .bounce1 { - animation-delay: -0.32s; -} - -.loading-spinner .bounce2 { - animation-delay: -0.16s; -} - -@keyframes sk-bouncedelay { - - 0%, - 80%, - 100% { - transform: scale(0); - } - - 40% { - transform: scale(1.0); - } -} diff --git a/spaces/xcchen/xcchenvits-uma-genshin-honkai/transforms.py b/spaces/xcchen/xcchenvits-uma-genshin-honkai/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/xcchen/xcchenvits-uma-genshin-honkai/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - 
unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * 
(input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/xdecoder/Demo/xdecoder/backbone/registry.py b/spaces/xdecoder/Demo/xdecoder/backbone/registry.py deleted file mode 100644 index 9e19cc8068fff5f5de219c0739594b404d837e00..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Demo/xdecoder/backbone/registry.py +++ /dev/null @@ -1,14 +0,0 @@ -_model_entrypoints = {} - - -def register_backbone(fn): - module_name_split = fn.__module__.split('.') - model_name = module_name_split[-1] - _model_entrypoints[model_name] = fn - return fn - -def model_entrypoints(model_name): - return _model_entrypoints[model_name] - -def is_model(model_name): - return model_name in _model_entrypoints diff --git a/spaces/xiang-wuu/yolov5/utils/callbacks.py b/spaces/xiang-wuu/yolov5/utils/callbacks.py deleted file mode 100644 index 2b32df0bf1c13ffaaec2e7598bb7c16ae76ab14c..0000000000000000000000000000000000000000 --- a/spaces/xiang-wuu/yolov5/utils/callbacks.py +++ /dev/null @@ -1,71 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Callback utils -""" - - -class Callbacks: - """" - Handles all registered callbacks for YOLOv5 Hooks - """ - - def __init__(self): - # Define the available callbacks - self._callbacks = { - 'on_pretrain_routine_start': [], - 'on_pretrain_routine_end': [], - 'on_train_start': [], - 'on_train_epoch_start': [], - 'on_train_batch_start': [], - 'optimizer_step': [], - 'on_before_zero_grad': [], - 'on_train_batch_end': [], - 'on_train_epoch_end': [], - 'on_val_start': [], - 'on_val_batch_start': [], - 'on_val_image_end': [], - 'on_val_batch_end': [], - 'on_val_end': [], - 'on_fit_epoch_end': [], # fit = train + val - 'on_model_save': [], - 'on_train_end': [], - 'on_params_update': [], - 'teardown': [],} - self.stop_training = False # set True to interrupt training - - def register_action(self, hook, name='', callback=None): - """ - Register a new action to a callback hook - - Args: - hook: The callback hook name to register the action to - name: The name of the action for later reference - callback: The callback to fire - """ - assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" - assert callable(callback), f"callback '{callback}' is not callable" - self._callbacks[hook].append({'name': name, 'callback': callback}) - - def get_registered_actions(self, hook=None): - """" - Returns all the registered actions by callback hook - - Args: - hook: The name of the hook to check, defaults to all - """ - return self._callbacks[hook] if hook else self._callbacks - - def run(self, hook, *args, **kwargs): - """ - Loop through the registered actions and fire all callbacks - - Args: - hook: The name of the hook to check, defaults to all - args: Arguments to receive from YOLOv5 - kwargs: Keyword Arguments to receive from YOLOv5 - """ - - assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" - - for logger in self._callbacks[hook]: - logger['callback'](*args, **kwargs) diff --git a/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/util/utils.py b/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/util/utils.py deleted file mode 100644 index e9f0318e306fa04bff0ada70486b41aaa69b07c8..0000000000000000000000000000000000000000 --- 
a/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/util/utils.py +++ /dev/null @@ -1,608 +0,0 @@ -import argparse -import json -import warnings -from collections import OrderedDict -from copy import deepcopy -from typing import Any, Dict, List - -import numpy as np -import torch -from transformers import AutoTokenizer - -from groundingdino.util.slconfig import SLConfig - - -def slprint(x, name="x"): - if isinstance(x, (torch.Tensor, np.ndarray)): - print(f"{name}.shape:", x.shape) - elif isinstance(x, (tuple, list)): - print("type x:", type(x)) - for i in range(min(10, len(x))): - slprint(x[i], f"{name}[{i}]") - elif isinstance(x, dict): - for k, v in x.items(): - slprint(v, f"{name}[{k}]") - else: - print(f"{name}.type:", type(x)) - - -def clean_state_dict(state_dict): - new_state_dict = OrderedDict() - for k, v in state_dict.items(): - if k[:7] == "module.": - k = k[7:] # remove `module.` - new_state_dict[k] = v - return new_state_dict - - -def renorm( - img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] -) -> torch.FloatTensor: - # img: tensor(3,H,W) or tensor(B,3,H,W) - # return: same as img - assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim() - if img.dim() == 3: - assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % ( - img.size(0), - str(img.size()), - ) - img_perm = img.permute(1, 2, 0) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(2, 0, 1) - else: # img.dim() == 4 - assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % ( - img.size(1), - str(img.size()), - ) - img_perm = img.permute(0, 2, 3, 1) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(0, 3, 1, 2) - - -class CocoClassMapper: - def __init__(self) -> None: - self.category_map_str = { - "1": 1, - "2": 2, - "3": 3, - "4": 4, - "5": 5, - "6": 6, - "7": 7, - "8": 8, - "9": 9, - "10": 10, - "11": 11, - "13": 12, - "14": 13, - "15": 14, - "16": 15, - "17": 16, - "18": 17, - "19": 18, - "20": 19, - "21": 20, - "22": 21, - "23": 22, - "24": 23, - "25": 24, - "27": 25, - "28": 26, - "31": 27, - "32": 28, - "33": 29, - "34": 30, - "35": 31, - "36": 32, - "37": 33, - "38": 34, - "39": 35, - "40": 36, - "41": 37, - "42": 38, - "43": 39, - "44": 40, - "46": 41, - "47": 42, - "48": 43, - "49": 44, - "50": 45, - "51": 46, - "52": 47, - "53": 48, - "54": 49, - "55": 50, - "56": 51, - "57": 52, - "58": 53, - "59": 54, - "60": 55, - "61": 56, - "62": 57, - "63": 58, - "64": 59, - "65": 60, - "67": 61, - "70": 62, - "72": 63, - "73": 64, - "74": 65, - "75": 66, - "76": 67, - "77": 68, - "78": 69, - "79": 70, - "80": 71, - "81": 72, - "82": 73, - "84": 74, - "85": 75, - "86": 76, - "87": 77, - "88": 78, - "89": 79, - "90": 80, - } - self.origin2compact_mapper = {int(k): v - 1 for k, v in self.category_map_str.items()} - self.compact2origin_mapper = {int(v - 1): int(k) for k, v in self.category_map_str.items()} - - def origin2compact(self, idx): - return self.origin2compact_mapper[int(idx)] - - def compact2origin(self, idx): - return self.compact2origin_mapper[int(idx)] - - -def to_device(item, device): - if isinstance(item, torch.Tensor): - return item.to(device) - elif isinstance(item, list): - return [to_device(i, device) for i in item] - elif isinstance(item, dict): - return {k: to_device(v, device) for k, v in item.items()} - else: - raise NotImplementedError( - "Call Shilong if you use other containers! 
type: {}".format(type(item)) - ) - - -# -def get_gaussian_mean(x, axis, other_axis, softmax=True): - """ - - Args: - x (float): Input images(BxCxHxW) - axis (int): The index for weighted mean - other_axis (int): The other index - - Returns: weighted index for axis, BxC - - """ - mat2line = torch.sum(x, axis=other_axis) - # mat2line = mat2line / mat2line.mean() * 10 - if softmax: - u = torch.softmax(mat2line, axis=2) - else: - u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6) - size = x.shape[axis] - ind = torch.linspace(0, 1, size).to(x.device) - batch = x.shape[0] - channel = x.shape[1] - index = ind.repeat([batch, channel, 1]) - mean_position = torch.sum(index * u, dim=2) - return mean_position - - -def get_expected_points_from_map(hm, softmax=True): - """get_gaussian_map_from_points - B,C,H,W -> B,N,2 float(0, 1) float(0, 1) - softargmax function - - Args: - hm (float): Input images(BxCxHxW) - - Returns: - weighted index for axis, BxCx2. float between 0 and 1. - - """ - # hm = 10*hm - B, C, H, W = hm.shape - y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax) # B,C - x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax) # B,C - # return torch.cat((x_mean.unsqueeze(-1), y_mean.unsqueeze(-1)), 2) - return torch.stack([x_mean, y_mean], dim=2) - - -# Positional encoding (section 5.1) -# borrow from nerf -class Embedder: - def __init__(self, **kwargs): - self.kwargs = kwargs - self.create_embedding_fn() - - def create_embedding_fn(self): - embed_fns = [] - d = self.kwargs["input_dims"] - out_dim = 0 - if self.kwargs["include_input"]: - embed_fns.append(lambda x: x) - out_dim += d - - max_freq = self.kwargs["max_freq_log2"] - N_freqs = self.kwargs["num_freqs"] - - if self.kwargs["log_sampling"]: - freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs) - else: - freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs) - - for freq in freq_bands: - for p_fn in self.kwargs["periodic_fns"]: - embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) - out_dim += d - - self.embed_fns = embed_fns - self.out_dim = out_dim - - def embed(self, inputs): - return torch.cat([fn(inputs) for fn in self.embed_fns], -1) - - -def get_embedder(multires, i=0): - import torch.nn as nn - - if i == -1: - return nn.Identity(), 3 - - embed_kwargs = { - "include_input": True, - "input_dims": 3, - "max_freq_log2": multires - 1, - "num_freqs": multires, - "log_sampling": True, - "periodic_fns": [torch.sin, torch.cos], - } - - embedder_obj = Embedder(**embed_kwargs) - embed = lambda x, eo=embedder_obj: eo.embed(x) - return embed, embedder_obj.out_dim - - -class APOPMeter: - def __init__(self) -> None: - self.tp = 0 - self.fp = 0 - self.tn = 0 - self.fn = 0 - - def update(self, pred, gt): - """ - Input: - pred, gt: Tensor() - """ - assert pred.shape == gt.shape - self.tp += torch.logical_and(pred == 1, gt == 1).sum().item() - self.fp += torch.logical_and(pred == 1, gt == 0).sum().item() - self.tn += torch.logical_and(pred == 0, gt == 0).sum().item() - self.tn += torch.logical_and(pred == 1, gt == 0).sum().item() - - def update_cm(self, tp, fp, tn, fn): - self.tp += tp - self.fp += fp - self.tn += tn - self.tn += fn - - -def inverse_sigmoid(x, eps=1e-5): - x = x.clamp(min=0, max=1) - x1 = x.clamp(min=eps) - x2 = (1 - x).clamp(min=eps) - return torch.log(x1 / x2) - - -def get_raw_dict(args): - """ - return the dicf contained in args. 
- - e.g: - >>> with open(path, 'w') as f: - json.dump(get_raw_dict(args), f, indent=2) - """ - if isinstance(args, argparse.Namespace): - return vars(args) - elif isinstance(args, dict): - return args - elif isinstance(args, SLConfig): - return args._cfg_dict - else: - raise NotImplementedError("Unknown type {}".format(type(args))) - - -def stat_tensors(tensor): - assert tensor.dim() == 1 - tensor_sm = tensor.softmax(0) - entropy = (tensor_sm * torch.log(tensor_sm + 1e-9)).sum() - - return { - "max": tensor.max(), - "min": tensor.min(), - "mean": tensor.mean(), - "var": tensor.var(), - "std": tensor.var() ** 0.5, - "entropy": entropy, - } - - -class NiceRepr: - """Inherit from this class and define ``__nice__`` to "nicely" print your - objects. - - Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function - Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``. - If the inheriting class has a ``__len__`` method, then the default - ``__nice__`` method will return its length. - - Example: - >>> class Foo(NiceRepr): - ... def __nice__(self): - ... return 'info' - >>> foo = Foo() - >>> assert str(foo) == '<Foo(info)>' - >>> assert repr(foo).startswith('<Foo(info) at ') - - Example: - >>> class Bar(NiceRepr): - ... pass - >>> bar = Bar() - >>> import pytest - >>> with pytest.warns(None) as record: - >>> assert 'object at' in str(bar) - >>> assert 'object at' in repr(bar) - - Example: - >>> class Baz(NiceRepr): - ... def __len__(self): - ... return 5 - >>> baz = Baz() - >>> assert str(baz) == '<Baz(5)>' - """ - - def __nice__(self): - """str: a "nice" summary string describing this module""" - if hasattr(self, "__len__"): - # It is a common pattern for objects to use __len__ in __nice__ - # As a convenience we define a default __nice__ for these objects - return str(len(self)) - else: - # In all other cases force the subclass to overload __nice__ - raise NotImplementedError(f"Define the __nice__ method for {self.__class__!r}") - - def __repr__(self): - """str: the string of the module""" - try: - nice = self.__nice__() - classname = self.__class__.__name__ - return f"<{classname}({nice}) at {hex(id(self))}>" - except NotImplementedError as ex: - warnings.warn(str(ex), category=RuntimeWarning) - return object.__repr__(self) - - def __str__(self): - """str: the string of the module""" - try: - classname = self.__class__.__name__ - nice = self.__nice__() - return f"<{classname}({nice})>" - except NotImplementedError as ex: - warnings.warn(str(ex), category=RuntimeWarning) - return object.__repr__(self) - - -def ensure_rng(rng=None): - """Coerces input into a random number generator. - - If the input is None, then a global random state is returned. - - If the input is a numeric value, then that is used as a seed to construct a - random state. Otherwise the input is returned as-is. - - Adapted from [1]_. - - Args: - rng (int | numpy.random.RandomState | None): - if None, then defaults to the global rng. Otherwise this can be an - integer or a RandomState class - Returns: - (numpy.random.RandomState) : rng - - a numpy random number generator - - References: - .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501 - """ - - if rng is None: - rng = np.random.mtrand._rand - elif isinstance(rng, int): - rng = np.random.RandomState(rng) - else: - rng = rng - return rng - - -def random_boxes(num=1, scale=1, rng=None): - """Simple version of ``kwimage.Boxes.random`` - - Returns: - Tensor: shape (n, 4) in x1, y1, x2, y2 format. 
- - References: - https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 - - Example: - >>> num = 3 - >>> scale = 512 - >>> rng = 0 - >>> boxes = random_boxes(num, scale, rng) - >>> print(boxes) - tensor([[280.9925, 278.9802, 308.6148, 366.1769], - [216.9113, 330.6978, 224.0446, 456.5878], - [405.3632, 196.3221, 493.3953, 270.7942]]) - """ - rng = ensure_rng(rng) - - tlbr = rng.rand(num, 4).astype(np.float32) - - tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) - tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) - br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) - br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) - - tlbr[:, 0] = tl_x * scale - tlbr[:, 1] = tl_y * scale - tlbr[:, 2] = br_x * scale - tlbr[:, 3] = br_y * scale - - boxes = torch.from_numpy(tlbr) - return boxes - - -class ModelEma(torch.nn.Module): - def __init__(self, model, decay=0.9997, device=None): - super(ModelEma, self).__init__() - # make a copy of the model for accumulating moving average of weights - self.module = deepcopy(model) - self.module.eval() - - # import ipdb; ipdb.set_trace() - - self.decay = decay - self.device = device # perform ema on different device from model if set - if self.device is not None: - self.module.to(device=device) - - def _update(self, model, update_fn): - with torch.no_grad(): - for ema_v, model_v in zip( - self.module.state_dict().values(), model.state_dict().values() - ): - if self.device is not None: - model_v = model_v.to(device=self.device) - ema_v.copy_(update_fn(ema_v, model_v)) - - def update(self, model): - self._update(model, update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m) - - def set(self, model): - self._update(model, update_fn=lambda e, m: m) - - -class BestMetricSingle: - def __init__(self, init_res=0.0, better="large") -> None: - self.init_res = init_res - self.best_res = init_res - self.best_ep = -1 - - self.better = better - assert better in ["large", "small"] - - def isbetter(self, new_res, old_res): - if self.better == "large": - return new_res > old_res - if self.better == "small": - return new_res < old_res - - def update(self, new_res, ep): - if self.isbetter(new_res, self.best_res): - self.best_res = new_res - self.best_ep = ep - return True - return False - - def __str__(self) -> str: - return "best_res: {}\t best_ep: {}".format(self.best_res, self.best_ep) - - def __repr__(self) -> str: - return self.__str__() - - def summary(self) -> dict: - return { - "best_res": self.best_res, - "best_ep": self.best_ep, - } - - -class BestMetricHolder: - def __init__(self, init_res=0.0, better="large", use_ema=False) -> None: - self.best_all = BestMetricSingle(init_res, better) - self.use_ema = use_ema - if use_ema: - self.best_ema = BestMetricSingle(init_res, better) - self.best_regular = BestMetricSingle(init_res, better) - - def update(self, new_res, epoch, is_ema=False): - """ - return if the results is the best. 
- """ - if not self.use_ema: - return self.best_all.update(new_res, epoch) - else: - if is_ema: - self.best_ema.update(new_res, epoch) - return self.best_all.update(new_res, epoch) - else: - self.best_regular.update(new_res, epoch) - return self.best_all.update(new_res, epoch) - - def summary(self): - if not self.use_ema: - return self.best_all.summary() - - res = {} - res.update({f"all_{k}": v for k, v in self.best_all.summary().items()}) - res.update({f"regular_{k}": v for k, v in self.best_regular.summary().items()}) - res.update({f"ema_{k}": v for k, v in self.best_ema.summary().items()}) - return res - - def __repr__(self) -> str: - return json.dumps(self.summary(), indent=2) - - def __str__(self) -> str: - return self.__repr__() - - -def targets_to(targets: List[Dict[str, Any]], device): - """Moves the target dicts to the given device.""" - excluded_keys = [ - "questionId", - "tokens_positive", - "strings_positive", - "tokens", - "dataset_name", - "sentence_id", - "original_img_id", - "nb_eval", - "task_id", - "original_id", - "token_span", - "caption", - "dataset_type", - ] - return [ - {k: v.to(device) if k not in excluded_keys else v for k, v in t.items()} for t in targets - ] - - -def get_phrases_from_posmap( - posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer -): - assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor" - if posmap.dim() == 1: - non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist() - token_ids = [tokenized["input_ids"][i] for i in non_zero_idx] - return tokenizer.decode(token_ids) - else: - raise NotImplementedError("posmap must be 1-dim") diff --git a/spaces/ygtxr1997/ReliableSwap_Demo/third_party/PIPNet/lib/preprocess_gssl.py b/spaces/ygtxr1997/ReliableSwap_Demo/third_party/PIPNet/lib/preprocess_gssl.py deleted file mode 100644 index ea804e6874f0ac49ba53bac27db83c8f3285e43f..0000000000000000000000000000000000000000 --- a/spaces/ygtxr1997/ReliableSwap_Demo/third_party/PIPNet/lib/preprocess_gssl.py +++ /dev/null @@ -1,544 +0,0 @@ -import os, cv2 -import hdf5storage -import numpy as np -import sys - -def process_300w(root_folder, folder_name, image_name, label_name, target_size): - image_path = os.path.join(root_folder, folder_name, image_name) - label_path = os.path.join(root_folder, folder_name, label_name) - - with open(label_path, 'r') as ff: - anno = ff.readlines()[3:-1] - anno = [x.strip().split() for x in anno] - anno = [[int(float(x[0])), int(float(x[1]))] for x in anno] - image = cv2.imread(image_path) - image_height, image_width, _ = image.shape - anno_x = [x[0] for x in anno] - anno_y = [x[1] for x in anno] - bbox_xmin = min(anno_x) - bbox_ymin = min(anno_y) - bbox_xmax = max(anno_x) - bbox_ymax = max(anno_y) - bbox_width = bbox_xmax - bbox_xmin - bbox_height = bbox_ymax - bbox_ymin - scale = 1.3 - bbox_xmin -= int((scale-1)/2*bbox_width) - bbox_ymin -= int((scale-1)/2*bbox_height) - bbox_width *= scale - bbox_height *= scale - bbox_width = int(bbox_width) - bbox_height = int(bbox_height) - bbox_xmin = max(bbox_xmin, 0) - bbox_ymin = max(bbox_ymin, 0) - bbox_width = min(bbox_width, image_width-bbox_xmin-1) - bbox_height = min(bbox_height, image_height-bbox_ymin-1) - anno = [[(x-bbox_xmin)/bbox_width, (y-bbox_ymin)/bbox_height] for x,y in anno] - - bbox_xmax = bbox_xmin + bbox_width - bbox_ymax = bbox_ymin + bbox_height - image_crop = image[bbox_ymin:bbox_ymax, bbox_xmin:bbox_xmax, :] - image_crop = cv2.resize(image_crop, (target_size, target_size)) - return image_crop, anno - -def process_wflw(anno, target_size): - 
image_name = anno[-1] - image_path = os.path.join('..', 'data', 'WFLW', 'WFLW_images', image_name) - image = cv2.imread(image_path) - image_height, image_width, _ = image.shape - lms = anno[:196] - lms = [float(x) for x in lms] - lms_x = lms[0::2] - lms_y = lms[1::2] - lms_x = [x if x >=0 else 0 for x in lms_x] - lms_x = [x if x <=image_width else image_width for x in lms_x] - lms_y = [y if y >=0 else 0 for y in lms_y] - lms_y = [y if y <=image_height else image_height for y in lms_y] - lms = [[x,y] for x,y in zip(lms_x, lms_y)] - lms = [x for z in lms for x in z] - bbox = anno[196:200] - bbox = [float(x) for x in bbox] - attrs = anno[200:206] - attrs = np.array([int(x) for x in attrs]) - bbox_xmin, bbox_ymin, bbox_xmax, bbox_ymax = bbox - - width = bbox_xmax - bbox_xmin - height = bbox_ymax - bbox_ymin - scale = 1.2 - bbox_xmin -= width * (scale-1)/2 - # remove a part of top area for alignment, see details in paper - bbox_ymin += height * (scale-1)/2 - bbox_xmax += width * (scale-1)/2 - bbox_ymax += height * (scale-1)/2 - bbox_xmin = max(bbox_xmin, 0) - bbox_ymin = max(bbox_ymin, 0) - bbox_xmax = min(bbox_xmax, image_width-1) - bbox_ymax = min(bbox_ymax, image_height-1) - width = bbox_xmax - bbox_xmin - height = bbox_ymax - bbox_ymin - image_crop = image[int(bbox_ymin):int(bbox_ymax), int(bbox_xmin):int(bbox_xmax), :] - image_crop = cv2.resize(image_crop, (target_size, target_size)) - - tmp1 = [bbox_xmin, bbox_ymin]*98 - tmp1 = np.array(tmp1) - tmp2 = [width, height]*98 - tmp2 = np.array(tmp2) - lms = np.array(lms) - tmp1 - lms = lms / tmp2 - lms = lms.tolist() - lms = zip(lms[0::2], lms[1::2]) - return image_crop, list(lms) - -def process_celeba(root_folder, image_name, bbox, target_size): - image = cv2.imread(os.path.join(root_folder, 'CELEBA', 'img_celeba', image_name)) - image_height, image_width, _ = image.shape - xmin, ymin, xmax, ymax = bbox - width = xmax - xmin + 1 - height = ymax - ymin + 1 - scale = 1.2 - xmin -= width * (scale-1)/2 - # remove a part of top area for alignment, see details in paper - ymin += height * (scale+0.1-1)/2 - xmax += width * (scale-1)/2 - ymax += height * (scale-1)/2 - xmin = max(xmin, 0) - ymin = max(ymin, 0) - xmax = min(xmax, image_width-1) - ymax = min(ymax, image_height-1) - image_crop = image[int(ymin):int(ymax), int(xmin):int(xmax), :] - image_crop = cv2.resize(image_crop, (target_size, target_size)) - return image_crop - -def process_cofw_68_train(image, bbox, anno, target_size): - image_height, image_width, _ = image.shape - anno_x = anno[:29] - anno_y = anno[29:58] - xmin, ymin, width, height = bbox - xmax = xmin + width -1 - ymax = ymin + height -1 - scale = 1.3 - xmin -= width * (scale-1)/2 - ymin -= height * (scale-1)/2 - xmax += width * (scale-1)/2 - ymax += height * (scale-1)/2 - xmin = max(xmin, 0) - ymin = max(ymin, 0) - xmax = min(xmax, image_width-1) - ymax = min(ymax, image_height-1) - anno_x = (anno_x - xmin) / (xmax - xmin) - anno_y = (anno_y - ymin) / (ymax - ymin) - anno = np.concatenate([anno_x.reshape(-1,1), anno_y.reshape(-1,1)], axis=1) - anno = list(anno) - anno = [list(x) for x in anno] - image_crop = image[int(ymin):int(ymax), int(xmin):int(xmax), :] - image_crop = cv2.resize(image_crop, (target_size, target_size)) - return image_crop, anno - -def process_cofw_68_test(image, bbox, anno, target_size): - image_height, image_width, _ = image.shape - anno_x = anno[:,0].flatten() - anno_y = anno[:,1].flatten() - - xmin, ymin, width, height = bbox - xmax = xmin + width -1 - ymax = ymin + height -1 - - scale = 1.3 - xmin -= width 
* (scale-1)/2 - ymin -= height * (scale-1)/2 - xmax += width * (scale-1)/2 - ymax += height * (scale-1)/2 - xmin = max(xmin, 0) - ymin = max(ymin, 0) - xmax = min(xmax, image_width-1) - ymax = min(ymax, image_height-1) - anno_x = (anno_x - xmin) / (xmax - xmin) - anno_y = (anno_y - ymin) / (ymax - ymin) - anno = np.concatenate([anno_x.reshape(-1,1), anno_y.reshape(-1,1)], axis=1) - anno = list(anno) - anno = [list(x) for x in anno] - image_crop = image[int(ymin):int(ymax), int(xmin):int(xmax), :] - image_crop = cv2.resize(image_crop, (target_size, target_size)) - return image_crop, anno - -def gen_meanface(root_folder, data_name): - with open(os.path.join(root_folder, data_name, 'train_300W.txt'), 'r') as f: - annos = f.readlines() - annos = [x.strip().split()[1:] for x in annos] - annos = [[float(x) for x in anno] for anno in annos] - annos = np.array(annos) - meanface = np.mean(annos, axis=0) - meanface = meanface.tolist() - meanface = [str(x) for x in meanface] - - with open(os.path.join(root_folder, data_name, 'meanface.txt'), 'w') as f: - f.write(' '.join(meanface)) - -def convert_wflw(root_folder, data_name): - with open(os.path.join(root_folder, data_name, 'test_WFLW_98.txt'), 'r') as f: - annos = f.readlines() - annos = [x.strip().split() for x in annos] - annos_new = [] - for anno in annos: - annos_new.append([]) - # name - annos_new[-1].append(anno[0]) - anno = anno[1:] - # jaw - for i in range(17): - annos_new[-1].append(anno[i*2*2]) - annos_new[-1].append(anno[i*2*2+1]) - # left eyebrow - annos_new[-1].append(anno[33*2]) - annos_new[-1].append(anno[33*2+1]) - annos_new[-1].append(anno[34*2]) - annos_new[-1].append(str((float(anno[34*2+1])+float(anno[41*2+1]))/2)) - annos_new[-1].append(anno[35*2]) - annos_new[-1].append(str((float(anno[35*2+1])+float(anno[40*2+1]))/2)) - annos_new[-1].append(anno[36*2]) - annos_new[-1].append(str((float(anno[36*2+1])+float(anno[39*2+1]))/2)) - annos_new[-1].append(anno[37*2]) - annos_new[-1].append(str((float(anno[37*2+1])+float(anno[38*2+1]))/2)) - # right eyebrow - annos_new[-1].append(anno[42*2]) - annos_new[-1].append(str((float(anno[42*2+1])+float(anno[50*2+1]))/2)) - annos_new[-1].append(anno[43*2]) - annos_new[-1].append(str((float(anno[43*2+1])+float(anno[49*2+1]))/2)) - annos_new[-1].append(anno[44*2]) - annos_new[-1].append(str((float(anno[44*2+1])+float(anno[48*2+1]))/2)) - annos_new[-1].append(anno[45*2]) - annos_new[-1].append(str((float(anno[45*2+1])+float(anno[47*2+1]))/2)) - annos_new[-1].append(anno[46*2]) - annos_new[-1].append(anno[46*2+1]) - # nose - for i in range(51, 60): - annos_new[-1].append(anno[i*2]) - annos_new[-1].append(anno[i*2+1]) - # left eye - annos_new[-1].append(anno[60*2]) - annos_new[-1].append(anno[60*2+1]) - annos_new[-1].append(str(0.666*float(anno[61*2])+0.333*float(anno[62*2]))) - annos_new[-1].append(str(0.666*float(anno[61*2+1])+0.333*float(anno[62*2+1]))) - annos_new[-1].append(str(0.666*float(anno[63*2])+0.333*float(anno[62*2]))) - annos_new[-1].append(str(0.666*float(anno[63*2+1])+0.333*float(anno[62*2+1]))) - annos_new[-1].append(anno[64*2]) - annos_new[-1].append(anno[64*2+1]) - annos_new[-1].append(str(0.666*float(anno[65*2])+0.333*float(anno[66*2]))) - annos_new[-1].append(str(0.666*float(anno[65*2+1])+0.333*float(anno[66*2+1]))) - annos_new[-1].append(str(0.666*float(anno[67*2])+0.333*float(anno[66*2]))) - annos_new[-1].append(str(0.666*float(anno[67*2+1])+0.333*float(anno[66*2+1]))) - # right eye - annos_new[-1].append(anno[68*2]) - annos_new[-1].append(anno[68*2+1]) - 
annos_new[-1].append(str(0.666*float(anno[69*2])+0.333*float(anno[70*2]))) - annos_new[-1].append(str(0.666*float(anno[69*2+1])+0.333*float(anno[70*2+1]))) - annos_new[-1].append(str(0.666*float(anno[71*2])+0.333*float(anno[70*2]))) - annos_new[-1].append(str(0.666*float(anno[71*2+1])+0.333*float(anno[70*2+1]))) - annos_new[-1].append(anno[72*2]) - annos_new[-1].append(anno[72*2+1]) - annos_new[-1].append(str(0.666*float(anno[73*2])+0.333*float(anno[74*2]))) - annos_new[-1].append(str(0.666*float(anno[73*2+1])+0.333*float(anno[74*2+1]))) - annos_new[-1].append(str(0.666*float(anno[75*2])+0.333*float(anno[74*2]))) - annos_new[-1].append(str(0.666*float(anno[75*2+1])+0.333*float(anno[74*2+1]))) - # mouth - for i in range(76, 96): - annos_new[-1].append(anno[i*2]) - annos_new[-1].append(anno[i*2+1]) - - with open(os.path.join(root_folder, data_name, 'test_WFLW.txt'), 'w') as f: - for anno in annos_new: - f.write(' '.join(anno)+'\n') - -def gen_data(root_folder, data_name, target_size): - if not os.path.exists(os.path.join(root_folder, data_name, 'images_train')): - os.mkdir(os.path.join(root_folder, data_name, 'images_train')) - if not os.path.exists(os.path.join(root_folder, data_name, 'images_test')): - os.mkdir(os.path.join(root_folder, data_name, 'images_test')) - ################################################################################################################ - if data_name == 'CELEBA': - os.system('rmdir ../data/CELEBA/images_test') - with open(os.path.join(root_folder, data_name, 'celeba_bboxes.txt'), 'r') as f: - bboxes = f.readlines() - - bboxes = [x.strip().split() for x in bboxes] - with open(os.path.join(root_folder, data_name, 'train.txt'), 'w') as f: - for bbox in bboxes: - image_name = bbox[0] - print(image_name) - f.write(image_name+'\n') - bbox = bbox[1:] - bbox = [int(x) for x in bbox] - image_crop = process_celeba(root_folder, image_name, bbox, target_size) - cv2.imwrite(os.path.join(root_folder, data_name, 'images_train', image_name), image_crop) - ################################################################################################################ - elif data_name == 'data_300W_CELEBA': - os.system('cp -r ../data/CELEBA/images_train ../data/data_300W_CELEBA/.') - os.system('cp ../data/CELEBA/train.txt ../data/data_300W_CELEBA/train_CELEBA.txt') - - os.system('rmdir ../data/data_300W_CELEBA/images_test') - if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_300W')): - os.mkdir(os.path.join(root_folder, data_name, 'images_test_300W')) - if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_COFW')): - os.mkdir(os.path.join(root_folder, data_name, 'images_test_COFW')) - if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_WFLW')): - os.mkdir(os.path.join(root_folder, data_name, 'images_test_WFLW')) - - # train for data_300W - folders_train = ['afw', 'helen/trainset', 'lfpw/trainset'] - annos_train = {} - for folder_train in folders_train: - all_files = sorted(os.listdir(os.path.join(root_folder, 'data_300W', folder_train))) - image_files = [x for x in all_files if '.pts' not in x] - label_files = [x for x in all_files if '.pts' in x] - assert len(image_files) == len(label_files) - for image_name, label_name in zip(image_files, label_files): - print(image_name) - image_crop, anno = process_300w(os.path.join(root_folder, 'data_300W'), folder_train, image_name, label_name, target_size) - image_crop_name = folder_train.replace('/', '_')+'_'+image_name - cv2.imwrite(os.path.join(root_folder, 
data_name, 'images_train', image_crop_name), image_crop) - annos_train[image_crop_name] = anno - with open(os.path.join(root_folder, data_name, 'train_300W.txt'), 'w') as f: - for image_crop_name, anno in annos_train.items(): - f.write(image_crop_name+' ') - for x,y in anno: - f.write(str(x)+' '+str(y)+' ') - f.write('\n') - - # test for data_300W - folders_test = ['helen/testset', 'lfpw/testset', 'ibug'] - annos_test = {} - for folder_test in folders_test: - all_files = sorted(os.listdir(os.path.join(root_folder, 'data_300W', folder_test))) - image_files = [x for x in all_files if '.pts' not in x] - label_files = [x for x in all_files if '.pts' in x] - assert len(image_files) == len(label_files) - for image_name, label_name in zip(image_files, label_files): - print(image_name) - image_crop, anno = process_300w(os.path.join(root_folder, 'data_300W'), folder_test, image_name, label_name, target_size) - image_crop_name = folder_test.replace('/', '_')+'_'+image_name - cv2.imwrite(os.path.join(root_folder, data_name, 'images_test_300W', image_crop_name), image_crop) - annos_test[image_crop_name] = anno - with open(os.path.join(root_folder, data_name, 'test_300W.txt'), 'w') as f: - for image_crop_name, anno in annos_test.items(): - f.write(image_crop_name+' ') - for x,y in anno: - f.write(str(x)+' '+str(y)+' ') - f.write('\n') - - # test for COFW_68 - test_mat = hdf5storage.loadmat(os.path.join('../data/COFW', 'COFW_test_color.mat')) - images = test_mat['IsT'] - - bboxes_mat = hdf5storage.loadmat(os.path.join('../data/data_300W_CELEBA', 'cofw68_test_bboxes.mat')) - bboxes = bboxes_mat['bboxes'] - image_num = images.shape[0] - with open('../data/data_300W_CELEBA/test_COFW.txt', 'w') as f: - for i in range(image_num): - image = images[i,0] - # grayscale - if len(image.shape) == 2: - image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR) - # swap rgb channel to bgr - else: - image = image[:,:,::-1] - - bbox = bboxes[i,:] - anno_mat = hdf5storage.loadmat(os.path.join('../data/data_300W_CELEBA/cofw68_test_annotations', str(i+1)+'_points.mat')) - anno = anno_mat['Points'] - image_crop, anno = process_cofw_68_test(image, bbox, anno, target_size) - pad_num = 4-len(str(i+1)) - image_crop_name = 'cofw_test_' + '0' * pad_num + str(i+1) + '.jpg' - cv2.imwrite(os.path.join('../data/data_300W_CELEBA/images_test_COFW', image_crop_name), image_crop) - f.write(image_crop_name+' ') - for x,y in anno: - f.write(str(x)+' '+str(y)+' ') - f.write('\n') - - # test for WFLW_68 - test_file = 'list_98pt_rect_attr_test.txt' - with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_rect_attr_train_test', test_file), 'r') as f: - annos_test = f.readlines() - annos_test = [x.strip().split() for x in annos_test] - names_mapping = {} - count = 1 - with open(os.path.join(root_folder, 'data_300W_CELEBA', 'test_WFLW_98.txt'), 'w') as f: - for anno_test in annos_test: - image_crop, anno = process_wflw(anno_test, target_size) - pad_num = 4-len(str(count)) - image_crop_name = 'wflw_test_' + '0' * pad_num + str(count) + '.jpg' - print(image_crop_name) - names_mapping[anno_test[0]+'_'+anno_test[-1]] = [image_crop_name, anno] - cv2.imwrite(os.path.join(root_folder, data_name, 'images_test_WFLW', image_crop_name), image_crop) - f.write(image_crop_name+' ') - for x,y in list(anno): - f.write(str(x)+' '+str(y)+' ') - f.write('\n') - count += 1 - - convert_wflw(root_folder, data_name) - - gen_meanface(root_folder, data_name) - 
################################################################################################################ - elif data_name == 'data_300W_COFW_WFLW': - - os.system('rmdir ../data/data_300W_COFW_WFLW/images_test') - if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_300W')): - os.mkdir(os.path.join(root_folder, data_name, 'images_test_300W')) - if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_COFW')): - os.mkdir(os.path.join(root_folder, data_name, 'images_test_COFW')) - if not os.path.exists(os.path.join(root_folder, data_name, 'images_test_WFLW')): - os.mkdir(os.path.join(root_folder, data_name, 'images_test_WFLW')) - - # train for data_300W - folders_train = ['afw', 'helen/trainset', 'lfpw/trainset'] - annos_train = {} - for folder_train in folders_train: - all_files = sorted(os.listdir(os.path.join(root_folder, 'data_300W', folder_train))) - image_files = [x for x in all_files if '.pts' not in x] - label_files = [x for x in all_files if '.pts' in x] - assert len(image_files) == len(label_files) - for image_name, label_name in zip(image_files, label_files): - print(image_name) - image_crop, anno = process_300w(os.path.join(root_folder, 'data_300W'), folder_train, image_name, label_name, target_size) - image_crop_name = folder_train.replace('/', '_')+'_'+image_name - cv2.imwrite(os.path.join(root_folder, data_name, 'images_train', image_crop_name), image_crop) - annos_train[image_crop_name] = anno - with open(os.path.join(root_folder, data_name, 'train_300W.txt'), 'w') as f: - for image_crop_name, anno in annos_train.items(): - f.write(image_crop_name+' ') - for x,y in anno: - f.write(str(x)+' '+str(y)+' ') - f.write('\n') - - # test for data_300W - folders_test = ['helen/testset', 'lfpw/testset', 'ibug'] - annos_test = {} - for folder_test in folders_test: - all_files = sorted(os.listdir(os.path.join(root_folder, 'data_300W', folder_test))) - image_files = [x for x in all_files if '.pts' not in x] - label_files = [x for x in all_files if '.pts' in x] - assert len(image_files) == len(label_files) - for image_name, label_name in zip(image_files, label_files): - print(image_name) - image_crop, anno = process_300w(os.path.join(root_folder, 'data_300W'), folder_test, image_name, label_name, target_size) - image_crop_name = folder_test.replace('/', '_')+'_'+image_name - cv2.imwrite(os.path.join(root_folder, data_name, 'images_test_300W', image_crop_name), image_crop) - annos_test[image_crop_name] = anno - with open(os.path.join(root_folder, data_name, 'test_300W.txt'), 'w') as f: - for image_crop_name, anno in annos_test.items(): - f.write(image_crop_name+' ') - for x,y in anno: - f.write(str(x)+' '+str(y)+' ') - f.write('\n') - - # train for COFW_68 - ################### - train_file = 'COFW_train_color.mat' - train_mat = hdf5storage.loadmat(os.path.join(root_folder, 'COFW', train_file)) - images = train_mat['IsTr'] - bboxes = train_mat['bboxesTr'] - annos = train_mat['phisTr'] - - count = 1 - with open('../data/data_300W_COFW_WFLW/train_COFW.txt', 'w') as f: - for i in range(images.shape[0]): - image = images[i, 0] - # grayscale - if len(image.shape) == 2: - image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR) - # swap rgb channel to bgr - else: - image = image[:,:,::-1] - bbox = bboxes[i, :] - anno = annos[i, :] - image_crop, anno = process_cofw_68_train(image, bbox, anno, target_size) - pad_num = 4-len(str(count)) - image_crop_name = 'cofw_train_' + '0' * pad_num + str(count) + '.jpg' - f.write(image_crop_name+'\n') - 
cv2.imwrite(os.path.join(root_folder, 'data_300W_COFW_WFLW', 'images_train', image_crop_name), image_crop) - count += 1 - ################### - - # test for COFW_68 - test_mat = hdf5storage.loadmat(os.path.join('../data/COFW', 'COFW_test_color.mat')) - images = test_mat['IsT'] - - bboxes_mat = hdf5storage.loadmat(os.path.join('../data/data_300W_COFW_WFLW', 'cofw68_test_bboxes.mat')) - bboxes = bboxes_mat['bboxes'] - image_num = images.shape[0] - with open('../data/data_300W_COFW_WFLW/test_COFW.txt', 'w') as f: - for i in range(image_num): - image = images[i,0] - # grayscale - if len(image.shape) == 2: - image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR) - # swap rgb channel to bgr - else: - image = image[:,:,::-1] - - bbox = bboxes[i,:] - anno_mat = hdf5storage.loadmat(os.path.join('../data/data_300W_COFW_WFLW/cofw68_test_annotations', str(i+1)+'_points.mat')) - anno = anno_mat['Points'] - image_crop, anno = process_cofw_68_test(image, bbox, anno, target_size) - pad_num = 4-len(str(i+1)) - image_crop_name = 'cofw_test_' + '0' * pad_num + str(i+1) + '.jpg' - cv2.imwrite(os.path.join('../data/data_300W_COFW_WFLW/images_test_COFW', image_crop_name), image_crop) - f.write(image_crop_name+' ') - for x,y in anno: - f.write(str(x)+' '+str(y)+' ') - f.write('\n') - - # train for WFLW_68 - train_file = 'list_98pt_rect_attr_train.txt' - with open(os.path.join('../data', 'WFLW', 'WFLW_annotations', 'list_98pt_rect_attr_train_test', train_file), 'r') as f: - annos_train = f.readlines() - annos_train = [x.strip().split() for x in annos_train] - count = 1 - with open('../data/data_300W_COFW_WFLW/train_WFLW.txt', 'w') as f: - for anno_train in annos_train: - image_crop, anno = process_wflw(anno_train, target_size) - pad_num = 4-len(str(count)) - image_crop_name = 'wflw_train_' + '0' * pad_num + str(count) + '.jpg' - print(image_crop_name) - f.write(image_crop_name+'\n') - cv2.imwrite(os.path.join(root_folder, 'data_300W_COFW_WFLW', 'images_train', image_crop_name), image_crop) - count += 1 - - # test for WFLW_68 - test_file = 'list_98pt_rect_attr_test.txt' - with open(os.path.join(root_folder, 'WFLW', 'WFLW_annotations', 'list_98pt_rect_attr_train_test', test_file), 'r') as f: - annos_test = f.readlines() - annos_test = [x.strip().split() for x in annos_test] - names_mapping = {} - count = 1 - with open(os.path.join(root_folder, 'data_300W_COFW_WFLW', 'test_WFLW_98.txt'), 'w') as f: - for anno_test in annos_test: - image_crop, anno = process_wflw(anno_test, target_size) - pad_num = 4-len(str(count)) - image_crop_name = 'wflw_test_' + '0' * pad_num + str(count) + '.jpg' - print(image_crop_name) - names_mapping[anno_test[0]+'_'+anno_test[-1]] = [image_crop_name, anno] - cv2.imwrite(os.path.join(root_folder, data_name, 'images_test_WFLW', image_crop_name), image_crop) - f.write(image_crop_name+' ') - for x,y in list(anno): - f.write(str(x)+' '+str(y)+' ') - f.write('\n') - count += 1 - - convert_wflw(root_folder, data_name) - - gen_meanface(root_folder, data_name) - else: - print('Wrong data!') - -if __name__ == '__main__': - if len(sys.argv) < 2: - print('please input the data name.') - print('1. CELEBA') - print('2. data_300W_CELEBA') - print('3. 
data_300W_COFW_WFLW') - exit(0) - else: - data_name = sys.argv[1] - gen_data('../data', data_name, 256) - - diff --git a/spaces/yiningmao/metaphor-detection-baseline/scripts/run_bagging.sh b/spaces/yiningmao/metaphor-detection-baseline/scripts/run_bagging.sh deleted file mode 100644 index e994aa9d70c32c8b1e5cc0dacdbfb03787361b42..0000000000000000000000000000000000000000 --- a/spaces/yiningmao/metaphor-detection-baseline/scripts/run_bagging.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -INDEXES=$(seq 0 9) -for i in $INDEXES -do - echo "Running bagging for index $i" - python main.py --data_dir data/VUA20 --task_name vua --model_type MELBERT --train_batch_size 32 --learning_rate 3e-5 --warmup_epoch 2 --num_bagging 10 --bagging_index $i -done \ No newline at end of file diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deprecated/mmbt/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deprecated/mmbt/__init__.py deleted file mode 100644 index e467090cb4fbfa55ec51ec8232a54180c532ad6c..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deprecated/mmbt/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import TYPE_CHECKING - -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_mmbt": ["MMBTConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"] - - -if TYPE_CHECKING: - from .configuration_mmbt import MMBTConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/ibert/configuration_ibert.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/ibert/configuration_ibert.py deleted file mode 100644 index 249061ceae32734b2873fb3370022fe1a11f74e8..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/ibert/configuration_ibert.py +++ /dev/null @@ -1,146 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The I-BERT Authors (Sehoon Kim, Amir Gholami, Zhewei Yao, -# Michael Mahoney, Kurt Keutzer - UC Berkeley) and The HuggingFace Inc. team. -# Copyright (c) 20121, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" I-BERT configuration""" -from collections import OrderedDict -from typing import Mapping - -from ...configuration_utils import PretrainedConfig -from ...onnx import OnnxConfig -from ...utils import logging - - -logger = logging.get_logger(__name__) - -IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", - "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", - "kssteven/ibert-roberta-large-mnli": ( - "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" - ), -} - - -class IBertConfig(PretrainedConfig): - """ - This is the configuration class to store the configuration of a [`IBertModel`]. It is used to instantiate a I-BERT - model according to the specified arguments, defining the model architecture. Instantiating a configuration with the - defaults will yield a similar configuration to that of the IBERT - [kssteven/ibert-roberta-base](https://huggingface.co/kssteven/ibert-roberta-base) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - Args: - vocab_size (`int`, *optional*, defaults to 30522): - Vocabulary size of the I-BERT model. Defines the number of different tokens that can be represented by the - `inputs_ids` passed when calling [`IBertModel`] - hidden_size (`int`, *optional*, defaults to 768): - Dimensionality of the encoder layers and the pooler layer. - num_hidden_layers (`int`, *optional*, defaults to 12): - Number of hidden layers in the Transformer encoder. - num_attention_heads (`int`, *optional*, defaults to 12): - Number of attention heads for each attention layer in the Transformer encoder. - intermediate_size (`int`, *optional*, defaults to 3072): - Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. - hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, - `"relu"`, `"silu"` and `"gelu_new"` are supported. - hidden_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout ratio for the attention probabilities. - max_position_embeddings (`int`, *optional*, defaults to 512): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - type_vocab_size (`int`, *optional*, defaults to 2): - The vocabulary size of the `token_type_ids` passed when calling [`IBertModel`] - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
- layer_norm_eps (`float`, *optional*, defaults to 1e-12): - The epsilon used by the layer normalization layers. - position_embedding_type (`str`, *optional*, defaults to `"absolute"`): - Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For - positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to - [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). - For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models - with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). - quant_mode (`bool`, *optional*, defaults to `False`): - Whether to quantize the model or not. - force_dequant (`str`, *optional*, defaults to `"none"`): - Force dequantize specific nonlinear layer. Dequatized layers are then executed with full precision. - `"none"`, `"gelu"`, `"softmax"`, `"layernorm"` and `"nonlinear"` are supported. As deafult, it is set as - `"none"`, which does not dequantize any layers. Please specify `"gelu"`, `"softmax"`, or `"layernorm"` to - dequantize GELU, Softmax, or LayerNorm, respectively. `"nonlinear"` will dequantize all nonlinear layers, - i.e., GELU, Softmax, and LayerNorm. - """ - - model_type = "ibert" - - def __init__( - self, - vocab_size=30522, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=2, - initializer_range=0.02, - layer_norm_eps=1e-12, - pad_token_id=1, - bos_token_id=0, - eos_token_id=2, - position_embedding_type="absolute", - quant_mode=False, - force_dequant="none", - **kwargs, - ): - super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) - - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.hidden_act = hidden_act - self.intermediate_size = intermediate_size - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.initializer_range = initializer_range - self.layer_norm_eps = layer_norm_eps - self.position_embedding_type = position_embedding_type - self.quant_mode = quant_mode - self.force_dequant = force_dequant - - -class IBertOnnxConfig(OnnxConfig): - @property - def inputs(self) -> Mapping[str, Mapping[int, str]]: - if self.task == "multiple-choice": - dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} - else: - dynamic_axis = {0: "batch", 1: "sequence"} - return OrderedDict( - [ - ("input_ids", dynamic_axis), - ("attention_mask", dynamic_axis), - ] - ) diff --git a/spaces/yizhangliu/ImgCleaner/utils.py b/spaces/yizhangliu/ImgCleaner/utils.py deleted file mode 100644 index a0f1ca482e99a00c48cb6096c9199ad30350e88a..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/ImgCleaner/utils.py +++ /dev/null @@ -1,353 +0,0 @@ -css = ''' - .container {max-width: 1150px; margin: auto;padding-top: 1.5rem} - #begin-btn {color: blue; font-size:20px;} - #work-container {min-width: min(160px, 100%) !important;flex-grow: 0 !important} - - #scroll_x_row {height:20px;} - #op-container{margin: 0 auto; text-align: center;width:fit-content;min-width: 
min(150px, 100%);flex-grow: 0; flex-wrap: nowrap;} - #erase-btn-container{margin: 0 auto; text-align: center;width:150px;border-width:3px;border-color:#2c9748} - #erase-btn {padding:0;} - #enhancer-checkbox{width:520px} - #enhancer-tip{width:450px} - #enhancer-tip-div{text-align: left} - - #image_output{margin: 0 auto; text-align: center;width:640px} - - #download-container{margin: 0 auto; text-align: center;width:fit-content; min-width: min(150px, 100%);flex-grow: 0; flex-wrap: nowrap;} - - #download-btn-container{margin: 0 auto; text-align: center;width: 100px;border-width:1px;border-color:#2c9748} - #download-btn {padding:0;} - - #share-container{margin: 0 auto; text-align: center;width:fit-content; min-width: min(150px, 100%);flex-grow: 0; flex-wrap: nowrap;} - - #image_upload .touch-none{display: flex} - @keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } - } - #share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; - } - #share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; - } - #share-btn * { - all: unset; - } - #share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; - } - #share-btn-container .wrap { - display: none !important; - } - - .scrollbar_x { - height: 15px; - width: 50px; - border-radius: 10px; - background: #ccc; - position: absolute; - top: 0px; - } -''' - -start_cleaner = """async() => { - function isMobile() { - try { - document.createEvent("TouchEvent"); return true; - } catch(e) { - return false; - } - } - - function getClientHeight() - { - var clientHeight=0; - if(document.body.clientHeight&&document.documentElement.clientHeight) { - var clientHeight = (document.body.clientHeightdocument.documentElement.clientHeight)?document.body.clientHeight:document.documentElement.clientHeight; - } - return clientHeight; - } - - var gradioEl = document.querySelector('body > gradio-app').shadowRoot; - if (!gradioEl) { - gradioEl = document.querySelector('body > gradio-app'); - } - - const page1 = gradioEl.querySelectorAll('#page_1')[0]; - const page2 = gradioEl.querySelectorAll('#page_2')[0]; - const image_upload = gradioEl.querySelectorAll('#image_upload')[0]; - const image_output = gradioEl.querySelectorAll('#image_output')[0]; - const image_output_container = gradioEl.querySelectorAll('#image-output-container')[0]; - const data_image = gradioEl.querySelectorAll('#image_upload [data-testid="image"]')[0]; - const data_image_div = gradioEl.querySelectorAll('#image_upload [data-testid="image"] > div')[0]; - - const scroll_x_container = gradioEl.querySelectorAll('#scroll_x_container')[0]; - - image_output_container.setAttribute('style', 'width: 0px; height:0px; display:none;'); - var clientHeight = getClientHeight(); - if (isMobile()) { - window.devicePixelRatio = 1; - const page1_width = page1.offsetWidth; - min_height = (clientHeight - 100) + 'px;'; - - image_upload.setAttribute('style', 'width:' + (page1_width - 13*2) + 'px; min-height:' + min_height); - data_image.setAttribute('style', 'width: ' + (page1_width - 14*2) + 'px; min-height:' + min_height); - data_image_div.setAttribute('style', 'width: ' + (page1_width - 14*2) + 'px; min-height:' + min_height); 
- scroll_x_container.setAttribute('style', 'width: ' + (page1_width - 14*2) + 'px;height:20px;'); - image_output.setAttribute('style', 'width: ' + (page1_width - 13*2) + 'px; min-height:none;'); - - const enhancer = gradioEl.querySelectorAll('#enhancer-checkbox')[0]; - enhancer.style.display = "none"; - - } else { - max_height = clientHeight - 150; //800; - - const container = gradioEl.querySelectorAll('.container')[0]; - container.setAttribute('style', 'max-width: 100%;'); - - data_image.setAttribute('style', 'height: ' + max_height + 'px'); - data_image_div.setAttribute('style', 'min-height: ' + max_height + 'px'); - } - if (!(gradioEl.parentNode)) { - const share_btn_container = gradioEl.querySelectorAll('#share-btn-container')[0]; - share_btn_container.setAttribute('style', 'width: 0px; height:0px;'); - const share_btn_share_icon = gradioEl.querySelectorAll('#share-btn-share-icon')[0]; - share_btn_share_icon.setAttribute('style', 'width: 0px; height:0px;'); - } - page1.style.display = "none"; - page2.style.display = "block"; - - window['gradioEl'] = gradioEl; - window['doCheckGallery'] = 0; - window['checkGallery'] = function checkGallery() { - try { - if (window['doCheckGallery'] == 0) { - var gallery_items = window['gradioEl'].querySelectorAll('#gallery .gallery-item'); - if (gallery_items && gallery_items.length == 2) { - window.clearInterval(window['checkGallery_interval']); - window['doCheckGallery'] = 1; - gallery_items[gallery_items.length-1].click(); - } - } - } catch(e) { - } - } - window['checkGallery_interval'] = window.setInterval("window.checkGallery()", 500); - - window['start_workshop'] = function(workshop) { - var scroll_x_container = window['gradioEl'].querySelector('#scroll_x_container'); - var scrollbar_x = scroll_x_container.querySelector('#scrollbar_x'); - if (!scrollbar_x) { - var bar_height = 20; - var bar_width = 50; - var scrollbar_x = document.createElement('div'); - var css_x = `height: ${bar_height}px; width: 50px; border-radius: 10px; background: #007ACC;position: absolute; top: 0px;z-index:45;display:none;`; - scrollbar_x.style.cssText = css_x; - scrollbar_x.id = 'scrollbar_x'; - - scroll_x_container.appendChild(scrollbar_x); - scrollbar_x = scroll_x_container.querySelector('#scrollbar_x'); - } - if (scrollbar_x) { - scrollbar_x.style.top = '0px'; - scrollbar_x.style.left = '0px'; - scroll_x_container.ratio_x = (workshop.scrollWidth - workshop.offsetWidth) / (workshop.offsetWidth - bar_width); - window['put_log']('scrollbar_x_1_' + '/' + workshop.scrollWidth + '/' + workshop.offsetWidth + '/' + bar_width + '/' + scroll_x_container.ratio_x); - if (workshop.scrollWidth - workshop.offsetWidth > 0) { - scrollbar_x.style.display = 'block'; - } - } - - scroll_x_container.scrollbar_x = scrollbar_x; - scroll_x_container.workshop = workshop; - - - if (isMobile()) { - mousedown = 'touchstart'; - mousemove = 'touchmove'; - mouseup = 'touchend'; - } else { - mousedown = 'mousedown'; - mouseup = 'mouseup'; - mousemove = 'mousemove'; - } - - scroll_x_container.addEventListener(mousedown, function (e) { - if (this.scrollbar_x && e.target === this.scrollbar_x) { - if (isMobile()) { - e = e.touches[0]; - } - this.prevX = e.pageX; - } - }); - scroll_x_container.addEventListener(mouseup, function (e) { - if (this.scrollbar_x && e.target === this.scrollbar_x) { - this.prevX = null; - } - this.prevX = null; - }); - - scroll_x_container.addEventListener(mousemove, function (e) { - if (this.scrollbar_x && e.target === this.scrollbar_x) { - if (isMobile()) { - e = e.touches[0]; - 
} - if (this.prevX) { - offset = (e.pageX - this.prevX) * this.ratio_x; - this.workshop.scrollLeft = this.workshop.scrollLeft + offset; - scrollbar_left_x = this.scrollbar_x.offsetLeft + (e.pageX - this.prevX); - temp_x = this.scrollWidth - scrollbar_left_x - this.scrollbar_x.clientWidth; - if (temp_x >= 0 && temp_x <= (this.scrollWidth - this.scrollbar_x.clientWidth)) { - this.scrollbar_x.style.left = scrollbar_left_x + 'px'; - this.prevX = e.pageX; - } - } - } - if (!isMobile()) { - e.preventDefault(); - } - }); - - } - - window['move_nodes'] = function(node1, node2, selectors){ - var children = node1.querySelectorAll(selectors); - for (var i = 0; i < children.length; i++) { - node2.appendChild(children[i]); - } - } - - function get_time(){ - var myDate = new Date(); - var Y = myDate.getFullYear(); - var M = myDate.getMonth() + 1; - var D = myDate.getDate(); - var H = myDate.getHours(); - var i = myDate.getMinutes(); - var s = myDate.getSeconds(); - if(M < 10){M = '0' + M;} - if(D < 10){D = '0' + D;} - if(H < 10){H = '0' + H;} - if(i < 10){i = '0' + i;} - if(s < 10){s = '0' +s;} - var nowTime = Y+'-'+M+'-'+D+' '+H+':'+i+':'+s; - return nowTime; - } - window['log_container'] = gradioEl.querySelectorAll('#log_container')[0]; - window['put_log'] = function(log_info) { - if (window.location.href.indexOf(':7860') < 0) {return;} - window['log_container'].innerHTML += '
    '; - window['log_container'].innerHTML += get_time() + '-' + log_info; - } - - window['doCheckCanvas'] = 0; - window['checkCanvas'] = function checkCanvas() { - try { - var workshop = window['gradioEl'].querySelectorAll('#image_upload [data-testid="image"] > div >div')[0]; - if (workshop) { - var canvas = workshop.querySelectorAll('canvas'); - if (canvas.length === 5) { - if (window['doCheckCanvas'] === 0) { - window['put_log']('_0_' + window['doCheckCanvas']); - - window['doCheckCanvas'] = 1; - - var work_layer = document.createElement('div'); - var css_workshop = "width: 100%;height: 100%;padding: 0rem;box-sizing: border-box;overflow: hidden;position: relative;white-space:nowrap;z-index:45;"; - workshop.insertBefore(work_layer, canvas[0]); - work_layer.style.cssText = css_workshop; - work_layer.id = 'work_layer'; - work_layer.style.display = 'block'; - - window['put_log']('_1_' + window['doCheckCanvas'] + '/' + canvas[0].style.cssText); - - setTimeout(function(){ - window['put_log']('_2_' + window['doCheckCanvas']); - window['move_nodes'](workshop, work_layer, 'canvas'); - window['put_log']('_3_' + window['doCheckCanvas']); - window['start_workshop'](work_layer); - window['put_log']('_4_' + window['doCheckCanvas']); - setTimeout(function(){ - var image_upload = window['gradioEl'].querySelectorAll('#image_upload')[0]; - var btns = image_upload.querySelectorAll('button'); - window['put_log']('_5_' + btns.length); - if (btns.length == 3) { - window['put_log']('_6_' + btns.length); - btns[0].click(); - } - }, 100); - }, 200); - - return; - } - } else { - window['log_container'].innerHTML = ''; - - window['doCheckCanvas'] = 0; - var scrollbar_x = window['gradioEl'].querySelector('#scrollbar_x'); - if (scrollbar_x) { - scrollbar_x.parentNode.removeChild(scrollbar_x); - } - } - } else { - window['log_container'].innerHTML = ''; - - window['doCheckCanvas'] = 0; - var scrollbar_x = window['gradioEl'].querySelector('#scrollbar_x'); - if (scrollbar_x) { - scrollbar_x.parentNode.removeChild(scrollbar_x); - } - } - } catch(e) { - } - } - if (isMobile()) { - window['checkCanvas_interval'] = window.setInterval("window.checkCanvas()", 100); - } -}""" - -download_img = """async() => { - Date.prototype.Format = function (fmt) { - var o = { - "M+": this.getMonth() + 1, - "d+": this.getDate(), - "h+": this.getHours(), - "m+": this.getMinutes(), - "s+": this.getSeconds(), - "q+": Math.floor((this.getMonth() + 3) / 3), - "S": this.getMilliseconds() - }; - if (/(y+)/.test(fmt)) - fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length)); - for (var k in o) - if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? 
(o[k]) : (("00" + o[k]).substr(("" + o[k]).length))); - return fmt; - } - - var gradioEl = document.querySelector('body > gradio-app').shadowRoot; - if (!gradioEl) { - gradioEl = document.querySelector('body > gradio-app'); - } - const out_image = gradioEl.querySelectorAll('#image_output img')[0]; - if (out_image) { - var x=new XMLHttpRequest(); - x.open("GET", out_image.src, true); - x.responseType = 'blob'; - x.onload = function(e){ - var url = window.URL.createObjectURL(x.response) - var a = document.createElement('a'); - a.href = url; - a.download = (new Date()).Format("yyyyMMdd_hhmmss"); - a.click(); - } - x.send(); - } -}""" diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/wav_upload.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/wav_upload.py deleted file mode 100644 index cac679de78634e638e9a998615406b1c36374fb5..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/wav_upload.py +++ /dev/null @@ -1,23 +0,0 @@ -from google.colab import files -import shutil -import os -import argparse -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--type", type=str, required=True, help="type of file to upload") - args = parser.parse_args() - file_type = args.type - - basepath = os.getcwd() - uploaded = files.upload() # 上传文件 - assert(file_type in ['zip', 'audio']) - if file_type == "zip": - upload_path = "./upload/" - for filename in uploaded.keys(): - #将上传的文件移动到指定的位置上 - shutil.move(os.path.join(basepath, filename), os.path.join(upload_path, "userzip.zip")) - elif file_type == "audio": - upload_path = "./raw/" - for filename in uploaded.keys(): - #将上传的文件移动到指定的位置上 - shutil.move(os.path.join(basepath, filename), os.path.join(upload_path, filename)) \ No newline at end of file diff --git a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vencoder/DPHubert.py b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vencoder/DPHubert.py deleted file mode 100644 index 95b98b8b2e08e76139ce652bbbdb60dc42248a19..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vencoder/DPHubert.py +++ /dev/null @@ -1,26 +0,0 @@ -from vencoder.encoder import SpeechEncoder -import torch -from vencoder.dphubert.model import wav2vec2_model - -class DPHubert(SpeechEncoder): - def __init__(self,vec_path = "pretrain/DPHuBERT-sp0.75.pth",device=None): - print("load model(s) from {}".format(vec_path)) - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - self.dev = torch.device(device) - ckpt = torch.load(vec_path) - self.hidden_dim = 768 - self.model = wav2vec2_model(**ckpt["config"]).to(self.dev) - self.model.load_state_dict(ckpt["state_dict"], strict=False) - - def encoder(self, wav): - feats = wav - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats[None,:] - with torch.no_grad(): - with torch.inference_mode(): - units = self.model(feats)[0] - return units.transpose(1,2) diff --git a/spaces/youngs3/coqui-ai-tts-ko/app.py b/spaces/youngs3/coqui-ai-tts-ko/app.py deleted file mode 100644 index c0e60164004ae89d29c362a3572237e4af762d18..0000000000000000000000000000000000000000 --- a/spaces/youngs3/coqui-ai-tts-ko/app.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -os.system('pip install gradio==2.3.0a0') -os.system('pip freeze') -import gradio as gr -from subprocess import call - -if (not os.path.exists("korean.py")): - os.system("wget 
https://raw.githubusercontent.com/TensorSpeech/TensorFlowTTS/master/tensorflow_tts/utils/korean.py -O korean.py") - -import korean - -def run_cmd(command): - try: - print(command) - call(command) - except KeyboardInterrupt: - print("Process interrupted") - sys.exit(1) - -def inference(text): - cmd = ['tts', '--text', "".join(korean.tokenize(text)), '--model_path', 'vits-kss-checkpoint_90000.pth', '--config_path', 'vits-kss-config.json'] - run_cmd(cmd) - return 'tts_output.wav' - -if (not os.path.exists("vits-kss-checkpoint_90000.pth")): - os.system("wget -q https://huggingface.co/youngs3/coqui-vits-ko/resolve/main/vits-kss-checkpoint_90000.pth -O vits-kss-checkpoint_90000.pth") - os.system("wget -q https://huggingface.co/youngs3/coqui-vits-ko/resolve/main/vits-kss-config.json -O vits-kss-config.json") - -inputs = gr.inputs.Textbox(lines=5, label="Input Text") -outputs = gr.outputs.Audio(type="file",label="Output Audio") -title = "Korean Language coqui-ai-TTS" -description = "Gradio demo for coqui-ai-TTS, using a VITS model trained on the kss dataset. To use it, simply add your text, or click one of the examples to load them. Read more at the links below." -article = "

    TTS is a library for advanced Text-to-Speech generation | Github Repo

    " -examples = [ - ["공부가 가장 쉬웠어요!"] -] -gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, enable_queue=True).launch() \ No newline at end of file diff --git a/spaces/younker/chatgpt-turbo/README.md b/spaces/younker/chatgpt-turbo/README.md deleted file mode 100644 index 7a5015e1027b4f213b56e7a3ab8c0388ba48dad4..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Chatgpt Turbo -emoji: 📚 -colorFrom: green -colorTo: green -sdk: docker -pinned: false -app_port: 7860 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ysharma/LLaVA_v1/llava/eval/webpage/index.html b/spaces/ysharma/LLaVA_v1/llava/eval/webpage/index.html deleted file mode 100644 index c2e3cf020ba7d8e064f2cd801788a5d2d50b97da..0000000000000000000000000000000000000000 --- a/spaces/ysharma/LLaVA_v1/llava/eval/webpage/index.html +++ /dev/null @@ -1,162 +0,0 @@ - - - - - - Who's GPT-4's favorite? Battles between State-of-the-Art Chatbots - - - - - - - - -
-    [index.html body markup not recoverable; surviving page text only]
-    Who's GPT-4's favorite? Battles between State-of-the-Art Chatbots
-    other logo
-    vicuna logo
-    Assistant #2 (Vicuna, our model)
-    GPT-4 Evaluation
-    This website is co-authored with GPT-4.
    - - - - - - - - - - - - - diff --git a/spaces/zenafey/illusion/app.py b/spaces/zenafey/illusion/app.py deleted file mode 100644 index 5a8537acf65d5e250a89c546da3270891180ebdd..0000000000000000000000000000000000000000 --- a/spaces/zenafey/illusion/app.py +++ /dev/null @@ -1,75 +0,0 @@ -import replicate -import gradio as gr -from io import BytesIO -import base64 -import os - -illuse = replicate.Client(api_token=os.getenv('REPLICATE')) -model_name = "andreasjansson/illusion:75d51a73fce3c00de31ed9ab4358c73e8fc0f627dc8ce975818e653317cb919b" -example_image = "https://replicate.delivery/pbxt/hHJNV9QteKX8DK2ckkUeXsqbEIKNGFXU1fN0MJoizz3iPlOjA/output-0.png" - -def generate(prompt, negative_prompt, qr_content, pattern_image, num_inference_steps, guidance_scale, width, height, seed, num_outputs, controlnet_conditioning_scale, border, qrcode_background): - try: - inputs = { - 'prompt': prompt, - 'negative_prompt': negative_prompt, - 'qr_code_content': qr_content, - 'num_inference_steps': num_inference_steps, - 'guidance_scale': guidance_scale, - 'width': width, - 'height': height, - 'seed': seed, - 'num_outputs': num_outputs, - 'controlnet_conditioning_scale': controlnet_conditioning_scale, - 'border': border, - 'qrcode_background': qrcode_background - } - if pattern_image is not None: - inputs['image'] = open(pattern_image, 'rb') - - result = illuse.run( - model_name, - input=inputs - ) - return result - except Exception as e: - print(e) - gr.Error(str(e)) - return - - -with gr.Blocks() as demo: - gr.Markdown(""" -# Illusion Diffusion Fast demo -## powered by replicate -""") - with gr.Row(): - with gr.Column(): - prompt = gr.Textbox(label="Prompt") - negative_prompt = gr.Textbox(label="Negative") - with gr.Row(): - qr_content = gr.Textbox(label="QR Code Content", placeholder="https://youtube.com/") - pattern_input = gr.Image(label="Pattern Image(if used QR Code Content wont be used)", type="filepath") - with gr.Accordion("Additional Settings", open=False): - with gr.Row(): - num_inference_steps = gr.Slider(label="num_inference_steps", minimum=20, maximum=100, step=1, value=50) - guidance_scale = gr.Slider(label="guidance_scale", minimum=0.1, maximum=30, step=0.01, value=7.5) - with gr.Row(): - width = gr.Slider(label='width', minimum=128, maximum=1024, step=8, value=768) - height = gr.Slider(label='height', minimum=128, maximum=1024, step=8, value=768) - with gr.Row(): - seed = gr.Number(label='seed', value=-1) - num_outputs = gr.Slider(label="num_outputs", minimum=1, maximum=4, step=1) - with gr.Row(): - controlnet_conditioning_scale = gr.Slider(label="controlnet_conditioning_scale", minimum=0, maximum=4, step=1, value=1) - border = gr.Slider(label="border", minimum=0, maximum=4, step=1, value=4) - qrcode_background = gr.Dropdown(label="qrcode_background", choices=['gray', 'white'], value='white') - run_btn = gr.Button("Run", variant="primary") - output = gr.Gallery([example_image]) - - generation_event = run_btn.click(generate, inputs=[prompt, negative_prompt, qr_content, pattern_input, - num_inference_steps, guidance_scale, width, height, seed, - num_outputs, controlnet_conditioning_scale, border, - qrcode_background], outputs=output) - -demo.launch(show_api=False) diff --git a/spaces/zhan66/vits-simple-api/vits/text/vits_pinyin.py b/spaces/zhan66/vits-simple-api/vits/text/vits_pinyin.py deleted file mode 100644 index 45bcca442a73cf34accbfa84a1402085c4db9154..0000000000000000000000000000000000000000 --- a/spaces/zhan66/vits-simple-api/vits/text/vits_pinyin.py +++ /dev/null @@ -1,98 +0,0 @@ -""" 
from https://github.com/PlayVoice/vits_chinese """ -import pypinyin -from pypinyin.contrib.neutral_tone import NeutralToneWith5Mixin -from pypinyin.converter import DefaultConverter -from pypinyin.core import Pinyin - -import numpy as np - -from vits.bert.prosody_tool import pinyin_dict -from vits.bert import TTSProsody - - -class MyConverter(NeutralToneWith5Mixin, DefaultConverter): - pass - - -def is_chinese(uchar): - if uchar >= u'\u4e00' and uchar <= u'\u9fa5': - return True - else: - return False - - -def clean_chinese(text: str): - text = text.strip() - text_clean = [] - for char in text: - if (is_chinese(char)): - text_clean.append(char) - else: - if len(text_clean) > 1 and is_chinese(text_clean[-1]): - text_clean.append(',') - text_clean = ''.join(text_clean).strip(',') - return text_clean - - -class VITS_PinYin: - def __init__(self, bert_path, device): - self.pinyin_parser = Pinyin(MyConverter()) - self.prosody = TTSProsody(bert_path, device) - - def chinese_to_phonemes(self, text): - # 考虑使用g2pw的chinese bert替换原始的pypinyin,目前测试下来运行速度太慢。 - # 将标准中文文本符号替换成 bert 符号库中的单符号,以保证bert的效果. - text = text.replace("——", "...") \ - .replace("—", "...") \ - .replace("……", "...") \ - .replace("…", "...") \ - .replace('“', '"') \ - .replace('”', '"') \ - .replace("\n", "") - tokens = self.prosody.char_model.tokenizer.tokenize(text) - text = ''.join(tokens) - assert not tokens.count("[UNK]") - pinyins = np.reshape(pypinyin.pinyin(text, style=pypinyin.TONE3), (-1)) - try: - phone_index = 0 - phone_items = [] - phone_items.append('sil') - count_phone = [] - count_phone.append(1) - temp = "" - - len_pys = len(tokens) - for word in tokens: - if is_chinese(word): - count_phone.append(2) - if (phone_index >= len_pys): - print( - f"!!!![{text}]plz check ur text whether includes MULTIBYTE symbol.\ - (请检查你的文本中是否包含多字节符号)") - pinyin = pinyins[phone_index] - phone_index = phone_index + 1 - if not pinyin[-1].isdigit(): - pinyin += "5" - if pinyin[:-1] in pinyin_dict: - tone = pinyin[-1] - a = pinyin[:-1] - a1, a2 = pinyin_dict[a] - phone_items += [a1, a2 + tone] - else: - temp += word - if temp == pinyins[phone_index]: - temp = "" - phone_index += 1 - count_phone.append(1) - phone_items.append('sp') - - count_phone.append(1) - phone_items.append('sil') - phone_items_str = ' '.join(phone_items) - except IndexError as e: - print('except:', e) - - text = f'[PAD]{text}[PAD]' - char_embeds = self.prosody.get_char_embeds(text) - char_embeds = self.prosody.expand_for_phone(char_embeds, count_phone) - return phone_items_str, char_embeds diff --git a/spaces/zhoucr/ai-koni/transforms.py b/spaces/zhoucr/ai-koni/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/zhoucr/ai-koni/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - 
inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - 
- if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/zhsso/roop/roop/globals.py b/spaces/zhsso/roop/roop/globals.py deleted file mode 100644 index 77fd391db235b878ce1f91765596bd76adb06697..0000000000000000000000000000000000000000 --- a/spaces/zhsso/roop/roop/globals.py +++ /dev/null @@ -1,17 +0,0 @@ -from typing import List - -source_path = None -target_path = None -output_path = None -frame_processors: List[str] = [] -keep_fps = None -keep_audio = None -keep_frames = None -many_faces = None -video_encoder = None -video_quality = None -max_memory = None -execution_providers: List[str] = [] -execution_threads = None -headless = None -log_level = 'error' diff --git a/spaces/zhuanjiaoover/bingo/Dockerfile b/spaces/zhuanjiaoover/bingo/Dockerfile deleted file mode 100644 index c677b05b75f7e4b2beee8c97fb47957a0861a83e..0000000000000000000000000000000000000000 --- a/spaces/zhuanjiaoover/bingo/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM weaigc/bingo:latest - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -CMD npm start diff --git a/spaces/zomehwh/sovits-goldship/vdecoder/__init__.py b/spaces/zomehwh/sovits-goldship/vdecoder/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
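
For reference, the transforms.py module removed above implements the piecewise rational-quadratic spline used as a monotonic flow step in VITS-style models. The sketch below shows one way the top-level piecewise_rational_quadratic_transform entry point might be exercised; the tensor shapes, the "linear" tails setting, and the local `from transforms import ...` are illustrative assumptions, not usage taken from any of the deleted Spaces.

import torch

# Assumes the deleted module above has been saved locally as transforms.py (hypothetical setup).
from transforms import piecewise_rational_quadratic_transform

torch.manual_seed(0)
batch, length, num_bins = 2, 5, 10

# Points to transform; with tails="linear", values outside [-1, 1] pass through unchanged.
inputs = torch.rand(batch, length) * 2 - 1

# Unconstrained spline parameters, as a flow network would predict them.
unnormalized_widths = torch.randn(batch, length, num_bins)
unnormalized_heights = torch.randn(batch, length, num_bins)
unnormalized_derivatives = torch.randn(batch, length, num_bins - 1)  # padded to num_bins + 1 internally

# Forward pass: warped outputs plus the log|det| of the elementwise Jacobian.
outputs, logabsdet = piecewise_rational_quadratic_transform(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails="linear",
    tail_bound=1.0,
)

# Inverse pass with the same parameters should undo the forward warp.
recovered, _ = piecewise_rational_quadratic_transform(
    outputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=True,
    tails="linear",
    tail_bound=1.0,
)
print("max round-trip error:", (recovered - inputs).abs().max().item())

Because the spline is strictly monotonic inside the tail bound and the identity outside it, the round-trip error should sit at the level of floating-point noise.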