diff --git a/spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/mail.py b/spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/mail.py
deleted file mode 100644
index 1130e7df1d309fdf35e13380cf81a1d162d14ff5..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/mail.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import email
-
-import requests
-
-
-class MailClient:
-
-    def __init__(self):
-        self.username = None
-        self.token = None
-        self.raw = None
-        self.mailids = None
-        self.mails = None
-        self.mail = None
-
-    def create(self, force=False):
-        headers = {
-            'accept': 'application/json',
-        }
-
-        if self.username:
-            pass
-        else:
-            self.response = requests.put(
-                'https://www.developermail.com/api/v1/mailbox', headers=headers)
-            self.response = self.response.json()
-            self.username = self.response['result']['name']
-            self.token = self.response['result']['token']
-
-        return {'username': self.username, 'token': self.token}
-
-    def destroy(self):
-        headers = {
-            'accept': 'application/json',
-            'X-MailboxToken': self.token,
-        }
-        self.response = requests.delete(
-            f'https://www.developermail.com/api/v1/mailbox/{self.username}', headers=headers)
-        self.response = self.response.json()
-        self.username = None
-        self.token = None
-        return self.response
-
-    def newtoken(self):
-        headers = {
-            'accept': 'application/json',
-            'X-MailboxToken': self.token,
-        }
-        self.response = requests.put(
-            f'https://www.developermail.com/api/v1/mailbox/{self.username}/token', headers=headers)
-        self.response = self.response.json()
-        self.token = self.response['result']['token']
-        return {'username': self.username, 'token': self.token}
-
-    def getmailids(self):
-        headers = {
-            'accept': 'application/json',
-            'X-MailboxToken': self.token,
-        }
-
-        self.response = requests.get(
-            f'https://www.developermail.com/api/v1/mailbox/{self.username}', headers=headers)
-        self.response = self.response.json()
-        self.mailids = self.response['result']
-        return self.mailids
-
-    def getmails(self, mailids: list = None):
-        headers = {
-            'accept': 'application/json',
-            'X-MailboxToken': self.token,
-            'Content-Type': 'application/json',
-        }
-
-        if mailids is None:
-            mailids = self.mailids
-
-        data = str(mailids)
-
-        self.response = requests.post(
-            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages', headers=headers, data=data)
-        self.response = self.response.json()
-        self.mails = self.response['result']
-        return self.mails
-
-    def getmail(self, mailid: str, raw=False):
-        headers = {
-            'accept': 'application/json',
-            'X-MailboxToken': self.token,
-        }
-        self.response = requests.get(
-            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages/{mailid}', headers=headers)
-        self.response = self.response.json()
-        self.mail = self.response['result']
-        if raw is False:
-            self.mail = email.message_from_string(self.mail)
-        return self.mail
-
-    def delmail(self, mailid: str):
-        headers = {
-            'accept': 'application/json',
-            'X-MailboxToken': self.token,
-        }
-        self.response = requests.delete(
-            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages/{mailid}', headers=headers)
-        self.response = self.response.json()
-        return self.response
-
-
-client = MailClient()
-client.newtoken()
-print(client.getmails())
diff --git a/spaces/101-5/gpt4free/g4f/models.py b/spaces/101-5/gpt4free/g4f/models.py
deleted file mode 100644
index ecf18e6dffe029d6bbd651428094083c15b77283..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/models.py
+++ /dev/null
@@ -1,201 +0,0 @@
-from g4f import Provider
-
-
-class Model:
-    class model:
-        name: str
-        base_provider: str
-        best_provider: str
-
-    class gpt_35_turbo:
-        name: str = 'gpt-3.5-turbo'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Forefront
-
-    class gpt_4:
-        name: str = 'gpt-4'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Bing
-        best_providers: list = [Provider.Bing, Provider.Lockchat]
-
-    class claude_instant_v1_100k:
-        name: str = 'claude-instant-v1-100k'
-        base_provider: str = 'anthropic'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class claude_instant_v1:
-        name: str = 'claude-instant-v1'
-        base_provider: str = 'anthropic'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class claude_v1_100k:
-        name: str = 'claude-v1-100k'
-        base_provider: str = 'anthropic'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class claude_v1:
-        name: str = 'claude-v1'
-        base_provider: str = 'anthropic'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class alpaca_7b:
-        name: str = 'alpaca-7b'
-        base_provider: str = 'replicate'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class stablelm_tuned_alpha_7b:
-        name: str = 'stablelm-tuned-alpha-7b'
-        base_provider: str = 'replicate'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class bloom:
-        name: str = 'bloom'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class bloomz:
-        name: str = 'bloomz'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class flan_t5_xxl:
-        name: str = 'flan-t5-xxl'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class flan_ul2:
-        name: str = 'flan-ul2'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class gpt_neox_20b:
-        name: str = 'gpt-neox-20b'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class oasst_sft_4_pythia_12b_epoch_35:
-        name: str = 'oasst-sft-4-pythia-12b-epoch-3.5'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class santacoder:
-        name: str = 'santacoder'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class command_medium_nightly:
-        name: str = 'command-medium-nightly'
-        base_provider: str = 'cohere'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class command_xlarge_nightly:
-        name: str = 'command-xlarge-nightly'
-        base_provider: str = 'cohere'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class code_cushman_001:
-        name: str = 'code-cushman-001'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class code_davinci_002:
-        name: str = 'code-davinci-002'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class text_ada_001:
-        name: str = 'text-ada-001'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class text_babbage_001:
-        name: str = 'text-babbage-001'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class text_curie_001:
-        name: str = 'text-curie-001'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class text_davinci_002:
-        name: str = 'text-davinci-002'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class text_davinci_003:
-        name: str = 'text-davinci-003'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class palm:
-        name: str = 'palm'
-        base_provider: str = 'google'
-        best_provider: Provider.Provider = Provider.Bard
-
-
-    """ 'falcon-40b': Model.falcon_40b,
-    'falcon-7b': Model.falcon_7b,
-    'llama-13b': Model.llama_13b,"""
-
-    class falcon_40b:
-        name: str = 'falcon-40b'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.H2o
-
-    class falcon_7b:
-        name: str = 'falcon-7b'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.H2o
-
-    class llama_13b:
-        name: str = 'llama-13b'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.H2o
-
-class ModelUtils:
-    convert: dict = {
-        'gpt-3.5-turbo': Model.gpt_35_turbo,
-        'gpt-4': Model.gpt_4,
-
-        'claude-instant-v1-100k': Model.claude_instant_v1_100k,
-        'claude-v1-100k': Model.claude_v1_100k,
-        'claude-instant-v1': Model.claude_instant_v1,
-        'claude-v1': Model.claude_v1,
-
-        'alpaca-7b': Model.alpaca_7b,
-        'stablelm-tuned-alpha-7b': Model.stablelm_tuned_alpha_7b,
-
-        'bloom': Model.bloom,
-        'bloomz': Model.bloomz,
-
-        'flan-t5-xxl': Model.flan_t5_xxl,
-        'flan-ul2': Model.flan_ul2,
-
-        'gpt-neox-20b': Model.gpt_neox_20b,
-        'oasst-sft-4-pythia-12b-epoch-3.5': Model.oasst_sft_4_pythia_12b_epoch_35,
-        'santacoder': Model.santacoder,
-
-        'command-medium-nightly': Model.command_medium_nightly,
-        'command-xlarge-nightly': Model.command_xlarge_nightly,
-
-        'code-cushman-001': Model.code_cushman_001,
-        'code-davinci-002': Model.code_davinci_002,
-
-        'text-ada-001': Model.text_ada_001,
-        'text-babbage-001': Model.text_babbage_001,
-        'text-curie-001': Model.text_curie_001,
-        'text-davinci-002': Model.text_davinci_002,
-        'text-davinci-003': Model.text_davinci_003,
-
-        'palm2': Model.palm,
-        'palm': Model.palm,
-        'google': Model.palm,
-        'google-bard': Model.palm,
-        'google-palm': Model.palm,
-        'bard': Model.palm,
-
-        'falcon-40b': Model.falcon_40b,
-        'falcon-7b': Model.falcon_7b,
-        'llama-13b': Model.llama_13b,
-    }
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bluesoleil Activation Serial Number Free Download and Install Guide.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bluesoleil Activation Serial Number Free Download and Install Guide.md
deleted file mode 100644
index e8b0e96f0239516b92d2bf8480ea4b30c2e2368a..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bluesoleil Activation Serial Number Free Download and Install Guide.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-

How to Get BlueSoleil Activation Serial Number for Free

-

Do you want to connect your Bluetooth devices with your computer without any hassle? Do you want to transfer data, make calls, listen to music, and more using Bluetooth technology? If yes, then you need BlueSoleil.

-

bluesoleil activation serial number free


Download 🆓 https://byltly.com/2uKxPy



-

What is BlueSoleil and why do you need it?

-

BlueSoleil is a Bluetooth software that allows you to connect your devices with your computer

-

BlueSoleil is a software that enables your computer to communicate with Bluetooth devices such as phones, headsets, keyboards, mice, printers, cameras, etc. It supports various Bluetooth protocols and profiles, such as A2DP, AVRCP, BIP, DUN, FTP, HID, OPP, PAN, PBAP, SPP, etc. It also supports multiple Bluetooth dongles and adapters.

-

You need BlueSoleil to transfer data, make calls, listen to music, and more

-

With BlueSoleil, you can do many things with your Bluetooth devices. For example:

- -

And much more!

-

How to get BlueSoleil activation serial number for free?

-

BlueSoleil is not free software. You need to buy a license key to activate it and use all of its features. However, if you don't want to spend money on it, there are some ways to get a BlueSoleil activation serial number for free. Here are two methods that you can try:

-

There are two ways to get BlueSoleil activation serial number for free: online and offline

-

Online method: use a website that provides serial numbers for free

-

This method is very simple and fast. All you need to do is find a website that offers serial numbers for various software for free. For example:

-

bluesoleil license key free download
-bluesoleil crack serial number generator
-bluesoleil activation code free online
-bluesoleil full version free with serial key
-bluesoleil registration key free no survey
-bluesoleil serial number free 2023
-bluesoleil activation key free windows 10
-bluesoleil crack serial number free reddit
-bluesoleil activation code free trial
-bluesoleil full version free with serial number
-bluesoleil registration key free download
-bluesoleil serial number free 2022
-bluesoleil activation key free windows 7
-bluesoleil crack serial number free 2021
-bluesoleil activation code free download
-bluesoleil full version free with serial code
-bluesoleil registration key free online
-bluesoleil serial number free 2021
-bluesoleil activation key free windows 8
-bluesoleil crack serial number free 2020
-bluesoleil activation code free online generator
-bluesoleil full version free with serial key download
-bluesoleil registration key free no download
-bluesoleil serial number free 2020
-bluesoleil activation key free windows xp
-bluesoleil crack serial number free 2019
-bluesoleil activation code free for android
-bluesoleil full version free with serial key online
-bluesoleil registration key free for mac
-bluesoleil serial number free 2019
-bluesoleil activation key free windows vista
-bluesoleil crack serial number free 2018
-bluesoleil activation code free for iphone
-bluesoleil full version free with serial key generator
-bluesoleil registration key free for windows
-bluesoleil serial number free 2018
-bluesoleil activation key free windows 2000
-bluesoleil crack serial number free 2017
-bluesoleil activation code free for ipad
-bluesoleil full version free with serial key reddit
-bluesoleil registration key free for linux
-bluesoleil serial number free 2017
-bluesoleil activation key free windows me
-bluesoleil crack serial number free 2016
-bluesoleil activation code free for pc
-bluesoleil full version free with serial key no survey
-bluesoleil registration key free for android
-bluesoleil serial number free 2016
-bluesoleil activation key free windows nt

-https://smartserials.com/serials/IVT-BlueSoleil-6.4.305.0-32199.htm
-https://smartserials.com/serials/IVT-BlueSoleil-v10.0.417.0-34510.htm
-

On these websites, you can find serial numbers for different versions of BlueSoleil. Just copy one of them and paste it when you install or activate BlueSoleil on your computer.

-

Offline method: use a crack file that bypasses the activation process

-

This method is more complicated and slow. You need to download a crack file that modifies the original BlueSoleil program and makes it think that it is already activated. For example:

-https://docslib.org/doc/10485325/bluesoleil-activation-serial-number-free
-https://peatix.com/group/10135257
-

On these websites, you can find crack files for different versions of BlueSoleil. Just download one of them and follow the instructions on how to run it on your computer.

-

What are the advantages and disadvantages of each method?

-

Online method: easy, fast, but risky

-

Advantages: you don't need to download anything, just copy and paste the serial number

-

This method is very convenient and quick. You don't have to waste time or space on downloading any files. You just have to visit a website and copy a serial number.

-

Disadvantages: you may get a fake or invalid serial number, or expose your computer to malware or viruses

-

This method is also very risky and unreliable. You may not get a valid serial number that works for your version of BlueSoleil, and the pages that distribute such serials often bundle malware or viruses that can harm your computer. You may also violate the terms of service of BlueSoleil by using an illegal serial number.

-

Offline method: complicated, slow, but safe

-

Advantages: you don't need to rely on any website, just download and run the crack file

-

This method is more secure and reliable. You don't have to depend on any website that may be shady or fraudulent. You just have to download and run the crack file that modifies the original program and bypasses the activation process.

-

Disadvantages: you may need to disable your antivirus software, or face compatibility issues with your system

-

This method is also more complex and slow. You may have to disable your antivirus software to avoid false alarms or interference with the crack file. You may also face compatibility issues with your system, such as Windows version, architecture, or language. You may also violate the terms of service of BlueSoleil by using an illegal crack file.

-

How to use BlueSoleil after getting the activation serial number?

-

Install BlueSoleil on your computer and enter the serial number when prompted

-

To use BlueSoleil, you first need to install it on your computer. You can download the latest version of BlueSoleil from its official website http://www.bluesoleil.com/ -

Choose the version that matches your system and download it. Then run the setup file and follow the instructions to install it. When you are asked to enter the serial number, paste the one that you got from the online or offline method.

-

Connect your Bluetooth devices with your computer using BlueSoleil interface

-

After installing BlueSoleil, you can use its interface to connect your Bluetooth devices with your computer. You can find the BlueSoleil icon on your desktop or taskbar. Click on it to open the main window. You will see a red ball that represents your computer and some white balls that represent the Bluetooth devices in range. You can also see some service buttons at the top of the window that indicate the supported Bluetooth functions.

-

To connect a device, you need to do the following steps:

-
    -
  1. Double-click on a white ball to search for its supported services. The service buttons that match the device will be highlighted.
  2. Click on a service button that you want to use. For example, if you want to transfer files, click on the file transfer button.
  3. A service window will pop up. Follow the instructions on the window to complete the connection and operation. For example, if you want to transfer files, you can browse and select the files on your computer or device and drag and drop them to transfer.
-

You can also right-click on a white ball to access more options, such as pairing, refreshing, disconnecting, etc.

-

Enjoy the features and functions of BlueSoleil

-

Once you have connected your Bluetooth devices with your computer using BlueSoleil, you can enjoy the features and functions of BlueSoleil. You can transfer data, make calls, listen to music, and more using Bluetooth technology. You can also manage your connections and devices using BlueSoleil interface. You can view the connection status, battery level, signal strength, etc. of your devices. You can also customize your settings, such as changing your Bluetooth name, enabling or disabling security mode, setting auto-connect options, etc.

-

Conclusion

-

BlueSoleil is a useful software that lets you connect your Bluetooth devices with your computer. You can get BlueSoleil activation serial number for free by using online or offline methods. Each method has its pros and cons, so choose wisely according to your needs and preferences. After getting the serial number, you can install BlueSoleil on your computer and enter it when prompted. Then you can connect your Bluetooth devices with your computer using BlueSoleil interface and enjoy its features and functions.

-

FAQs

-
    -
  1. Q: What are the system requirements for BlueSoleil?
     A: According to its official website, BlueSoleil supports Windows XP/Vista/7/8/10 (32-bit and 64-bit). It also requires a Bluetooth dongle or adapter that supports Bluetooth 2.0 or above.
  2. Q: How can I check if my Bluetooth dongle or adapter is compatible with BlueSoleil?
     A: You can check the compatibility list on its official website, or contact its customer service for more information.
  3. Q: How can I update BlueSoleil to the latest version?
     A: You can download the latest version of BlueSoleil from its official website, or use its built-in update function.
  4. Q: How can I get technical support for BlueSoleil?
     A: You can visit its official website and access its online help center, forum, FAQ section, or contact its customer service via email or phone.
  5. Q: How can I uninstall BlueSoleil from my computer?
     A: You can uninstall BlueSoleil from your computer by using its built-in uninstall function, or by using Windows Control Panel.
-

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chess Position Trainer 5 Crack A Comprehensive Review of the Features and Benefits of this Chess Software.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chess Position Trainer 5 Crack A Comprehensive Review of the Features and Benefits of this Chess Software.md deleted file mode 100644 index 799c26450c096e132186a59c34961b006947daa8..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chess Position Trainer 5 Crack A Comprehensive Review of the Features and Benefits of this Chess Software.md +++ /dev/null @@ -1,110 +0,0 @@ - -

Chess Position Trainer 5 Crack: How to Download and Use It

-

If you are a chess enthusiast who wants to improve your game, you might have heard of Chess Position Trainer 5, professional chess software that helps you build your own repertoire, train your openings, tactics and endgames, and analyze your games and positions with a powerful engine. But what if you don't want to pay for the software? Is there a way to get it for free? In this article, we will show you how to download and use Chess Position Trainer 5 crack, as well as the risks and drawbacks of doing so.

-

What is Chess Position Trainer 5?

-

Chess Position Trainer 5 is a chess training software that was developed by chesspositiontrainer.com in 2019. It is designed to help chess players of all levels create, manage and study their chess opening repertoire. It also offers features such as flash-card based training, blindfold mode, position explorer, engine analysis, game import/export, statistics, and more. Chess Position Trainer 5 claims to be a major time saver for your daily work and make it fun to train your chess openings. It supports multiple languages such as English, Deutsch, Français, Nederlands, Italiano, Português.

-

Chess Position Trainer 5 Crack


Download » https://byltly.com/2uKvdW



-

Features and benefits of Chess Position Trainer 5

-

Some of the features and benefits of Chess Position Trainer 5 are:

- -

How to download Chess Position Trainer 5 for free

-

If you want to download Chess Position Trainer 5 for free, you will need to find a crack file that bypasses the software's protection and activation system. A crack file is a modified version of the original software file that allows you to use it without paying for it. However, finding a reliable source for downloading Chess Position Trainer 5 crack can be tricky and risky. Here are some steps you can follow:

-

Step 1: Find a reliable source

-

The first step is to find a website that offers Chess Position Trainer 5 crack for download. You can use search engines such as Google or Bing to look for keywords such as "Chess Position Trainer 5 crack", "Chess Position Trainer 5 free download", "Chess Position Trainer 5 torrent", etc. However, be careful not to click on any suspicious or malicious links that might harm your computer or steal your personal information. You can use tools such as VirusTotal or Web of Trust to check the reputation and safety of the websites before visiting them.

-

Step 2: Download the setup file and the crack file

-

The second step is to download the setup file and the crack file from the website you have chosen. The setup file is usually an executable file (.exe) that installs the software on your computer. The crack file is usually a DLL file (.dll) that replaces the original file in the software's folder. Sometimes, the crack file might also come with instructions on how to apply it. Make sure you scan both files with an antivirus program before opening them.

-

Step 3: Install the software and apply the crack

-

The third step is to install the software and apply the crack on your computer. To install the software, run the setup file and follow the instructions on the screen. To apply the crack, copy the crack file and paste it into the software's folder (usually located in C:\Program Files\ChessPositionTrainer). If prompted, overwrite or replace the existing file. You might also need to disable your antivirus program or firewall temporarily while applying the crack.

-

How to use Chess Position Trainer 5 to improve your chess skills

-

If you have successfully installed and cracked Chess Position Trainer 5 on your computer, you can start using it to improve your chess skills. Here are some steps you can follow:

-

Chess Position Trainer 5 download
-Chess Position Trainer 5 free version
-Chess Position Trainer 5 license key
-Chess Position Trainer 5 serial number
-Chess Position Trainer 5 activation code
-Chess Position Trainer 5 full version
-Chess Position Trainer 5 torrent
-Chess Position Trainer 5 keygen
-Chess Position Trainer 5 patch
-Chess Position Trainer 5 registration code
-Chess Position Trainer 5 review
-Chess Position Trainer 5 tutorial
-Chess Position Trainer 5 alternative
-Chess Position Trainer 5 online
-Chess Position Trainer 5 manual
-Chess Position Trainer 5 features
-Chess Position Trainer 5 tips
-Chess Position Trainer 5 tricks
-Chess Position Trainer 5 guide
-Chess Position Trainer 5 video
-Chess Position Trainer 5 demo
-Chess Position Trainer 5 trial
-Chess Position Trainer 5 update
-Chess Position Trainer 5 latest version
-Chess Position Trainer 5 system requirements
-Chess Position Trainer 5 forum
-Chess Position Trainer 5 support
-Chess Position Trainer 5 feedback
-Chess Position Trainer 5 testimonials
-Chess Position Trainer 5 comparison
-Chess Position Trainer 5 benefits
-Chess Position Trainer 5 pros and cons
-Chess Position Trainer 5 discount
-Chess Position Trainer 5 coupon code
-Chess Position Trainer 5 best price
-Chess Position Trainer 5 free trial
-Chess Position Trainer 5 how to use
-Chess Position Trainer 5 how to install
-Chess Position Trainer 5 how to crack
-Chess Position Trainer 5 how to activate
-Chess Position Trainer 5 how to update
-Chess Position Trainer 5 how to uninstall
-Chess Position Trainer 5 how to backup
-Chess Position Trainer 5 how to restore
-Chess Position Trainer 5 how to export
-Chess Position Trainer 5 how to import
-Chess Position Trainer 5 how to create repertoire
-Chess Position Trainer 5 how to train openings
-Chess Position Trainer 5 how to improve chess skills

-

Step 1: Create and manage your own repertoire

-

The first step is to create and manage your own repertoire using Chess Position Trainer 5's innovative repertoire concept. You can create topics for different openings, endgames, middle-games or key positions that you want to study. You can also create new variations or edit existing ones using the position editor. You can add comments, diagrams, symbols or links to each position using the rich formatting feature. You can also reorder candidate moves according to their importance using the automatic re-ordering feature.

-

Step 2: Train your openings, tactics and endgames with flash cards

-

The second step is to train your openings, tactics and endgames with flash cards using Chess Position Trainer 5's flash-card based training concept. You can select which topics or positions you want to train from your repertoire using various criteria such as scheduled only, random, photo reading or systematic. You can also choose which side (white or black) you want to play from each position. You will be shown a position on the board and asked to enter the best move or moves according to your repertoire. You will get immediate feedback on whether you are correct or not. You can also review your mistakes or repeat difficult positions later.

-

Step 3: Analyze your games and positions with a powerful engine

-

The third step is to analyze your games and positions with a powerful engine using Chess Position Trainer 5's UCI engine support feature. You can import games from PGN files or databases into Chess Position Trainer 5 using the game import feature. You can also export games from Chess Position Trainer 5 into PGN files or websites using the game export feature. You can then analyze each move or position in your games using a UCI engine such as Stockfish or Komodo. You can see various information such as evaluation score, best move, depth Continuing the article:

Risks and drawbacks of using Chess Position Trainer 5 crack

-

While using Chess Position Trainer 5 crack might seem like a good idea to save money and enjoy the software's features, it also comes with many risks and drawbacks that you should be aware of. Here are some of them:

-

Legal issues and ethical concerns

-

Using Chess Position Trainer 5 crack is illegal, as it violates the software's copyright law. You are essentially stealing from the software developers who spent time and money creating the product. You could face fines of up to $150,000 and up to five years in prison for each instance of using or distributing cracked software. You could also damage your reputation and credibility as a chess player or professional if you are caught using pirated software. Moreover, using Chess Position Trainer 5 crack is unethical, as it shows a lack of respect and appreciation for the work of the software creators. You are also depriving them of their rightful income and discouraging them from developing more quality products in the future.

-

Potential malware and viruses

-

Using Chess Position Trainer 5 crack is dangerous, as it exposes your computer to potential malware and viruses. The websites that offer cracked software are often shady and untrustworthy, and they might infect your computer with malicious programs that can harm your system, steal your personal information, or display unwanted ads. The crack files themselves might also contain hidden malware or viruses that can compromise your security and privacy. Even if you scan the files with an antivirus program, you might not detect all the threats, as some malware can evade detection or disable your antivirus program. You could end up losing your data, money, or identity by using cracked software.

-

Lack of updates and support

-

Using Chess Position Trainer 5 crack is inconvenient, as it deprives you of updates and support from the software developers. The cracked software might not work properly or have bugs or errors that affect its functionality. You might not be able to access some features or options that are available in the original software. You might also miss out on new features or improvements that are added to the software through regular updates. Moreover, you will not be able to contact the software developers for any questions or issues that you encounter while using the software. You will have to rely on yourself or other users of cracked software for troubleshooting, which might not be effective or reliable.

-

Conclusion

-

Chess Position Trainer 5 is a professional chess software that can help you improve your chess skills by creating, managing and studying your chess opening repertoire. It also offers other features such as flash-card based training, blindfold mode, position explorer, engine analysis, game import/export, statistics, and more. However, using Chess Position Trainer 5 crack is not a wise decision, as it involves many risks and drawbacks such as legal issues, ethical concerns, potential malware and viruses, lack of updates and support. You should respect the software developers' rights and efforts and purchase the original software instead of using cracked software.

-

FAQs

- -

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download IVONA Text to Speech 1.6.63 with crack (All voices) and create amazing audio content.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download IVONA Text to Speech 1.6.63 with crack (All voices) and create amazing audio content.md deleted file mode 100644 index c0e53464be992a8a42a82464caeb44a698e95141..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download IVONA Text to Speech 1.6.63 with crack (All voices) and create amazing audio content.md +++ /dev/null @@ -1,78 +0,0 @@ -
-

IVONA Text to Speech 1.6.63 with crack (All voices) download

-

Do you want to convert any text into natural and expressive speech? Do you want to enjoy a wide range of voices and languages for free? Do you want to download and install a high-quality text-to-speech software in a few simple steps? If you answered yes to any of these questions, then you are in luck! In this article, I will show you how to download and install IVONA Text to Speech 1.6.63 with crack (All voices), a powerful and versatile text-to-speech software that can transform any text into lifelike speech.

-

What is IVONA Text to Speech?

-

IVONA Text to Speech is a speech synthesizer that can convert any text into speech (TTS). It uses advanced technology that allows it to create digital voices that speak various languages. IVONA voices are recognized for their natural and expressive sound, which makes them suitable for various purposes such as education, entertainment, accessibility, and communication.

-

IVONA Text to Speech 1.6.63 with crack (All voices) download


Download >>> https://byltly.com/2uKxqY



-

Some of the features and benefits of IVONA Text to Speech are:

- -

How to download IVONA Text to Speech 1.6.63 with crack (All voices)?

-

If you want to enjoy all the features and benefits of IVONA Text to Speech without paying anything, you need to download and install IVONA Text to Speech 1.6.63 with crack (All voices). This is a cracked version of the original software that allows you to use all the languages and voices for free.

-

To download and install IVONA Text to Speech 1.6.63 with crack (All voices), you need to follow these steps:

-

Download links for IVONA Text to Speech 1.6.63 with crack (All voices)

-

The first step is to download the files you need from the internet. You need two types of files: the IVONA Text-to-Speech HQ app and the voice files.

-

The IVONA Text-to-Speech HQ app is the main application that manages the voice and language settings and allows you to convert text to speech using different modes.

-

How to get IVONA Text to Speech 1.6.63 full version for free
-IVONA Text to Speech 1.6.63 cracked software with all languages
-Download IVONA Text to Speech 1.6.63 with patch and keygen
-IVONA Text to Speech 1.6.63 torrent download with crack and serial
-IVONA Text to Speech 1.6.63 activation code generator online
-IVONA Text to Speech 1.6.63 license key crack download
-IVONA Text to Speech 1.6.63 with all voices free download
-IVONA Text to Speech 1.6.63 latest version with crack download
-IVONA Text to Speech 1.6.63 portable edition with crack download
-IVONA Text to Speech 1.6.63 offline installer with crack download
-IVONA Text to Speech 1.6.63 for Windows 10/8/7 with crack download
-IVONA Text to Speech 1.6.63 for Mac OS X with crack download
-IVONA Text to Speech 1.6.63 for Linux with crack download
-IVONA Text to Speech 1.6.63 for Android with crack download
-IVONA Text to Speech 1.6.63 for iOS with crack download
-IVONA Text to Speech 1.6.63 review and features
-IVONA Text to Speech 1.6.63 alternatives and competitors
-IVONA Text to Speech 1.6.63 pros and cons
-IVONA Text to Speech 1.6.63 comparison and ratings
-IVONA Text to Speech 1.6.63 testimonials and feedbacks
-IVONA Text to Speech 1.6.63 tutorial and guide
-IVONA Text to Speech 1.6.63 tips and tricks
-IVONA Text to Speech 1.6.63 FAQs and troubleshooting
-IVONA Text to Speech 1.6.63 support and customer service
-IVONA Text to Speech 1.6.63 updates and changelog
-How to use IVONA Text to Speech 1.6.63 with crack effectively
-How to customize IVONA Text to Speech 1.6.63 with crack settings
-How to add new voices to IVONA Text to Speech 1.6.63 with crack
-How to convert text to speech with IVONA Text to Speech 1.6.63 with crack
-How to save text to speech as audio files with IVONA Text to Speech 1.6.63 with crack
-How to integrate IVONA Text to Speech 1.6.63 with crack with other applications
-How to improve the quality of text to speech with IVONA Text to Speech 1.6.63 with crack
-How to fix common errors and issues with IVONA Text to Speech 1.6.63 with crack
-How to uninstall and remove IVONA Text to Speech 1.6.63 with crack completely
-How safe and secure is IVONA Text to Speech 1.6.63 with crack download
-How legal and ethical is IVONA Text to Speech 1.6.63 with crack download
-How fast and reliable is IVONA Text to Speech 1.6.63 with crack download
-How much disk space and memory does IVONA Text to Speech 1.6.63 with crack require
-How compatible is IVONA Text to Speech 1.6

-

The voice files are the files that contain the data for each voice and language you want to use.

-

You can find the download links for both types of files below:

-

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Be a Pro - Football Real Teams Real Players Real Action.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Be a Pro - Football Real Teams Real Players Real Action.md deleted file mode 100644 index f0ccd0d7b41e0fdae8da509d7cae93e2476df190..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Be a Pro - Football Real Teams Real Players Real Action.md +++ /dev/null @@ -1,97 +0,0 @@ -
-

Be a Pro - Football: How to Download and Play the Best Online Football Game

-

If you are a fan of football, you must have heard of Be a Pro - Football, the most thrilling football game on your mobile device. This game lets you join realistic fast-paced real-time PVP matches with your own dream team. You can face online players from all over the world and compete for the glory of being the best football club. But how can you download and play this amazing game on your Android device? In this article, we will show you how to do that in simple steps.

-

What is Be a Pro - Football?

-

Be a Pro - Football is an online football game developed by VPlay Interactive Private Limited. It is available for free on the Google Play Store, but you can also download the apk and obb files from other sources. The apk file is the application package that contains the game's code and resources, while the obb file is the expansion file that contains the game's data, such as graphics, sounds, and videos.

-

be a pro football apk and obb download


Download Zip ⚹⚹⚹ https://urlin.us/2uSYXe



-

Key features of Be a Pro - Football

-

Be a Pro - Football has many features that make it stand out from other football games. Here are some of them:

- -

How to download Be a Pro - Football apk and obb files

-

To download Be a Pro - Football apk and obb files, you need to find a reliable source that offers them for free. You can use APKCombo or mob.org as examples. These websites allow you to download the latest version of Be a Pro - Football apk and obb files in one package. You just need to click on the download button and wait for the file to be downloaded to your device.

-

How to install Be a Pro - Football on your Android device

-

After downloading Be a Pro - Football apk and obb files, you need to follow these steps to install them on your Android device:

-

Step 1: Enable unknown sources

-

Before installing any apk file from outside the Google Play Store, you need to enable unknown sources on your device. This will allow you to install apps from other sources than the official store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message, but just tap OK to proceed.

-

Step 2: Locate and extract the obb file

-

The obb file is a zip file that contains the game's data. You need to extract it to a specific folder on your device. To do this, you need a file manager app that can handle zip files, such as ES File Explorer. Open the file manager app and locate the downloaded Be a Pro - Football zip file. Tap on it and choose Extract. You will see a folder named com.vplayinteractive.bap. This is the obb folder that you need to move to the Android/obb folder on your device. You can use the Cut and Paste commands to do this.

-

Step 3: Install the apk file

-

After moving the obb folder, you can install the apk file. Go back to the file manager app and locate the downloaded Be a Pro - Football apk file. Tap on it and choose Install. You may see a prompt asking you to confirm the installation, just tap Install again. Wait for the installation to finish and then tap Open to launch the game.

-

be a pro football game download apk and obb
-how to install be a pro football xapk on android
-be a pro football online pvp apk obb
-be a pro football realistic 3d graphics apk obb
-be a pro football champions league apk obb
-be a pro football mod apk and obb unlimited money
-be a pro football offline mode apk obb
-be a pro football latest version apk and obb
-be a pro football apk obb free download for android
-be a pro football transfer market apk obb
-be a pro football squad optimization apk obb
-be a pro football fast-paced thrilling match apk obb
-be a pro football dream team creation apk obb
-be a pro football smooth motions apk obb
-be a pro football level up your team apk obb
-be a pro football 11v11 online pvp apk obb
-be a pro football realistic dribble tackle shooting passing apk obb
-be a pro football high-precision 3d player modeling apk obb
-be a pro football idle league mode apk obb
-be a pro football vplay interactive private limited apk obb
-download be a pro football game for android apk and obb
-be a pro football full 3d motion capture apk obb
-be a pro football best sports game apk obb
-be a pro football realistic physics engine apk obb
-be a pro football google play id com.Trois.NOX apk obb
-be a pro football version 0.204.4 apk and obb download
-be a pro football update nov 17 2022 apk obb
-be a pro football android 5.0+ compatible apk obb
-be a pro football 924 mb size apk and obb file download
-be a pro football old versions download apk and obb
-how to play be a pro football on pc with emulator apk and obb
-be a pro football tips and tricks guide apk and obb download
-how to get free coins in be a pro football game apk and obb
-how to fix be a pro football not working or crashing issue apk and obb
-how to uninstall or delete be a pro football from your device apk and obb
-how to backup or restore your progress in be a pro football game apk and obb
-how to contact the developer of be a pro football game for support or feedback apk and obb
-how to join or create a club in be a pro football game online mode apk and obb
-how to customize your players and kits in be a pro football game offline mode apk and obb
-how to change the language or sound settings in be a pro football game options menu apk and obb

-

How to play Be a Pro - Football online

-

Now that you have installed Be a Pro - Football on your device, you can start playing it online with other players. Here are some tips on how to play the game:

-

Choose your team and players

-

When you launch the game, you will be asked to choose your team name, logo, and kit. You can also customize your players' names, appearances, and attributes. You can unlock more options as you progress in the game. You can also buy coins and gems with real money to get more items and features.

-

Join real-time 11v11 matches

-

The main mode of Be a Pro - Football is the real-time 11v11 online PVP mode. In this mode, you can join matches with other players from around the world and compete for rankings and rewards. You can choose from different leagues and tournaments, such as Champions League, Europa League, World Cup, and more. You can also create or join clubs with your friends and play together.

-

Upgrade your skills and tactics

-

To win matches, you need to upgrade your skills and tactics. You can train your players to improve their attributes, such as speed, strength, stamina, shooting, passing, dribbling, tackling, and more. You can also change your formation, strategy, and style of play according to your opponents and situations. You can use different skills and items during matches, such as sprint, slide tackle, cross, shoot, pass, dribble, header, volley, chip shot, curve shot, free kick, corner kick, penalty kick, and more.

-

Why you should play Be a Pro - Football

-

Be a Pro - Football is not just another football game. It is the best online football game that you can play on your mobile device. Here are some reasons why you should play it:

-

Realistic graphics and animations

-

Be a Pro - Football uses full 3D motion capture technology to create realistic graphics and animations for the players and stadiums. You will feel like you are watching a real football match on your screen. The game also has dynamic weather effects, such as rain, snow, fog, wind, and more.

-

Thrilling and fast-paced gameplay

-

Be a Pro - Football captures the true spirit of football with its thrilling and fast-paced gameplay. You will experience the excitement of attacking and defending in real-time matches with other players. You will need to use your skills and tactics to score goals and prevent goals from your opponents.

-

Competitive and diverse modes

-

Be a Pro - Football offers competitive and diverse modes for you to enjoy. You can play in different leagues and tournaments with different rules and rewards. You can also play in idle mode where you can earn coins and gems even when you are offline. You can also challenge yourself in various missions and achievements.

-

Conclusion

-

Be a Pro - Football is the best online football game that you can play on your mobile device. It has realistic graphics and animations, thrilling and fast-paced gameplay, competitive and diverse modes, and many more features that will keep you hooked for hours. If you want to download and play this amazing game on your Android device, just follow the steps we have shown you in this article. You will need to download the apk and obb files from a reliable source , enable unknown sources on your device, extract the obb file[^5 to the Android/obb folder, and install the apk file. You will then be able to enjoy the best online football game on your mobile device. Have fun and good luck!

-

FAQs

-

Here are some frequently asked questions about Be a Pro - Football:

-

Q: How can I get more coins and gems in Be a Pro - Football?

-

A: You can get more coins and gems by playing matches, completing missions, achievements, and daily tasks, participating in events and tournaments, watching ads, and buying them with real money.

-

Q: How can I change my team name, logo, and kit in Be a Pro - Football?

-

A: You can change your team name, logo, and kit by tapping on the Team button on the main screen. You can then edit your team details and customize your team appearance.

-

Q: How can I join or create a club in Be a Pro - Football?

-

A: You can join or create a club by tapping on the Club button on the main screen. You can then search for an existing club or create your own club. You can invite your friends to join your club and play together.

-

Q: How can I chat with other players in Be a Pro - Football?

-

A: You can chat with other players by tapping on the Chat button on the main screen. You can then choose from different chat channels, such as Global, Club, Match, and Private. You can also use emojis and stickers to express yourself.

-

Q: How can I contact the support team of Be a Pro - Football?

-

A: You can contact the support team of Be a Pro - Football by tapping on the Settings button on the main screen. You can then choose from different options, such as FAQ, Feedback, Report, and Contact Us. You can also email them at support@vplayinteractive.com.

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Ultimate MOD APK 1.5.3 Drive Your Way to Success with Unlimited Money and More.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Ultimate MOD APK 1.5.3 Drive Your Way to Success with Unlimited Money and More.md deleted file mode 100644 index bd30d97a13ba3baf9a3750fbd8974d6891f2c21b..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Ultimate MOD APK 1.5.3 Drive Your Way to Success with Unlimited Money and More.md +++ /dev/null @@ -1,81 +0,0 @@ - -

Bus Simulator Ultimate 1.5.3 Mod APK: A Review

-

Do you love driving buses and exploring different cities? Do you want to start your own bus company and become a successful entrepreneur? If yes, then you should try Bus Simulator Ultimate, a bus driving simulation game with a unique story released by Zuuks Games. More than a normal driving game, it also lets you experience the feeling of starting a business, managing your staff, expanding your routes, and competing with other players online. And if you want to enjoy the game without any limitations, you can download Bus Simulator Ultimate Mod APK, which gives you unlimited money, gold, and access to all the buses and skins in the game. In this article, we will review Bus Simulator Ultimate and its mod version, and show you how to download and install it on your device.

-

What is Bus Simulator Ultimate?

-

Bus Simulator Ultimate is a realistic bus driving simulation game that lets you drive various types of buses across different countries and cities. You can choose from over 25 different buses, each with their own features and specifications, such as speed, capacity, fuel consumption, etc. You can also customize your buses with different skins, colors, logos, stickers, etc. You can drive in different weather conditions, day and night cycles, traffic situations, road types, etc. You can also interact with your passengers, who have their own personalities and behaviors. You can hear their conversations, complaints, compliments, etc.

-

bus simulator ultimate 1.5.3 mod apk


Download » https://urlin.us/2uSZJD



-

Features of Bus Simulator Ultimate

-

Bus Simulator Ultimate has many features that make it one of the best bus driving games on the market. Here are some of them:

-

Realistic bus driving experience

-

The game has realistic physics and graphics that make you feel like you are driving a real bus. You can control your bus with various options, such as steering wheel, buttons, tilt, etc. You can also use indicators, headlights, horn, wipers, etc. You can follow the traffic rules and signs, or break them if you want to. You can also experience realistic sounds of engine, brakes, horns, etc.

-

Multiple game modes and maps

-

The game has different game modes that suit your preferences and skills. You can play in career mode, where you start your own bus company and complete various missions and challenges. You can also play in free mode, where you can drive freely in any map without any restrictions. You can also play in multiplayer mode, where you can join or create online rooms and play with other players around the world. You can chat with them, race with them, cooperate with them, etc.

-

The game has over 15 maps that cover different countries and cities, such as Germany, France, Italy, Spain, USA, Brazil, Turkey, etc. Each map has its own landmarks, scenery, culture, etc. You can explore them and discover new places and routes.

-

Customizable buses and skins

-

The game has over 25 different buses that you can choose from. Each bus has its own features and specifications that affect its performance and fuel consumption. You can also customize your buses with different skins that change their appearance. You can choose from various colors, logos, stickers, etc. You can also create your own skins using the skin editor tool.

-

Online multiplayer and leaderboards

-

The game has an online multiplayer mode that lets you play with other players around the world. You can join or create online rooms and chat with other players. You can also race with them or cooperate with them in different missions and challenges. You can also check the leaderboards and see your rank and stats compared to other players.

-

Business management and simulation

-

The game has a unique feature that lets you start and run your own bus company. You can name your company, choose your logo, hire and fire your staff, buy and sell your buses, etc. You can also manage your finances, expenses, income, etc. You can also expand your routes and services to different countries and cities. You can also compete with other companies and see your reputation and popularity.

-

bus simulator ultimate mod apk unlimited money and gold
-bus simulator ultimate hack apk download for android
-bus simulator ultimate mod menu apk latest version
-bus simulator ultimate 1.5.3 mod apk free shopping
-bus simulator ultimate mod apk all buses unlocked
-bus simulator ultimate hack apk ios no jailbreak
-bus simulator ultimate mod apk offline play
-bus simulator ultimate 1.5.3 mod apk revdl
-bus simulator ultimate mod apk unlimited xp and fuel
-bus simulator ultimate hack apk online generator
-bus simulator ultimate mod apk new update 2021
-bus simulator ultimate 1.5.3 mod apk happymod
-bus simulator ultimate mod apk no ads and license verification
-bus simulator ultimate hack apk obb data file
-bus simulator ultimate mod apk realistic graphics and physics
-bus simulator ultimate 1.5.3 mod apk rexdl
-bus simulator ultimate mod apk multiplayer mode enabled
-bus simulator ultimate hack apk unlimited everything
-bus simulator ultimate mod apk premium features unlocked
-bus simulator ultimate 1.5.3 mod apk an1.com
-bus simulator ultimate mod apk custom skins and routes
-bus simulator ultimate hack apk latest version 2021
-bus simulator ultimate mod apk no root required
-bus simulator ultimate 1.5.3 mod apk android 1
-bus simulator ultimate mod apk high speed and performance
-bus simulator ultimate hack apk anti ban and safe to use
-bus simulator ultimate mod apk full game download
-bus simulator ultimate 1.5.3 mod apk pure.com
-bus simulator ultimate mod apk real traffic and weather conditions
-bus simulator ultimate hack apk unlimited coins and gems
-bus simulator ultimate mod apk best simulation game for android
-bus simulator ultimate 1.5.3 mod apk apkpure.com
-bus simulator ultimate mod apk easy controls and user interface
-bus simulator ultimate hack apk no survey or human verification
-bus simulator ultimate mod apk support all android devices
-bus simulator ultimate 1.5.3 mod apk mob.org
-bus simulator ultimate mod apk different countries and cities to explore
-bus simulator ultimate hack apk vip access and premium membership
-bus simulator ultimate mod apk fun and addictive gameplay
-bus simulator ultimate 1.5.3 mod apk uptodown.com

-

What is Bus Simulator Ultimate Mod APK?

-

Bus Simulator Ultimate Mod APK is a modified version of the original game that gives you some extra benefits and features that are not available in the official version. With this mod APK, you can enjoy the game without any limitations or restrictions. Here are some of the features of Bus Simulator Ultimate Mod APK:

-

Unlimited money and gold

-

With this mod APK, you will have unlimited money and gold in the game. You can use them to buy any bus you want, upgrade them, customize them, etc. You can also use them to hire more staff, expand your routes, etc. You don't have to worry about running out of money or gold in the game.

-

All buses and skins unlocked

-

With this mod APK, you will have access to all the buses and skins in the game. You don't have to complete any missions or challenges to unlock them. You can choose any bus you want and customize it with any skin you want. You can also create your own skins using the skin editor tool.

-

No ads and no root required

-

With this mod APK, you will not see any ads in the game. You can enjoy the game without any interruptions or distractions. You also don't need to root your device to install this mod APK. You can install it easily on any Android device without any risk of damaging it.

-

How to download and install Bus Simulator Ultimate Mod APK?

-

If you want to download and install Bus Simulator Ultimate Mod APK on your device, you need to follow these simple steps:

-

Step 1: Download the mod APK file from a trusted source

-

You need to download the mod APK file from a reliable source that provides safe and virus-free downloads. You can use this link to download the latest version of Bus Simulator Ultimate Mod APK: Bus Simulator Ultimate 1.5.3 Mod APK Download

-

Step 2: Enable unknown sources on your device settings

-

You need to enable unknown sources on your device settings to allow the installation of apps from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

-

Step 3: Install the mod APK file and enjoy the game

-

You need to locate the downloaded mod APK file on your device storage and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish. Once done, you can open the game and enjoy it with all the mod features.

-

Conclusion

-

Bus Simulator Ultimate is a fun and realistic bus driving simulation game that lets you drive various types of buses across different countries and cities. You can also start your own bus company and manage it as a business simulation game, play online with other players, and compete with them in different modes and challenges. And if you want to enjoy the game without any limitations, you can download Bus Simulator Ultimate Mod APK, which gives you unlimited money, gold, and access to all the buses and skins in the game.

-

If you liked this article, please share it with your friends who love bus driving games. And if you have any questions or feedback about Bus Simulator Ultimate or its mod version, please leave a comment below. We would love to hear from you.

FAQs

Q: Is Bus Simulator Ultimate Mod APK safe to download and install?
A: Yes, Bus Simulator Ultimate Mod APK is safe to download and install on any Android device. It does not contain any viruses or malware that can harm your device or data.

Q: Can I play Bus Simulator Ultimate offline?
A: Yes, you can play Bus Simulator Ultimate offline in career mode or free mode. However, you need an internet connection to play online multiplayer mode.

Q: How can I update Bus Simulator Ultimate Mod APK?
A: To update Bus Simulator Ultimate Mod APK, you need to download the latest version of the mod APK file from the same source that you downloaded it from before. Then, you need to uninstall the previous version of the game from your device and install the new version of the mod APK file. You can also check the source website for any updates or notifications about the mod APK.

Q: How can I contact the developers of Bus Simulator Ultimate?
A: You can contact the developers of Bus Simulator Ultimate by visiting their official website: Zuuks Games. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, etc.

Q: What are some other bus driving games that I can play?
A: Some other bus driving games that you can play are:
• Bus Simulator 2021: A realistic bus driving game that lets you drive in various cities and countries, such as New York, London, Paris, Berlin, etc. You can also customize your buses and create your own routes.
• Coach Bus Simulator: A bus driving game that lets you drive different types of coaches and transport passengers across Europe. You can also upgrade your coaches and hire new drivers.
• World Bus Driving Simulator: A bus driving game that lets you drive in different countries and regions, such as Brazil, USA, Canada, etc. You can also choose from different bus models and customize them with various accessories.

-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cubase 12 The Most Advanced and Reliable Music Production Software.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cubase 12 The Most Advanced and Reliable Music Production Software.md deleted file mode 100644 index 46c0f0b8cbd0fca22f3475cb0ea0fdff432181d2..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cubase 12 The Most Advanced and Reliable Music Production Software.md +++ /dev/null @@ -1,305 +0,0 @@ - -

Cubase 12: A Complete Guide for Beginners

-

If you are looking for a powerful and versatile music production software, you might want to check out Cubase 12. Cubase is one of the most popular and trusted DAWs (Digital Audio Workstations) in the world, used by professional composers, producers, engineers, and musicians across all genres and styles. Whether you want to create your own music, record and edit audio, mix and master your tracks, or collaborate with other artists, Cubase can help you achieve your musical goals.

-

In this article, we will give you a complete overview of Cubase 12, its main features and benefits, and how to get started with it. We will also show you some tips and tricks on how to use some of the new and improved tools in Cubase 12, such as MIDI Remote integration, VariAudio and Scale Assistant, Sampler Track 2, MixConsole, and more. By the end of this article, you will have a better understanding of what Cubase 12 can do for you and how to use it effectively.

-

cubase download 12


DOWNLOAD ––– https://urlin.us/2uSUXh



-

MIDI Remote Integration

-

MIDI controllers are essential devices for any music production setup. They allow you to control various parameters and functions in your software with physical knobs, faders, buttons, pads, keys, or pedals. In Cubase 12, integrating your MIDI controller has become easier than ever. Thanks to the new concept of MIDI Remote Scripts, Cubase will automatically detect your device and map the controls accordingly. If there is no script for your device yet, you can easily create your own with the MIDI Controller Surface Editor. You can then conveniently connect controls and parameters with the Mapping Assistant.

-

How to connect and configure your MIDI controller with Cubase 12?

-

To connect your MIDI controller with Cubase 12, follow these steps:

-

cubase pro 12 download free trial
-cubase 12 download steinberg download assistant
-cubase 12 download full version crack
-cubase 12 download mac
-cubase 12 download windows 10
-cubase artist 12 download
-cubase elements 12 download
-cubase ai 12 download
-cubase le 12 download
-cubase 12 download size
-cubase 12 download link
-cubase 12 download iso
-cubase 12 download offline installer
-cubase 12 download update
-cubase 12 download reddit
-cubase 12 download requirements
-cubase 12 download activation code
-cubase 12 download tutorial
-cubase 12 download problems
-cubase 12 download error
-cubase 12 download speed
-cubase 12 download license
-cubase 12 download price
-cubase 12 download coupon code
-cubase 12 download discount
-cubase 12 download review
-cubase 12 download features
-cubase 12 download comparison
-cubase 12 download tips and tricks
-cubase 12 download guide pdf
-cubase 12 download manual pdf
-cubase 12 download online help
-cubase 12 download support forum
-cubase 12 download customer service
-cubase 12 download feedback form
-cubase 12 download testimonials
-cubase 12 download videos youtube
-cubase 12 download podcasts spotify
-cubase 12 download courses udemy
-cubase 12 download books amazon
-cubase 12 download plugins vst4free
-cubase 12 download sounds loopmasters
-cubase 12 download presets splice
-cubase 12 download templates production expert
-cubase 12 download projects sound on sound
-cubase 12 download midi loops groove3
-cubase 12 download samples native instruments
-cubase 12 download instruments arturia
-cubase 12 download effects waves

-
1. Connect your MIDI controller to your computer via USB or MIDI cable.
2. Launch Cubase 12 and create or open a project.
3. Go to Studio > Studio Setup.
4. Click on the + button in the top left corner and select your MIDI controller from the list.
5. If there is a script available for your device, it will be automatically loaded. If not, you can create one using the MIDI Controller Surface Editor.
6. Click on Apply and OK.
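As an optional sanity check before launching Cubase, you can confirm that your operating system actually exposes the controller as a MIDI input port. This has nothing to do with Cubase itself; it is a small sketch using the third-party Python library mido (with a backend such as python-rtmidi installed), purely to list the ports the OS reports:

```python
import mido  # third-party: pip install mido python-rtmidi

# List every MIDI input port the operating system currently exposes.
# Your controller should show up here; if it does not, no DAW will see it either.
for name in mido.get_input_names():
    print(name)
```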

How to use MIDI Remote Scripts and Mapping Assistant to customize your control surface?

-

How to record and edit MIDI data with Cubase 12?

-

To record and edit MIDI data with Cubase 12, follow these steps:

-
1. Create a MIDI track and assign it to a VST instrument or an external MIDI device.
2. Set the input and output routing for the MIDI track in the Inspector.
3. Enable the Record Enable button on the MIDI track.
4. Set the left and right locators to define the recording range.
5. Click the Record button on the Transport panel or press * on the numeric keypad to start recording.
6. Play your MIDI controller or use the On-Screen Keyboard to enter MIDI notes.
7. Click the Stop button on the Transport panel or press 0 on the numeric keypad to stop recording.
8. The recorded MIDI data will appear as a MIDI part on the MIDI track. You can double-click it to open it in the Key Editor, where you can edit the notes, velocities, lengths, and other parameters.
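To make the data model concrete: a recorded MIDI part boils down to timed note events, each with a pitch and a velocity, which is exactly what the Key Editor lets you edit. The sketch below is not Cubase code; it uses the third-party Python library mido to write a one-note MIDI file that any DAW could import:

```python
import mido  # third-party: pip install mido

mid = mido.MidiFile()            # default resolution: 480 ticks per beat
track = mido.MidiTrack()
mid.tracks.append(track)

# One quarter note of middle C (note 60) at velocity 100.
# 'time' is in ticks relative to the previous message.
track.append(mido.Message('note_on', note=60, velocity=100, time=0))
track.append(mido.Message('note_off', note=60, velocity=64, time=480))

mid.save('one_note.mid')  # importing this file onto a MIDI track shows the same data in a piano-roll editor
```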

VariAudio and Scale Assistant

-

VariAudio is a powerful tool for editing and correcting the pitch of your vocals and monophonic instruments. It allows you to analyze, modify, and quantize the pitch of each note in an audio file, as well as adjust its formant, vibrato, and volume. You can also use VariAudio to create harmonies, melodies, or effects from your audio material. Scale Assistant is a new feature in Cubase 12 that helps you set and follow the scale of your project. It can automatically detect the scale of your audio or MIDI data, or let you choose one from a list of presets or custom scales. You can then use Scale Assistant to snap your notes to the scale, transpose them, or color-code them according to their harmonic function.

-

How to use VariAudio to edit and correct the pitch of your vocals and monophonic instruments?

-

To use VariAudio to edit and correct the pitch of your vocals and monophonic instruments, follow these steps:

-
1. Select an audio event that contains vocals or monophonic instruments in the Project window.
2. Open the Sample Editor by double-clicking the audio event.
3. In the Inspector, click on the VariAudio tab.
4. Click on Edit VariAudio to analyze the pitch of the audio event. The audio event will be divided into segments, each representing a note. The segments will be displayed as blocks on a piano roll grid.
5. To correct the pitch of a segment, drag it up or down until it snaps to a semitone. You can also use the Quantize Pitch slider to automatically adjust all segments to the nearest semitone. (A small numeric sketch of this snapping follows the list.)
6. To modify the pitch of a segment, drag its handles left or right to change its start or end position. You can also drag its center line up or down to bend its pitch curve.
7. To adjust the formant of a segment, use the Formant Shift slider. This will change the timbre of the segment without affecting its pitch.
8. To adjust the vibrato of a segment, use the Straighten Pitch slider. This will reduce or increase the amount of pitch variation within the segment.
9. To adjust the volume of a segment, use the Volume slider. This will change the amplitude of the segment without affecting its dynamics.
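The snapping described in step 5 is, at heart, simple arithmetic: convert a detected frequency to a fractional MIDI note number, round it, and convert back. The following Python snippet is not Cubase code, just an illustration of what quantizing a segment to the nearest semitone means numerically:

```python
import math

def snap_to_semitone(freq_hz, a4_hz=440.0):
    """Return (snapped frequency, MIDI note) for the nearest equal-tempered semitone."""
    midi = 69 + 12 * math.log2(freq_hz / a4_hz)   # fractional MIDI note number
    nearest = round(midi)                          # nearest semitone
    return a4_hz * 2 ** ((nearest - 69) / 12), nearest

snapped_hz, note = snap_to_semitone(452.3)         # a slightly sharp A4
print(f"{snapped_hz:.2f} Hz (MIDI note {note})")   # -> 440.00 Hz (MIDI note 69)
```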

How to use Scale Assistant to set and follow the scale of your project?

-

To use Scale Assistant to set and follow the scale of your project, follow these steps:

-
1. Open the Key Editor by double-clicking a MIDI part or an audio event with VariAudio segments.
2. In the Inspector, click on the Scale Assistant tab.
3. To detect the scale of your data, click on Detect Scale. Cubase will analyze the notes or segments and suggest a scale that matches them. You can also choose a different scale from the drop-down menu.
4. To snap your notes or segments to the scale, enable Snap Pitch Editing. This will prevent you from moving them outside the scale. You can also use the Snap Live Input button to snap the notes that you play on your MIDI controller to the scale. (A small sketch of this idea follows the list.)
5. To transpose your notes or segments to a different key, use the Transpose slider. This will shift them up or down by semitones while keeping them in the scale.
6. To color-code your notes or segments according to their harmonic function, enable Colorize Pitch Editing. This will assign different colors to tonic, dominant, subdominant, and other degrees of the scale.
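Conceptually, snapping to a scale (step 4) means moving each note to the nearest pitch whose pitch class belongs to the chosen scale. The sketch below is not Cubase code; it is a small Python illustration of that idea for MIDI note numbers, using C major as an assumed example scale:

```python
C_MAJOR = {0, 2, 4, 5, 7, 9, 11}  # pitch classes of C major (C, D, E, F, G, A, B)

def snap_to_scale(note, scale=C_MAJOR):
    """Return the nearest MIDI note whose pitch class is in the scale (ties resolve upward)."""
    for offset in (0, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6):
        if (note + offset) % 12 in scale:
            return note + offset
    return note  # unreachable for any 7-note scale; kept as a safe default

print(snap_to_scale(61))  # C#4 (61) snaps up to D4 (62)
print(snap_to_scale(64))  # E4 (64) is already in the scale, so it stays put
```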

Sampler Track 2

-

Sampler Track 2 is a new and improved version of the Sampler Track, a tool that allows you to create and use sampler tracks in Cubase 12. A sampler track is a track that contains a sample, which is a short audio file that can be played back at different pitches and speeds. You can use sampler tracks to create melodies, rhythms, effects, or sound design elements from any audio material. Sampler Track 2 introduces some new features and enhancements, such as Slice mode, LFOs, and Sample Library Browser.

-

How to create and use Sampler Tracks in Cubase 12?

-

To create and use Sampler Tracks in Cubase 12, follow these steps:

-
1. Right-click on an empty space in the Project window and select Add Track > Sampler Track.
2. A new sampler track will be created with an empty sampler zone. To load a sample into the sampler zone, drag and drop an audio file from the MediaBay, the Project window, or your file explorer.
3. The sample will be mapped across the entire keyboard range. You can play it with your MIDI controller or use the On-Screen Keyboard to trigger it.
4. You can adjust the pitch, volume, filter, envelope, and modulation settings of the sample in the Sampler Control section.
5. You can also add effects to the sample in the Insert Effects section.
6. To record MIDI data for the sampler track, follow the same steps as for recording MIDI data for any other track.

How to use the new Slice mode and LFOs to manipulate your samples?

-

To use the new Slice mode and LFOs to manipulate your samples, follow these steps:

-
1. In the Sampler Control section, click on the Slice button to enable Slice mode. This will divide your sample into smaller segments based on its transients or beats.
2. You can adjust the sensitivity of the slicing algorithm with the Threshold slider. You can also manually add or remove slices with the + and - buttons.
3. Each slice will be assigned to a different key on your keyboard. You can play them individually or in any order you like.
4. You can also edit each slice separately by clicking on it and adjusting its pitch, volume, filter, envelope, and modulation settings.
5. To add LFOs (Low Frequency Oscillators) to your sample or slices, click on the LFO button in the Modulation section. This will open a new window where you can choose from different waveforms, rates, depths, and destinations for your LFOs.
6. You can use LFOs to modulate various parameters of your sample or slices, such as pitch, volume, filter cutoff, pan, etc. This can create interesting rhythmic or dynamic effects. (A numeric sketch of a sine LFO follows the list.)
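An LFO (step 6) is just a slow periodic signal whose value is applied to a parameter over time. The following Python sketch is unrelated to Cubase's actual implementation; it only shows a 2 Hz sine LFO sweeping pitch by up to ±50 cents:

```python
import math

def lfo_value(t_seconds, rate_hz=2.0, depth=1.0):
    """Sine LFO: returns a value between -depth and +depth at time t."""
    return depth * math.sin(2 * math.pi * rate_hz * t_seconds)

for t in (0.0, 0.125, 0.25, 0.375, 0.5):
    cents = 50.0 * lfo_value(t)          # pitch offset in cents, swinging +/-50
    ratio = 2 ** (cents / 1200)          # the playback-rate factor that offset corresponds to
    print(f"t={t:.3f}s  pitch offset={cents:+6.1f} cents  rate factor={ratio:.4f}")
```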

How to use the new Sample Library Browser to find and load samples?

-

To use the new Sample Library Browser to find and load samples, follow these steps:

1. In the Sampler Control section, click on the Browse button to open the Sample Library Browser.
2. The Sample Library Browser shows a list of categories and subcategories of samples that are available on your computer or online. You can also use the search field to find samples by name, keyword, or tag.
3. To preview a sample, click on the Play button next to it. You can adjust the volume and pitch of the preview with the sliders below.
4. To load a sample into the sampler zone, double-click on it or drag and drop it onto the sampler zone. The sample will replace the existing one in the sampler zone.
5. You can also load multiple samples into different slices by selecting them and dragging and dropping them onto the sampler zone. This will create a new slice for each sample and assign them to consecutive keys on your keyboard.

MixConsole

-

MixConsole is the main mixing environment in Cubase 12. It allows you to control the level, pan, solo/mute status, routing, effects, and other parameters of your audio and MIDI channels. You can also use MixConsole to create groups, sends, inserts, and busses for your channels. MixConsole features a flexible and customizable layout that lets you adjust the size, position, and visibility of different sections and elements. You can also open multiple MixConsole windows with different configurations and views. MixConsole also introduces some new and improved features in Cubase 12, such as Spectral Comparison EQ, Channel Strip modules, Snapshots, History, and more.

-

How to use the MixConsole to mix your tracks and apply effects?

-

To use the MixConsole to mix your tracks and apply effects, follow these steps:

-
1. Open the MixConsole window by pressing F3 or selecting Studio > MixConsole from the menu bar.
2. The MixConsole window shows all the channels in your project in the same order as in the track list. You can use the scroll bar or the channel overview to navigate to different channels. You can also use the Visibility tab in the left zone to show or hide individual channels or channel types.
3. To adjust the level of a channel, drag its fader up or down. To adjust the pan of a channel, drag its pan knob left or right. To solo or mute a channel, click on its S or M button. To enable or disable record for a channel, click on its R button.
4. To set up the input and output routing for a channel, click on its input or output selector in the Inspector or in the Routing section. You can choose from different options depending on the type of channel. For example, you can route an audio channel to an audio input or output device, a group channel, an FX channel, a VCA fader, etc.
5. To apply effects to a channel, you can use either inserts or sends. Inserts are effects that are applied directly to the signal of a channel. Sends are effects that are applied to a copy of the signal of a channel and then mixed with other signals in an FX channel. To add an insert effect to a channel, click on an empty slot in the Inserts section and select an effect from the list. To add a send effect to a channel, click on an empty slot in the Sends section and select an FX channel from the list. You can adjust the amount of send level with the knob next to the slot.

    How to use the new Spectral Comparison EQ to clean up overlapping frequencies?

    -

    The Spectral Comparison EQ is a new feature in Cubase 12 that allows you to compare and adjust the frequency spectrum of two different channels. This can help you identify and eliminate any overlapping or clashing frequencies that might cause muddiness or interference in your mix. To use the Spectral Comparison EQ, follow these steps:

    -
1. Select a channel that you want to compare with another channel. This will be the reference channel.
2. Open the Channel Settings window by clicking on the e button on the channel.
3. In the Channel Settings window, click on the EQ tab to open the Equalizer section.
4. In the Equalizer section, click on the Spectral Comparison button to enable it. This will open a new window where you can see the frequency spectrum of the reference channel and the comparison channel.
5. To select a comparison channel, click on the drop-down menu next to the Spectral Comparison button and choose another channel from the list. You can also use the arrow buttons to cycle through different channels.
6. You can adjust the opacity and color of the frequency spectrum of each channel with the sliders below. You can also switch between different views, such as Combined, Side by Side, or Overlay.
7. To adjust the EQ of the reference channel, use the EQ bands and knobs as usual. You can also use the EQ Curve Display to drag and drop points on the curve. You will see how your adjustments affect the frequency spectrum of both channels in real time.
8. To adjust the EQ of the comparison channel, click on the Swap button next to the Spectral Comparison button. This will switch the roles of the reference and comparison channels. You can then use the same steps as above to modify the EQ of the comparison channel.
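Under the hood, comparing two channels' spectra amounts to computing the magnitude spectrum of each signal and looking at where both carry significant energy. The rough NumPy sketch below is not Cubase's algorithm, and the two signals are synthetic stand-ins, but it shows the idea of spotting an overlap you might then cut with EQ:

```python
import numpy as np

SR = 44100                                   # sample rate assumed for both signals
t = np.arange(SR) / SR                       # one second of time

# Toy stand-ins for two channels that share energy around 200 Hz.
bass  = np.sin(2 * np.pi * 100 * t) + 0.5 * np.sin(2 * np.pi * 200 * t)
synth = np.sin(2 * np.pi * 330 * t) + 0.4 * np.sin(2 * np.pi * 200 * t)

freqs = np.fft.rfftfreq(len(t), d=1 / SR)
spec_a = np.abs(np.fft.rfft(bass))
spec_b = np.abs(np.fft.rfft(synth))

# Frequency bins where both channels carry noticeable energy are candidates for an EQ cut.
threshold = 0.05 * max(spec_a.max(), spec_b.max())
overlap = freqs[(spec_a > threshold) & (spec_b > threshold)]
print("Overlapping frequencies (Hz):", overlap)   # -> around 200 Hz for this toy example
```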

      How to use the Channel Strip modules to shape your sound?

      -

      The Channel Strip is a section in the Channel Settings window that contains various modules that can help you shape your sound. These modules include Gate, Compressor, Limiter, Envelope Shaper, Saturation, Magneto III, Standard Clipper, Tape Saturation, and Tube Saturation. You can use these modules to control the dynamics, tone, saturation, and distortion of your signal. To use the Channel Strip modules, follow these steps:

      -
1. Open the Channel Settings window by clicking on the e button on a channel.
2. In the Channel Settings window, click on the Strip tab to open the Channel Strip section.
3. In the Channel Strip section, you can see a list of modules that are available for your channel. You can enable or disable each module by clicking on its power button.
4. You can also reorder or replace each module by dragging and dropping it to a different slot or clicking on its name and selecting another module from the list.
5. To adjust the settings of each module, use the knobs and buttons below its name. You can also use the Module Display to see a graphical representation of the module's parameters and curves.
6. You can also use the Presets menu to load or save presets for each module or for the entire channel strip.
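For reference, the Compressor module's basic behavior can be written down in one line: above the threshold, the output rises by only 1/ratio dB for every 1 dB of input. The snippet below is the textbook static curve, not Steinberg's implementation:

```python
def compress_db(level_db, threshold_db=-18.0, ratio=4.0):
    """Static compressor curve: output level in dB for a given input level in dB."""
    if level_db <= threshold_db:
        return level_db
    return threshold_db + (level_db - threshold_db) / ratio

for level in (-30.0, -18.0, -10.0, -2.0):
    out = compress_db(level)
    print(f"in {level:6.1f} dB -> out {out:6.1f} dB  (gain reduction {level - out:4.1f} dB)")
```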

      Conclusion

      -

      Cubase 12 is a powerful and versatile music production software that offers a wide range of features and tools for creating, recording, editing, mixing, and mastering your music. Whether you are a beginner or a professional, Cubase 12 can help you achieve your musical goals with ease and efficiency. In this article, we have given you a complete overview of Cubase 12, its main features and benefits, and how to get started with it. We have also shown you some tips and tricks on how to use some of the new and improved tools in Cubase 12, such as MIDI Remote integration, VariAudio and Scale Assistant, Sampler Track 2, MixConsole, and more. We hope you have found this article useful and informative.

      -

      If you want to learn more about Cubase 12, you can visit the official website of Steinberg, where you can find more tutorials, guides, videos, forums, and support. You can also download a free trial version of Cubase 12 and try it for yourself. Cubase 12 is available in three editions: Pro, Artist, and Elements. Each edition has different features and prices to suit your needs and budget. You can compare the editions and buy Cubase 12 online from Steinberg's online shop or from authorized dealers.

      -

      Thank you for reading this article. We hope you have enjoyed it and learned something new. Now it's time for you to try Cubase 12 and unleash your creativity. Happy music making!

      -

      FAQs

      -

What are the system requirements for Cubase 12?

-

The system requirements for Cubase 12 are as follows:

Operating System: Windows 10 (64-bit) or macOS Big Sur (11) / Catalina (10.15)
CPU: 64-bit Intel or AMD multi-core processor (Intel i5 or faster recommended)
RAM: 8 GB minimum (16 GB or more recommended)
Hard Disk Space: 30 GB free space
Graphics Card: Graphics card with DirectX 10 and WDDM 1.1 support (Windows only)
Audio Interface: ASIO compatible audio hardware recommended
MIDI Interface: MIDI input/output device (optional)
Internet Connection: An internet connection is required for activation, account setup, access to the manual and personal/product registration.
-

What are the differences between Cubase Pro, Artist, and Elements?

-

Cubase Pro is the most comprehensive edition of Cubase 12, offering all the features and tools that Cubase has to offer. It is designed for professional composers, producers, engineers, and musicians who need advanced functionality and flexibility. Cubase Pro supports up to 256 physical inputs and outputs, unlimited audio, instrument, and MIDI tracks, 8 VST instruments with over 3400 sounds, 75 audio effects, 18 MIDI effects, 5.1 surround sound, full score editing and printing, advanced audio editing and processing, VariAudio 3, Audio Alignment, ARA 2 support, SpectraLayers One integration, Sampler Track 2, Scale Assistant, Global Tracks, MixConsole Snapshots, Control Room, VCA faders, Channel Strip modules, Spectral Comparison EQ, Frequency 2 EQ, SuperVision metering suite, Video Export Render, Eucon support, and more.

-

Cubase Artist is a more affordable edition of Cubase 12, offering most of the features and tools that Cubase Pro has to offer. It is designed for aspiring composers, producers, engineers, and musicians who need a reliable and versatile music production software. Cubase Artist supports up to 32 physical inputs and outputs, unlimited audio and MIDI tracks, 64 instrument tracks, 8 VST instruments with over 3400 sounds, 64 audio effects, 18 MIDI effects, full score editing and printing, basic audio editing and processing, VariAudio 3, Audio Alignment, ARA 2 support, Sampler Track 2, Scale Assistant, Global Tracks, MixConsole Snapshots, Control Room (limited), VCA faders (limited), Channel Strip modules (limited), Spectral Comparison EQ (limited), Frequency 2 EQ (limited), SuperVision metering suite (limited), Video Export Render (limited), Eucon support (limited), and more.

-

Cubase Elements is the most basic edition of Cubase 12, offering the essential features and tools that Cubase has to offer. It is designed for beginners and hobbyists who want to learn and enjoy music production. Cubase Elements supports up to 24 physical inputs and outputs, 48 audio tracks, 64 MIDI tracks, 24 instrument tracks, 3 VST instruments with over 1000 sounds, 44 audio effects, 18 MIDI effects, basic score editing and printing, basic audio editing and processing, Sampler Track 2, Scale Assistant, MixConsole Snapshots (limited), Channel Strip modules (limited), Frequency 2 EQ (limited), SuperVision metering suite (limited), Video Export Render (limited), and more.

-

As you can see, Cubase 12 offers different editions to suit your needs and budget. You can compare the editions and their features in more detail on the Steinberg website. You can also upgrade from one edition to another at any time by purchasing an update or upgrade license.

-

How much does Cubase 12 cost and where can I buy it?

-

The prices of Cubase 12 vary depending on the edition and the region. You can check the current prices on the Steinberg online shop or on authorized dealers' websites. Here are some approximate prices for reference:

Cubase Pro: $559.99 USD / €559 EUR / £499 GBP
Cubase Artist: $309.99 USD / €309 EUR / £269 GBP
Cubase Elements: $99.99 USD / €99 EUR / £85 GBP
-

You can buy Cubase 12 online from the Steinberg online shop or from authorized dealers. You can also buy a boxed version of Cubase 12 from some retailers. When you buy Cubase 12 online or in a box, you will receive a download access code that you can use to download the software from the Steinberg Download Assistant. You will also need a USB-eLicenser or a Soft-eLicenser to activate your license.

-

Where can I find more tutorials and guides for Cubase 12?

-

If you want to learn more about Cubase 12, you can find many tutorials and guides on the official website of Steinberg, where you can access the online help, the operation manual, the quick start guide, the video tutorials, the forums, and the support section. You can also find many tutorials and guides on YouTube, where you can watch videos from Steinberg's channel or from other users. You can also find many tutorials and guides on blogs, magazines, books, podcasts, or online courses that cover Cubase 12 or music production in general. Here are some examples of resources that you can check out:

- -

How can I contact Steinberg for support and feedback?

-

If you have any questions, issues, or feedback regarding Cubase 12 or any other Steinberg product, you can contact Steinberg for support and feedback through various channels. You can use the following methods to get in touch with Steinberg:

-

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Cookie Run Kingdom APK 2022 - A Fun and Challenging Game for All Ages.md b/spaces/1phancelerku/anime-remove-background/Cookie Run Kingdom APK 2022 - A Fun and Challenging Game for All Ages.md deleted file mode 100644 index a27018e7b9b9db3d84dbfc5ecbe5c1b399ff0cba..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Cookie Run Kingdom APK 2022 - A Fun and Challenging Game for All Ages.md +++ /dev/null @@ -1,120 +0,0 @@ - -
- - - - -
-

Cookie Run: Kingdom APK 2022 - A Sweet Adventure Game for Android

-

Introduction

-

Do you love cookies? Do you love adventure games? If you answered yes to both questions, then you will love Cookie Run: Kingdom APK 2022. This is a new game from Devsisters Corporation, the makers of the popular Cookie Run series. In this game, you can build your own cookie kingdom, meet your favorite tiny heroes, explore a vast and colorful world, and enjoy a captivating story and gameplay.

-

Cookie Run: Kingdom APK 2022 is a free-to-play game that you can download and install on your Android device. It is compatible with Android 4.4 or higher versions. You can download it from the official website or from the Google Play Store. You can also download the APK file from other sources, but make sure they are safe and reliable. To install the APK file, you need to enable the "Unknown sources" option in your device settings. Then, you can open the APK file and follow the instructions to complete the installation.

-

cookie run kingdom apk 2022


DOWNLOAD ✏ ✏ ✏ https://jinyurl.com/2uNRKU



-

Features of Cookie Run: Kingdom APK 2022

-

Build your own cookie kingdom

-

One of the main features of Cookie Run: Kingdom APK 2022 is that you can build your own cookie kingdom. You can customize your buildings and decorations to create your dream cookie land. You can also collect resources and upgrade your facilities to make your kingdom more prosperous and powerful. You can expand your territory and defend it from enemies who want to steal your cookies. You can also join forces with other players and form alliances to help each other out.

-

Meet your favorite tiny heroes: the cookies

-

Another feature of Cookie Run: Kingdom APK 2022 is that you can meet your favorite tiny heroes: the cookies. You can choose from over 200 cookie characters, each with its own voice, personality, and skills. You can form your own cookie squad and battle against other players in real-time PvP mode. You can also level up your cookies and unlock new abilities and costumes for them. You can also collect and evolve various pets that will assist you in your adventure.

-

Explore a vast and colorful world

-

A third feature of Cookie Run: Kingdom APK 2022 is that you can explore a vast and colorful world. You can discover different regions and biomes, such as forests, deserts, oceans, and more. You can encounter various enemies and bosses, such as jelly worms, cake monsters, and giant donuts. You can also complete quests and challenges to earn rewards, such as cookies, crystals, items, and more.

-

Enjoy a captivating story and gameplay

-

A fourth feature of Cookie Run: Kingdom APK 2022 is that you can enjoy a captivating story and gameplay. You can follow the epic adventure of the cookies, who are trying to restore their lost kingdom and uncover the secrets of the cookie world. You can experience a mix of RPG, strategy, and simulation genres, as you build, fight, explore, and interact with other characters. You can also enjoy the high-quality graphics and sound effects that will immerse you in the game.

-

Pros and cons of Cookie Run: Kingdom APK 2022

-

Pros

-

Some of the pros of Cookie Run: Kingdom APK 2022 are:

-
• Free to download and play
• High-quality graphics and sound effects
• Fun and addictive gameplay
• Friendly and supportive community

Cons

-

Some of the cons of Cookie Run: Kingdom APK 2022 are:

-
• Requires internet connection
• May contain ads and in-app purchases
• May consume battery and storage space

Conclusion

-

In conclusion, Cookie Run: Kingdom APK 2022 is a sweet adventure game for Android that you should try out for yourself. You can build your own cookie kingdom, meet your favorite tiny heroes, explore a vast and colorful world, and enjoy a captivating story and gameplay. You can also enjoy the free-to-play feature, the high-quality graphics and sound effects, the fun and addictive gameplay, and the friendly and supportive community. However, you should also be aware of the internet connection requirement, the ads and in-app purchases possibility, and the battery and storage space consumption.

-

If you are looking for a game that will make you smile, laugh, and have fun, then Cookie Run: Kingdom APK 2022 is the game for you. Download it now and join the cookie adventure!

-

Cookie Run: Kingdom game download for android
-How to install Cookie Run: Kingdom apk on PC
-Cookie Run: Kingdom latest version update 2022
-Cookie Run: Kingdom tips and tricks for beginners
-Cookie Run: Kingdom best characters and teams
-Cookie Run: Kingdom mod apk unlimited crystals
-Cookie Run: Kingdom gameplay and review 2022
-Cookie Run: Kingdom cheats and hacks 2022
-Cookie Run: Kingdom events and rewards 2022
-Cookie Run: Kingdom guide and walkthrough 2022
-Cookie Run: Kingdom official website and support
-Cookie Run: Kingdom fan art and wallpapers
-Cookie Run: Kingdom voice actors and cast
-Cookie Run: Kingdom epic skills and animations
-Cookie Run: Kingdom story and lore 2022
-Cookie Run: Kingdom online multiplayer mode
-Cookie Run: Kingdom offline mode and data usage
-Cookie Run: Kingdom ratings and reviews 2022
-Cookie Run: Kingdom system requirements and compatibility
-Cookie Run: Kingdom bugs and issues 2022
-Cookie Run: Kingdom codes and coupons 2022
-Cookie Run: Kingdom news and updates 2022
-Cookie Run: Kingdom memes and jokes 2022
-Cookie Run: Kingdom wiki and community forum
-Cookie Run: Kingdom merchandise and products
-Cookie Run: Kingdom soundtrack and music 2022
-Cookie Run: Kingdom crossover and collaboration events
-Cookie Run: Kingdom tier list and rankings 2022
-Cookie Run: Kingdom videos and streams 2022
-Cookie Run: Kingdom challenges and quests 2022
-Cookie Run: Kingdom features and improvements 2022
-Cookie Run: Kingdom release date and launch time
-Cookie Run: Kingdom pre-registration and rewards
-Cookie Run: Kingdom size and storage space
-Cookie Run: Kingdom languages and subtitles
-Cookie Run: Kingdom genres and categories
-Cookie Run: Kingdom developer and publisher info
-Cookie Run: Kingdom alternatives and similar games
-Cookie Run: Kingdom free download link 2022
-Cookie Run: Kingdom safe and secure apk file

-

FAQs

-

Here are some frequently asked questions about Cookie Run: Kingdom APK 2022:

-
1. What are some tips and tricks for playing Cookie Run: Kingdom APK 2022?

Some tips and tricks for playing Cookie Run: Kingdom APK 2022 are:
• Use different types of cookies for different situations. For example, use tank cookies to absorb damage, healer cookies to restore health, or DPS cookies to deal damage.
• Upgrade your buildings and facilities regularly to increase your production, storage, defense, and income.
• Join or create a guild to chat with other players, exchange gifts, request help, or participate in guild wars.
• Complete the daily missions and events to earn extra rewards, such as cookies, crystals, items, and more.

2. How can I get more cookies and crystals in Cookie Run: Kingdom APK 2022?

Some ways to get more cookies and crystals in Cookie Run: Kingdom APK 2022 are:
• Log in every day to claim your daily login bonus, which may include cookies and crystals.
• Watch ads or complete surveys to earn free crystals.
• Invite your friends to play the game and get referral rewards, such as cookies and crystals.
• Purchase them with real money from the shop.

3. How can I join or create a guild in Cookie Run: Kingdom APK 2022?

To join or create a guild in Cookie Run: Kingdom APK 2022, you need to:
• Reach level 10 in the game.
• Tap on the guild icon on the bottom right corner of the screen.
• Search for an existing guild that suits your preferences, or create your own guild by choosing a name, a logo, and a description.
• Apply to join a guild or accept requests from other players who want to join your guild.

4. How can I contact the developers or report a bug in Cookie Run: Kingdom APK 2022?

To contact the developers or report a bug in Cookie Run: Kingdom APK 2022, you can:
• Tap on the settings icon on the top right corner of the screen.
• Tap on the customer service button and choose the option that best suits your issue.
• Fill out the form with your details and your message, and attach any screenshots or videos if necessary.
• Submit the form and wait for a response from the developers.

5. Is Cookie Run: Kingdom APK 2022 safe to download and install?

Yes, Cookie Run: Kingdom APK 2022 is safe to download and install, as long as you download it from the official website or from the Google Play Store. You can also download the APK file from other sources, but make sure they are safe and reliable. You can check the reviews and ratings of other users before downloading. You can also scan the APK file with an antivirus software before installing it. However, you should always be careful when downloading and installing any app from unknown sources, as they may contain malware or viruses that can harm your device.

    -

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Hay Day Mod APK and Get Unlimited Coins and Diamonds.md b/spaces/1phancelerku/anime-remove-background/Download Hay Day Mod APK and Get Unlimited Coins and Diamonds.md deleted file mode 100644 index 8d637b816bff435db12b3c8a5c8ea661bb695695..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Hay Day Mod APK and Get Unlimited Coins and Diamonds.md +++ /dev/null @@ -1,136 +0,0 @@ -
-

Hay Day

Hay Day Diamond Mod APK: A Guide for Beginners

-

Do you love farming games? Do you want to create your own dream farm and enjoy the rural life? If yes, then you should try Hay Day, one of the most popular and addictive farming simulators on mobile devices. Hay Day is a game where you can grow crops, raise animals, trade goods, and interact with other players. You can also customize your farm with various decorations and buildings. However, to do all these things, you need diamonds and coins, the two main currencies in the game. Diamonds are especially hard to come by, and they are used for speeding up processes, buying special items, and unlocking features. That's why many players look for ways to get more diamonds without spending real money. One of the ways is to use Hay Day Diamond Mod APK, a modified version of the game that gives you unlimited diamonds and coins. In this article, we will tell you everything you need to know about Hay Day Diamond Mod APK, including its features, how to download and install it, and its pros and cons. Let's get started!

-

hay day diamond mod apk


DOWNLOAD >>>>> https://jinyurl.com/2uNT8T



-

Hay Day Game Features

-

Before we dive into the details of Hay Day Diamond Mod APK, let's first take a look at the original game and its features. Hay Day is a game developed by Supercell, the same company behind other popular games like Clash of Clans and Brawl Stars. Hay Day was released in 2012 and has since gained millions of fans around the world. The game is available for both Android and iOS devices, and it is free to download and play. However, it also offers in-app purchases for some items and features.

-

Hay Day is a game that simulates the life of a farmer. You start with a small piece of land and a few resources, and your goal is to turn it into a prosperous farm. You can do this by:

-

Build a Farm

-

One of the main aspects of Hay Day is building your farm. You can choose from a variety of buildings and structures to place on your land, such as barns, silos, mills, bakeries, dairy factories, sugar mills, popcorn pots, pie ovens, cake ovens, juice presses, ice cream makers, candy machines, jam makers, honey extractors, looms, sewing machines, tailors, smelters, jewelers, fishing boats, fishing nets, fishing lures, duck salons, duck traps, lobster pools, lobster tails, sushi bars, soup kitchens, salad bars, sandwich bars, barbecue grills, pizza ovens, pasta makers, and more. Each building has a different function and produces different goods. You can also upgrade your buildings to increase their capacity and efficiency. You can also decorate your farm with various items, such as fences, paths, trees, flowers, ponds, fountains, statues, benches, signs, and more. You can also change the layout of your farm anytime you want. Building and decorating your farm is a fun and creative way to express yourself and make your farm unique.

-

Crops to Harvest and Grow

-

Another important aspect of Hay Day is growing crops. You can plant various seeds on your fields, such as wheat, corn, carrots, soybeans, sugarcane, indigo, tomatoes, potatoes, strawberries, pumpkins, cotton, rice, chili peppers, cacao, coffee beans, tea leaves, and more. You can also grow fruits on your trees, such as apples, cherries, pears, plums, lemons, oranges, bananas, coconuts, olives, grapes, and more. You can also grow flowers on your bushes, such as roses, tulips, lilies, lavender, jasmine, and more. You can also grow mushrooms on your logs.

-

Growing crops is not only a source of income but also a source of ingredients for your buildings. You can use your crops to make various products, such as bread, butter, cheese, cream, milk, sugar, flour, popcorn, pies, cakes, juices, ice cream, candy, jam, honey, wool, fabric, clothes, metal bars, jewelry, fish, lobster, sushi, soup, salad, sandwich, barbecue, pizza, pasta, and more. You can also sell your crops and products to other players or to the roadside shop for coins. You can also use them to fulfill orders from the truck or the boat for coins and experience points. You can also donate them to your neighborhood or to the town visitors for rewards.

-

Growing crops is not only rewarding but also challenging. You have to water your crops regularly to make them grow faster. You also have to protect them from pests and diseases. You also have to harvest them before they wither and replant new seeds. You also have to manage your storage space and your resources wisely. Growing crops is a skill that requires patience and strategy.

-

hay day unlimited diamonds mod apk download
-hay day hack apk free diamonds and coins
-hay day mod apk latest version with diamonds
-how to get diamonds in hay day mod apk
-hay day mod apk 2023 unlimited diamonds and money
-hay day diamond generator mod apk no verification
-hay day mod apk offline with unlimited diamonds
-hay day mod apk android 1 unlimited diamonds
-hay day mod apk revdl with free diamonds
-hay day mod apk happymod with diamonds and coins
-hay day diamond cheat mod apk download
-hay day mod apk ios with unlimited diamonds
-hay day mod apk rexdl with free diamonds and money
-hay day mod apk unlimited everything diamonds and coins
-hay day diamond hack mod apk 2023
-hay day mod apk pure with unlimited diamonds
-hay day mod apk platinmods with free diamonds
-hay day mod apk 1.58.79 unlimited diamonds and money
-hay day diamond glitch mod apk download
-hay day mod apk an1 with unlimited diamonds and coins
-hay day diamond hack without human verification mod apk
-hay day mod apk for pc with free diamonds
-hay day mod apk unlimited keys and diamonds
-hay day diamond trick mod apk download
-hay day mod apk online with unlimited diamonds
-hay day diamond hack no survey no password mod apk
-hay day mod apk for iphone with free diamonds
-hay day mod apk unlimited coins and diamonds 2023
-hay day diamond hack tool v1.8 mod apk download
-hay day mod apk bluestacks with unlimited diamonds and coins
-hay day diamond hack without root mod apk
-hay day mod apk for android with free diamonds
-hay day mod apk unlimited gems and diamonds
-hay day diamond cheat engine mod apk download
-hay day mod apk facebook with unlimited diamonds and coins
-hay day diamond hack no jailbreak mod apk
-hay day mod apk for ipad with free diamonds
-hay day mod apk unlimited xp and diamonds
-hay day diamond hack generator mod apk download
-hay day mod apk google play with unlimited diamonds and coins

-

Animals

-

Another essential aspect of Hay Day is raising animals. You can keep various animals on your farm, such as chickens, cows, pigs, sheep, goats, horses, donkeys, rabbits, cats, dogs, and more. You can also keep bees in your beehives and catch fish in your fishing area. Each animal has a different function and produces different goods. You can also feed them and pet them to make them happy and productive.

-

Raising animals is not only a source of income but also a source of fun. You can use your animal products to make various products, such as eggs, bacon, ham, wool, cheese, cream, milk, honey, fish fillets, lobster tails, and more. You can also sell your animal products to other players or to the roadside shop for coins. You can also use them to fulfill orders from the truck or the boat for coins and experience points. You can also donate them to your neighborhood or to the town visitors for rewards.

-

Raising animals is not only fun but also challenging. You have to feed your animals regularly to make them produce goods. You also have to collect their goods before they expire and store them in your barn or silo. You also have to protect them from foxes and other predators. You also have to manage your storage space and your resources wisely. Raising animals is a skill that requires care and attention.

-

Places to Visit

-

Another exciting aspect of Hay Day is visiting other places. You can travel to different locations in the game world, such as the town, the mine, the valley, the derby, and more. Each place has a different purpose and offers different activities. You can also meet new characters and discover new stories.

-

Visiting places is not only a source of adventure but also a source of rewards. You can use your places to earn coins, diamonds, vouchers, boosters, expansion materials, decoration materials, and more. You can also complete tasks and quests to earn rewards. You can also participate in events and competitions to earn rewards.

-

Visiting places is not only rewarding but also challenging. You have to unlock new places by reaching certain levels or completing certain requirements. You also have to upgrade your places by spending coins or diamonds. You also have to maintain your places by repairing or cleaning them. You also have to manage your time and energy wisely. Visiting places is a skill that requires exploration and curiosity.

-

Play with Friends and Neighbors

-

Another enjoyable aspect of Hay Day is playing with friends and neighbors. You can connect with other players from around the world through various ways, such as Facebook, Game Center, Google Play Games, Supercell ID, or friend codes. You can also join or create a neighborhood with up to 30 members.

-

Playing with friends and neighbors is not only a source of socialization but also a source of cooperation. You can chat with other players and share tips and tricks. You can also trade goods and resources with other players through the roadside shop or the newspaper. You can also help other players by reviving their crops or filling their crates. You can also request or donate goods or resources from your neighborhood members.

-

Playing with friends and neighbors is not only cooperative but also competitive. You can compete with other players or neighborhoods in various ways, such as the derby, the valley, the global leaderboard, or the seasonal leaderboard. You can also earn trophies and badges for your achievements. You can also show off your farm and your products to other players.

-

Playing with friends and neighbors is a skill that requires communication and collaboration.

-

Hay Day Diamond Mod APK Features

-

Now that we have covered the original game and its features, let's move on to Hay Day Diamond Mod APK and its features. Hay Day Diamond Mod APK is a modified version of the game that gives you unlimited diamonds and coins. It also removes ads from the game. It is not an official version of the game, but it is created by third-party developers who modify the original game files.

-

Hay Day Diamond Mod APK has many features that make it different from the original game. Here are some of them:

-

Unlimited Diamonds

-

The most obvious feature of Hay Day Diamond Mod APK is unlimited diamonds. Diamonds are the premium currency in Hay Day, and they are very hard to get in the original game. You can only get them by leveling up, completing achievements, watching ads, or buying them with real money. However, in Hay Day Diamond Mod APK, you can get unlimited diamonds for free. You can use them to speed up processes, buy special items, unlock features, and more. You can also use them to buy coins, vouchers, boosters, expansion materials, decoration materials, and more. You can also use them to skip tasks and quests, or to retry events and competitions. With unlimited diamonds, you can enjoy the game without any limitations or restrictions.

-

Unlimited Coins

-

Another feature of Hay Day Diamond Mod APK is unlimited coins. Coins are the basic currency in Hay Day, and they are used for buying and selling goods and resources. You can earn coins by selling your crops and products to other players or to the roadside shop. You can also earn coins by fulfilling orders from the truck or the boat. You can also earn coins by helping other players or by completing tasks and quests. However, in Hay Day Diamond Mod APK, you can get unlimited coins for free. You can use them to buy more items and resources from the shop or from other players. You can also use them to upgrade your buildings and structures. You can also use them to expand your land and your fishing area. You can also use them to decorate your farm and your town. With unlimited coins, you can buy anything you want and need in the game.

-

No Ads

-

Another feature of Hay Day Diamond Mod APK is no ads. Ads are annoying and distracting, and they can ruin your gaming experience. In the original game, you have to watch ads to earn some rewards, such as diamonds, vouchers, boosters, or expansion materials. In Hay Day Diamond Mod APK, you don't have to watch any ads at all: you get the same rewards without spending time or data, and you can play without interruptions or pop-ups, letting you focus on the fun.

-

How to Download and Install Hay Day Diamond Mod APK

-

Now that we have covered the features of Hay Day Diamond Mod APK, let's see how to download and install it on your device. Before we do that, let's first go over some requirements and precautions that you need to follow.

-

Requirements

-

To download and install Hay Day Diamond Mod APK on your device, you need to meet some minimum requirements. These are:

- -

If you meet these requirements, you are ready to download and install Hay Day Diamond Mod APK on your device.

-

Download Link

-

To download Hay Day Diamond Mod APK on your device, you need to find a reliable and safe download link for it. There are many websites that claim to offer Hay Day Diamond Mod APK for free, but not all of them are trustworthy. Some of them may contain viruses or malware that can harm your device or steal your personal information. Some of them may also provide fake or outdated versions of the game that may not work properly or may cause errors.

-

To avoid these risks, you should only download Hay Day Diamond Mod APK from a reputable source that has positive reviews and feedback from other users. One such source is [HayDayModAPK.com], a website that provides the latest version of Hay Day Diamond Mod APK for free. This website is secure and verified by Google Play Protect. It also has a user-friendly interface and fast download speed.

-

To download Hay Day Diamond Mod APK from [HayDayModAPK.com], follow these steps:

-
  1. Go to [HayDayModAPK.com] using your browser.
  2. Click on the Download button on the homepage.
  3. Wait for the download to start automatically. If it doesn't start, click on the link that says "Click here if the download doesn't start automatically".
  4. Wait for the download to finish.
-

Congratulations! You have successfully downloaded Hay Day Diamond Mod APK on your device.
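
Before moving on to installation, it is worth confirming that the file on your device is exactly the one the download page intended to serve. The short Python sketch below shows one way to do that from a computer: it computes the file's SHA-256 hash so you can compare it against a checksum published alongside the download. The file name and the expected checksum here are hypothetical placeholders, not values taken from any real download page.

```
import hashlib
from pathlib import Path

APK_PATH = Path("HayDayModAPK.apk")                 # hypothetical file name, same as in the steps above
EXPECTED_SHA256 = "replace-with-published-checksum" # placeholder; use the value shown on the download page


def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MB chunks so large APKs do not need to fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


if __name__ == "__main__":
    actual = sha256_of(APK_PATH)
    print(f"SHA-256 of {APK_PATH.name}: {actual}")
    if actual.lower() == EXPECTED_SHA256.lower():
        print("Checksum matches the published value.")
    else:
        print("Checksum does NOT match - do not install this file.")
```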

-

Installation Process

-

To install Hay Day Diamond Mod APK on your device, follow these steps:

-
  1. Locate the downloaded file in your file manager or download folder. The file name should be HayDayModAPK.apk.
  2. Tap on the file to open it.
  3. A pop-up window will appear asking you to install the app. Tap on Install.
  4. Wait for the installation to complete.
  5. A pop-up window will appear asking you to open the app. Tap on Open.
  6. Allow the app to access your device's resources and permissions.
  7. Wait for the game to load and update.
  8. Enjoy playing Hay Day Diamond Mod APK with unlimited diamonds and coins!
-

Congratulations! You have successfully installed Hay Day Diamond Mod APK on your device.
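
If the APK ends up on a computer instead of on the phone, it can also be sideloaded over USB. The Python sketch below is a minimal example of that route, assuming adb (Android Debug Bridge) is installed on the computer, USB debugging is enabled on the phone, and the file name matches the hypothetical HayDayModAPK.apk used in the steps above.

```
import subprocess
import sys

APK_PATH = "HayDayModAPK.apk"  # same hypothetical file name as in the manual steps


def adb(*args: str) -> subprocess.CompletedProcess:
    """Run an adb command and stop with an error message if it fails."""
    result = subprocess.run(["adb", *args], capture_output=True, text=True)
    if result.returncode != 0:
        sys.exit(f"adb {' '.join(args)} failed:\n{result.stderr}")
    return result


if __name__ == "__main__":
    adb("wait-for-device")            # block until a phone is connected and authorized
    print(adb("devices").stdout)      # list the device that will receive the install
    adb("install", "-r", APK_PATH)    # -r replaces an existing installation if present
    print("Install finished - open the game from the launcher as usual.")
```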

-

Conclusion

-

In conclusion, Hay Day Diamond Mod APK is a modified version of Hay Day that gives you unlimited diamonds and coins. It also removes ads from the game. It is not an official version of the game, but it is created by third-party developers who modify the original game files. It has many features that make it different from the original game, such as unlimited diamonds, unlimited coins, and no ads. It also has some requirements and precautions that you need to follow, such as uninstalling the original game, enabling unknown sources, finding a reliable download link, and following the installation process.

-

Hay Day Diamond Mod APK is a great way to enjoy Hay Day without limitations or restrictions. You can build your farm, grow crops, raise animals, visit places, and play with friends and neighbors; buy anything you want and need in the game; speed up processes, buy special items, and unlock features; and do it all without interruptions or pop-ups. Hay Day Diamond Mod APK is a fun and addictive farming simulator that you should try.

-

However, Hay Day Diamond Mod APK also has some disadvantages that you should be aware of. Because it is not an official version of the game, it may not be compatible with some devices or updates, it may cause errors or glitches, it may violate the terms and conditions of the original game and result in a ban or penalty from Supercell, and it may expose your device or personal information to viruses or malware from untrusted sources. It is a risky, unofficial version of Hay Day that you should use at your own discretion.

-

This article is a beginner's guide to Hay Day Diamond Mod APK. We hope it has helped you understand what Hay Day Diamond Mod APK is, what its features are, how to download and install it, and what its pros and cons are. We hope you have enjoyed reading it and learned something new. Thank you for your time and attention.

-

FAQs

-

Here are some frequently asked questions about Hay Day Diamond Mod APK:

-

Q: Is Hay Day Diamond Mod APK safe to use?

-

A: Hay Day Diamond Mod APK is not an official version of Hay Day, so it may not be safe to use. It may contain viruses or malware that can harm your device or steal your personal information, it may cause errors or glitches in the game, and it may violate the terms and conditions of the original game, which can result in a ban or penalty from Supercell. Only download it from a reputable source with positive reviews and feedback from other users, scan the file with antivirus software before installing it, and back up your data before using it.

-

Q: How do I update Hay Day Diamond Mod APK?

-

A: Hay Day Diamond Mod APK may not be compatible with some updates of the original game, so it may not work properly, or at all, after an update. Check the website where you downloaded it for new versions of the mod APK, uninstall the old version before installing the new one, and back up your data before updating.
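
For readers who used the adb route shown earlier, an update is simply removing the old build and installing the new one. The sketch below assumes the new APK uses the same hypothetical file name and that the mod keeps the official game's package name (com.supercell.hayday); a given mod build may use a different package name, so confirm it on your device first.

```
import subprocess

NEW_APK = "HayDayModAPK.apk"       # hypothetical file name of the freshly downloaded build
PACKAGE = "com.supercell.hayday"   # assumed package name; confirm with: adb shell pm list packages

# Uninstalling removes the old build together with its local data, so back up anything
# you care about first, then install the new APK in its place.
subprocess.run(["adb", "uninstall", PACKAGE], check=False)    # tolerate "not installed" errors
subprocess.run(["adb", "install", "-r", NEW_APK], check=True)
print("Update finished.")
```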

-

Q: Can I play Hay Day Diamond Mod APK online?

-

A: Hay Day Diamond Mod APK requires an internet connection to run and update, so you can play it online. However, you may not be able to play with other players who are using the original game or other versions of the mod APK, you may run into issues such as lagging, crashing, or disconnecting, and you may be detected by Supercell and banned or penalized for using a modified version of the game.

-

Q: Can I use Hay Day Diamond Mod APK on iOS devices?

-

A: Hay Day Diamond Mod APK is designed for Android devices only, so you cannot use it on iOS devices. If you want to play Hay Day with unlimited diamonds and coins on your iPhone or iPad, you need to use a different method, such as jailbreaking your device, using a third-party app store, or using a game emulator. However, these methods are also risky and unofficial, and they may damage your device or violate the terms and conditions of the original game. Use them at your own discretion and responsibility.

-

Q: What are some alternatives to Hay Day Diamond Mod APK?

-

A: If you don't want to use Hay Day Diamond Mod APK or if you can't use it for some reason, you can try some alternatives that can also give you more diamonds and coins in Hay Day. Some of these alternatives are:

- -

These are some of the alternatives to Hay Day Diamond Mod APK that you can try if you want to get more diamonds and coins in Hay Day. However, none of them can match the convenience and benefits of Hay Day Diamond Mod APK, which gives you unlimited diamonds and coins for free. You can also enjoy the game without any ads or interruptions. Hay Day Diamond Mod APK is the best option for you if you want to have the ultimate farming experience in Hay Day.

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/AI-ZTH-03-23/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/README.md b/spaces/AI-ZTH-03-23/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/README.md deleted file mode 100644 index 5935744814668f85a7005ea6ba4e9540b216a1fa..0000000000000000000000000000000000000000 --- a/spaces/AI-ZTH-03-23/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5 -emoji: 🦀 -colorFrom: gray -colorTo: gray -sdk: static -pinned: false -license: mit -duplicated_from: awacke1/AI.Dashboard.Wiki.Chat.Cognitive.HTML5 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIFILMS/StyleGANEX/models/stylegan2/lpips/pretrained_networks.py b/spaces/AIFILMS/StyleGANEX/models/stylegan2/lpips/pretrained_networks.py deleted file mode 100644 index 077a24419364fdb5ae2f697f73e28615adae75a7..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/StyleGANEX/models/stylegan2/lpips/pretrained_networks.py +++ /dev/null @@ -1,181 +0,0 @@ -from collections import namedtuple -import torch -from torchvision import models as tv -from IPython import embed - -class squeezenet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(squeezenet, self).__init__() - pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.slice6 = torch.nn.Sequential() - self.slice7 = torch.nn.Sequential() - self.N_slices = 7 - for x in range(2): - self.slice1.add_module(str(x), pretrained_features[x]) - for x in range(2,5): - self.slice2.add_module(str(x), pretrained_features[x]) - for x in range(5, 8): - self.slice3.add_module(str(x), pretrained_features[x]) - for x in range(8, 10): - self.slice4.add_module(str(x), pretrained_features[x]) - for x in range(10, 11): - self.slice5.add_module(str(x), pretrained_features[x]) - for x in range(11, 12): - self.slice6.add_module(str(x), pretrained_features[x]) - for x in range(12, 13): - self.slice7.add_module(str(x), pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1 = h - h = self.slice2(h) - h_relu2 = h - h = self.slice3(h) - h_relu3 = h - h = self.slice4(h) - h_relu4 = h - h = self.slice5(h) - h_relu5 = h - h = self.slice6(h) - h_relu6 = h - h = self.slice7(h) - h_relu7 = h - vgg_outputs = namedtuple("SqueezeOutputs", ['relu1','relu2','relu3','relu4','relu5','relu6','relu7']) - out = vgg_outputs(h_relu1,h_relu2,h_relu3,h_relu4,h_relu5,h_relu6,h_relu7) - - return out - - -class alexnet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(alexnet, self).__init__() - alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(2): - self.slice1.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(2, 5): - self.slice2.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(5, 8): - self.slice3.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(8, 10): - 
self.slice4.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(10, 12): - self.slice5.add_module(str(x), alexnet_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1 = h - h = self.slice2(h) - h_relu2 = h - h = self.slice3(h) - h_relu3 = h - h = self.slice4(h) - h_relu4 = h - h = self.slice5(h) - h_relu5 = h - alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5']) - out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5) - - return out - -class vgg16(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(vgg16, self).__init__() - vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(4): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(4, 9): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(9, 16): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(16, 23): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(23, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1_2 = h - h = self.slice2(h) - h_relu2_2 = h - h = self.slice3(h) - h_relu3_3 = h - h = self.slice4(h) - h_relu4_3 = h - h = self.slice5(h) - h_relu5_3 = h - vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) - out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) - - return out - - - -class resnet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True, num=18): - super(resnet, self).__init__() - if(num==18): - self.net = tv.resnet18(pretrained=pretrained) - elif(num==34): - self.net = tv.resnet34(pretrained=pretrained) - elif(num==50): - self.net = tv.resnet50(pretrained=pretrained) - elif(num==101): - self.net = tv.resnet101(pretrained=pretrained) - elif(num==152): - self.net = tv.resnet152(pretrained=pretrained) - self.N_slices = 5 - - self.conv1 = self.net.conv1 - self.bn1 = self.net.bn1 - self.relu = self.net.relu - self.maxpool = self.net.maxpool - self.layer1 = self.net.layer1 - self.layer2 = self.net.layer2 - self.layer3 = self.net.layer3 - self.layer4 = self.net.layer4 - - def forward(self, X): - h = self.conv1(X) - h = self.bn1(h) - h = self.relu(h) - h_relu1 = h - h = self.maxpool(h) - h = self.layer1(h) - h_conv2 = h - h = self.layer2(h) - h_conv3 = h - h = self.layer3(h) - h_conv4 = h - h = self.layer4(h) - h_conv5 = h - - outputs = namedtuple("Outputs", ['relu1','conv2','conv3','conv4','conv5']) - out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5) - - return out diff --git a/spaces/ALSv/FSW/roop/processors/frame/face_swapper.py b/spaces/ALSv/FSW/roop/processors/frame/face_swapper.py deleted file mode 100644 index 0f735fcbdb8de145fa227ee428bca81764c8c57e..0000000000000000000000000000000000000000 --- a/spaces/ALSv/FSW/roop/processors/frame/face_swapper.py +++ /dev/null @@ -1,88 +0,0 @@ -from typing import Any, List, Callable -import cv2 -import insightface -import threading - -import roop.globals -import 
roop.processors.frame.core -from roop.core import update_status -from roop.face_analyser import get_one_face, get_many_faces -from roop.typing import Face, Frame -from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video - -FACE_SWAPPER = None -THREAD_LOCK = threading.Lock() -NAME = 'ROOP.FACE-SWAPPER' - - -def get_face_swapper() -> Any: - global FACE_SWAPPER - - with THREAD_LOCK: - if FACE_SWAPPER is None: - model_path = resolve_relative_path('../models/inswapper_128.onnx') - FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers) - return FACE_SWAPPER - - -def pre_check() -> bool: - download_directory_path = resolve_relative_path('../models') - conditional_download(download_directory_path, ['https://huggingface.co/Apex-X/inswapper_128.onnx/resolve/main/inswapper_128.onnx']) - return True - - -def pre_start() -> bool: - if not is_image(roop.globals.source_path): - update_status('Select an image for source path.', NAME) - return False - elif not get_one_face(cv2.imread(roop.globals.source_path)): - update_status('No face in source path detected.', NAME) - return False - if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path): - update_status('Select an image or video for target path.', NAME) - return False - return True - - -def post_process() -> None: - global FACE_SWAPPER - - FACE_SWAPPER = None - - -def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame: - return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True) - - -def process_frame(source_face: Face, temp_frame: Frame) -> Frame: - if roop.globals.many_faces: - many_faces = get_many_faces(temp_frame) - if many_faces: - for target_face in many_faces: - temp_frame = swap_face(source_face, target_face, temp_frame) - else: - target_face = get_one_face(temp_frame) - if target_face: - temp_frame = swap_face(source_face, target_face, temp_frame) - return temp_frame - - -def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: - source_face = get_one_face(cv2.imread(source_path)) - for temp_frame_path in temp_frame_paths: - temp_frame = cv2.imread(temp_frame_path) - result = process_frame(source_face, temp_frame) - cv2.imwrite(temp_frame_path, result) - if update: - update() - - -def process_image(source_path: str, target_path: str, output_path: str) -> None: - source_face = get_one_face(cv2.imread(source_path)) - target_frame = cv2.imread(target_path) - result = process_frame(source_face, target_frame) - cv2.imwrite(output_path, result) - - -def process_video(source_path: str, temp_frame_paths: List[str]) -> None: - roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames) \ No newline at end of file diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnetv1d152.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnetv1d152.py deleted file mode 100644 index 58cc73beb318e38f9ce79154a1265be1a7dba17b..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnetv1d152.py +++ /dev/null @@ -1,17 +0,0 @@ -# model settings -model = dict( - type='ImageClassifier', - backbone=dict( - type='ResNetV1d', - depth=152, - num_stages=4, - out_indices=(3, ), - style='pytorch'), - 
neck=dict(type='GlobalAveragePooling'), - head=dict( - type='LinearClsHead', - num_classes=1000, - in_channels=2048, - loss=dict(type='CrossEntropyLoss', loss_weight=1.0), - topk=(1, 5), - )) diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/websearch/searchWeb.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/websearch/searchWeb.ts deleted file mode 100644 index 42369689a102e1d01ea1f9a38d1b1a6f9a571298..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/websearch/searchWeb.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { SERPAPI_KEY, SERPER_API_KEY } from "$env/static/private"; - -import { getJson } from "serpapi"; -import type { GoogleParameters } from "serpapi"; - -// Show result as JSON -export async function searchWeb(query: string) { - if (SERPER_API_KEY) { - return await searchWebSerper(query); - } - if (SERPAPI_KEY) { - return await searchWebSerpApi(query); - } - throw new Error("No Serper.dev or SerpAPI key found"); -} - -export async function searchWebSerper(query: string) { - const params = { - q: query, - hl: "en", - gl: "us", - }; - - const response = await fetch("https://google.serper.dev/search", { - method: "POST", - body: JSON.stringify(params), - headers: { - "x-api-key": SERPER_API_KEY, - "Content-type": "application/json; charset=UTF-8", - }, - }); - - /* eslint-disable @typescript-eslint/no-explicit-any */ - const data = (await response.json()) as Record; - - if (!response.ok) { - throw new Error( - data["message"] ?? - `Serper API returned error code ${response.status} - ${response.statusText}` - ); - } - - return { - organic_results: data["organic"] ?? [], - knowledge_graph: data["knowledgeGraph"] ?? null, - answer_box: data["answerBox"] ?? null, - }; -} - -export async function searchWebSerpApi(query: string) { - const params = { - q: query, - hl: "en", - gl: "us", - google_domain: "google.com", - api_key: SERPAPI_KEY, - } satisfies GoogleParameters; - - // Show result as JSON - const response = await getJson("google", params); - - return response; -} diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/__init__.py b/spaces/AgentVerse/agentVerse/agentverse/environments/__init__.py deleted file mode 100644 index 099ceb1bd0de04d8d64e0da1b117be482805a4e3..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/environments/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from typing import Dict -from agentverse.registry import Registry - - -env_registry = Registry(name="EnvironmentRegistry") - - -from .base import BaseEnvironment, BaseRule - -# from .basic import PipelineEnvironment -from .simulation_env.basic import BasicEnvironment -from .simulation_env.pokemon import PokemonEnvironment -from .simulation_env.prisoner_dilemma import PrisonerDilemmaEnvironment -from .simulation_env.sde_team import SdeTeamEnvironment -from .simulation_env.sde_team_given_tests import SdeTeamGivenTestsEnvironment - -from .tasksolving_env.basic import BasicEnvironment diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/statesroundrectangle/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/statesroundrectangle/Factory.js deleted file mode 100644 index 0250758289ae9776c24d5e26eee10e921cc6940e..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/statesroundrectangle/Factory.js +++ /dev/null @@ -1,13 +0,0 @@ -import StatesRoundRectangle from './StatesRoundRectangle.js'; 
-import ObjectFactory from '../ObjectFactory.js'; -import SetValue from '../../../plugins/utils/object/SetValue.js'; - -ObjectFactory.register('statesRoundRectangle', function (config) { - var gameObject = new StatesRoundRectangle(this.scene, config); - this.scene.add.existing(gameObject); - return gameObject; -}); - -SetValue(window, 'RexPlugins.UI.StatesRoundRectangle', StatesRoundRectangle); - -export default StatesRoundRectangle; \ No newline at end of file diff --git a/spaces/Aitor/CVchat/README.md b/spaces/Aitor/CVchat/README.md deleted file mode 100644 index 0a796b4668b3ae269e9b75a256b3570257c4455a..0000000000000000000000000000000000000000 --- a/spaces/Aitor/CVchat/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: CVchat -emoji: 🌍 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: cc-by-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Amrrs/podscript/README.md b/spaces/Amrrs/podscript/README.md deleted file mode 100644 index f9cb8d7dc84eea3db7ba342192b88a425323f3cc..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/podscript/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Podscript -emoji: 📚 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/unet2d.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/unet2d.md deleted file mode 100644 index 29e8163f646c0cad427fe95b36221ce6ae02eb55..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/unet2d.md +++ /dev/null @@ -1,13 +0,0 @@ -# UNet2DModel - -The [UNet](https://huggingface.co/papers/1505.04597) model was originally introduced by Ronneberger et al for biomedical image segmentation, but it is also commonly used in 🤗 Diffusers because it outputs images that are the same size as the input. It is one of the most important components of a diffusion system because it facilitates the actual diffusion process. There are several variants of the UNet model in 🤗 Diffusers, depending on it's number of dimensions and whether it is a conditional model or not. This is a 2D UNet model. - -The abstract from the paper is: - -*There is large consent that successful training of deep networks requires many thousand annotated training samples. In this paper, we present a network and training strategy that relies on the strong use of data augmentation to use the available annotated samples more efficiently. The architecture consists of a contracting path to capture context and a symmetric expanding path that enables precise localization. We show that such a network can be trained end-to-end from very few images and outperforms the prior best method (a sliding-window convolutional network) on the ISBI challenge for segmentation of neuronal structures in electron microscopic stacks. Using the same network trained on transmitted light microscopy images (phase contrast and DIC) we won the ISBI cell tracking challenge 2015 in these categories by a large margin. Moreover, the network is fast. Segmentation of a 512x512 image takes less than a second on a recent GPU. 
The full implementation (based on Caffe) and the trained networks are available at http://lmb.informatik.uni-freiburg.de/people/ronneber/u-net.* - -## UNet2DModel -[[autodoc]] UNet2DModel - -## UNet2DOutput -[[autodoc]] models.unet_2d.UNet2DOutput \ No newline at end of file diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py deleted file mode 100644 index 190309fd42a1b76c12c82fc1acf0511494be5ac3..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py +++ /dev/null @@ -1,215 +0,0 @@ -import mmcv -import numpy as np -import torch - -from ..builder import BBOX_CODERS -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder): - """Legacy Delta XYWH BBox coder used in MMDet V1.x. - - Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2, - y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh) - back to original bbox (x1, y1, x2, y2). - - Note: - The main difference between :class`LegacyDeltaXYWHBBoxCoder` and - :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and - height calculation. We suggest to only use this coder when testing with - MMDet V1.x models. - - References: - .. [1] https://arxiv.org/abs/1311.2524 - - Args: - target_means (Sequence[float]): denormalizing means of target for - delta coordinates - target_stds (Sequence[float]): denormalizing standard deviation of - target for delta coordinates - """ - - def __init__(self, - target_means=(0., 0., 0., 0.), - target_stds=(1., 1., 1., 1.)): - super(BaseBBoxCoder, self).__init__() - self.means = target_means - self.stds = target_stds - - def encode(self, bboxes, gt_bboxes): - """Get box regression transformation deltas that can be used to - transform the ``bboxes`` into the ``gt_bboxes``. - - Args: - bboxes (torch.Tensor): source boxes, e.g., object proposals. - gt_bboxes (torch.Tensor): target of the transformation, e.g., - ground-truth boxes. - - Returns: - torch.Tensor: Box transformation deltas - """ - assert bboxes.size(0) == gt_bboxes.size(0) - assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 - encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means, - self.stds) - return encoded_bboxes - - def decode(self, - bboxes, - pred_bboxes, - max_shape=None, - wh_ratio_clip=16 / 1000): - """Apply transformation `pred_bboxes` to `boxes`. - - Args: - boxes (torch.Tensor): Basic boxes. - pred_bboxes (torch.Tensor): Encoded boxes with shape - max_shape (tuple[int], optional): Maximum shape of boxes. - Defaults to None. - wh_ratio_clip (float, optional): The allowed ratio between - width and height. - - Returns: - torch.Tensor: Decoded boxes. - """ - assert pred_bboxes.size(0) == bboxes.size(0) - decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means, - self.stds, max_shape, wh_ratio_clip) - - return decoded_bboxes - - -@mmcv.jit(coderize=True) -def legacy_bbox2delta(proposals, - gt, - means=(0., 0., 0., 0.), - stds=(1., 1., 1., 1.)): - """Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner. - - We usually compute the deltas of x, y, w, h of proposals w.r.t ground - truth bboxes to get regression target. 
- This is the inverse function of `delta2bbox()` - - Args: - proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) - gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) - means (Sequence[float]): Denormalizing means for delta coordinates - stds (Sequence[float]): Denormalizing standard deviation for delta - coordinates - - Returns: - Tensor: deltas with shape (N, 4), where columns represent dx, dy, - dw, dh. - """ - assert proposals.size() == gt.size() - - proposals = proposals.float() - gt = gt.float() - px = (proposals[..., 0] + proposals[..., 2]) * 0.5 - py = (proposals[..., 1] + proposals[..., 3]) * 0.5 - pw = proposals[..., 2] - proposals[..., 0] + 1.0 - ph = proposals[..., 3] - proposals[..., 1] + 1.0 - - gx = (gt[..., 0] + gt[..., 2]) * 0.5 - gy = (gt[..., 1] + gt[..., 3]) * 0.5 - gw = gt[..., 2] - gt[..., 0] + 1.0 - gh = gt[..., 3] - gt[..., 1] + 1.0 - - dx = (gx - px) / pw - dy = (gy - py) / ph - dw = torch.log(gw / pw) - dh = torch.log(gh / ph) - deltas = torch.stack([dx, dy, dw, dh], dim=-1) - - means = deltas.new_tensor(means).unsqueeze(0) - stds = deltas.new_tensor(stds).unsqueeze(0) - deltas = deltas.sub_(means).div_(stds) - - return deltas - - -@mmcv.jit(coderize=True) -def legacy_delta2bbox(rois, - deltas, - means=(0., 0., 0., 0.), - stds=(1., 1., 1., 1.), - max_shape=None, - wh_ratio_clip=16 / 1000): - """Apply deltas to shift/scale base boxes in the MMDet V1.x manner. - - Typically the rois are anchor or proposed bounding boxes and the deltas are - network outputs used to shift/scale those boxes. - This is the inverse function of `bbox2delta()` - - Args: - rois (Tensor): Boxes to be transformed. Has shape (N, 4) - deltas (Tensor): Encoded offsets with respect to each roi. - Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when - rois is a grid of anchors. Offset encoding follows [1]_. - means (Sequence[float]): Denormalizing means for delta coordinates - stds (Sequence[float]): Denormalizing standard deviation for delta - coordinates - max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) - wh_ratio_clip (float): Maximum aspect ratio for boxes. - - Returns: - Tensor: Boxes with shape (N, 4), where columns represent - tl_x, tl_y, br_x, br_y. - - References: - .. 
[1] https://arxiv.org/abs/1311.2524 - - Example: - >>> rois = torch.Tensor([[ 0., 0., 1., 1.], - >>> [ 0., 0., 1., 1.], - >>> [ 0., 0., 1., 1.], - >>> [ 5., 5., 5., 5.]]) - >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], - >>> [ 1., 1., 1., 1.], - >>> [ 0., 0., 2., -1.], - >>> [ 0.7, -1.9, -0.5, 0.3]]) - >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32)) - tensor([[0.0000, 0.0000, 1.5000, 1.5000], - [0.0000, 0.0000, 5.2183, 5.2183], - [0.0000, 0.1321, 7.8891, 0.8679], - [5.3967, 2.4251, 6.0033, 3.7749]]) - """ - means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) - stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) - denorm_deltas = deltas * stds + means - dx = denorm_deltas[:, 0::4] - dy = denorm_deltas[:, 1::4] - dw = denorm_deltas[:, 2::4] - dh = denorm_deltas[:, 3::4] - max_ratio = np.abs(np.log(wh_ratio_clip)) - dw = dw.clamp(min=-max_ratio, max=max_ratio) - dh = dh.clamp(min=-max_ratio, max=max_ratio) - # Compute center of each roi - px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) - py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) - # Compute width/height of each roi - pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw) - ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh) - # Use exp(network energy) to enlarge/shrink each roi - gw = pw * dw.exp() - gh = ph * dh.exp() - # Use network energy to shift the center of each roi - gx = px + pw * dx - gy = py + ph * dy - # Convert center-xy/width/height to top-left, bottom-right - - # The true legacy box coder should +- 0.5 here. - # However, current implementation improves the performance when testing - # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP) - x1 = gx - gw * 0.5 - y1 = gy - gh * 0.5 - x2 = gx + gw * 0.5 - y2 = gy + gh * 0.5 - if max_shape is not None: - x1 = x1.clamp(min=0, max=max_shape[1] - 1) - y1 = y1.clamp(min=0, max=max_shape[0] - 1) - x2 = x2.clamp(min=0, max=max_shape[1] - 1) - y2 = y2.clamp(min=0, max=max_shape[0] - 1) - bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas) - return bboxes diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 4f1b9e19411eb963d16fd2a8174529e69ecd5a1a..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './dnl_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/README.md b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/README.md deleted file mode 100644 index 6ab8d213e97d0a9a62a244ca1d14fd130a48fe23..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# text-generation-webui documentation - -## Table of contents - -* [Audio Notification](Audio-Notification.md) -* [Chat mode](Chat-mode.md) -* [DeepSpeed](DeepSpeed.md) -* [Docker](Docker.md) -* [ExLlama](ExLlama.md) -* [Extensions](Extensions.md) -* [GPTQ models (4 bit mode)](GPTQ-models-(4-bit-mode).md) -* [LLaMA model](LLaMA-model.md) -* [llama.cpp](llama.cpp.md) -* [LoRA](LoRA.md) -* [Low VRAM guide](Low-VRAM-guide.md) -* [RWKV model](RWKV-model.md) -* [Spell 
book](Spell-book.md) -* [System requirements](System-requirements.md) -* [Training LoRAs](Training-LoRAs.md) -* [Windows installation guide](Windows-installation-guide.md) -* [WSL installation guide](WSL-installation-guide.md) diff --git a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/README.md b/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/README.md deleted file mode 100644 index 15b71f11baae018bc0de5caad0beb3c02fbb811e..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/README.md +++ /dev/null @@ -1,147 +0,0 @@ -# TFill -[paper](https://openaccess.thecvf.com/content/CVPR2022/html/Zheng_Bridging_Global_Context_Interactions_for_High-Fidelity_Image_Completion_CVPR_2022_paper.html) | [arXiv](https://arxiv.org/abs/2104.00845) | [Project](https://chuanxiaz.com/tfill/) | [Video](https://www.youtube.com/watch?v=efB1fw0jiLs&feature=youtu.be) - -This repository implements the training, testing and editing tools for "Bridging Global Context Interactions for High-Fidelity Image Completion (CVPR2022, scores: 1, 1, 2, 2)" by [Chuanxia Zheng](https://www.chuanxiaz.com), [Tat-Jen Cham](https://personal.ntu.edu.sg/astjcham/), [Jianfei Cai](https://jianfei-cai.github.io/) and [Dinh Phung](https://research.monash.edu/en/persons/dinh-phung). Given masked images, the proposed **TFill** model is able to generate high-fidelity plausible results on various settings. - -## Examples -![teaser](images/example.png) - -## Object Removal -![teaser](images/tfill_removal.gif) - -## Object Repair -![teaser](images/tfill_repair.gif) - -## Framework -We propose the two-stages image completion framework, where the upper content inference network (TFill-*Coarse*) generates semantically correct content using a transformer encoder to directly capture the global context information; the lower appearance refinement network (TFill-*refined*) copies global visible and generated features to holes. - -![teaser](images/framework.png) - - - -# Getting started - -- Clone this repo: - -``` -git clone https://github.com/lyndonzheng/TFill -cd TFill -``` -## Requirements -The original model is trained and evaluated with Pytorch v1.9.1, which cannot be visited in current [PyTorch](https://pytorch.org/get-started/previous-versions/). Therefore, we create a new environment with Pytorch v1.10.0 to test the model, where the performance is the same. - -A suitable [conda](https://conda.io/) environment named `Tfill` can be created and activated with: - -``` -conda env create -f environment.yaml -conda activate TFill -``` -## Runing pretrained models -Download the pre-trained models using the following links ([CelebA-HQ](https://drive.google.com/drive/folders/1ntbVDjJ7-nAt4nLGuu7RNi3QpLfh40gk?usp=sharing), [FFHQ](https://drive.google.com/drive/folders/1xuAsShrw9wI5Be0sQka3vZEsfwnq0pPT?usp=sharing), [ImageNet](https://drive.google.com/drive/folders/1B4RswBUD6_jXAu3MVz3LtuNfoV4wTmGf?usp=sharing), [Plcases2](https://drive.google.com/drive/folders/154ikacQ8A2JLC8iIGda8jiZN-ysL1xh5?usp=sharing) -) and put them under```checkpoints/``` directory. It should have the following structure: - -``` -./checkpoints/ -├── celeba -│ ├── latest_net_D.pth -│ ├── latest_net_D_Ref.pth -│ ├── latest_net_E.pth -│ ├── latest_net_G.pth -│ ├── latest_net_G_Ref.pth -│ ├── latest_net_T.pth -├── ffhq -│ ├── ... -├── ... -``` - -- Test the model -``` -sh ./scripts/test.sh -``` -For different models, the users just need to modify lines 2-4, including ```name```,```img_file```,```mask_file```. 
For instance, we can replace the *celeba* to *imagenet*. - -The default results will be stored under the ```results/``` folder, in which: - -- ```examples/```: shows original and masked images; -- ```img_out/```: shows upsampled *Coarse* outputs; -- ```img_ref_out/```: shows the final *Refined* outputs. - -## Datasets -- ```face dataset```: - - 24,183 training images and 2,824 test images from [CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) and use the algorithm of [Growing GANs](https://github.com/tkarras/progressive_growing_of_gans) to get the high-resolution CelebA-HQ dataset. - - 60,000 training images and 10,000 test images from [FFHQ](https://github.com/NVlabs/ffhq-dataset) provided by [StyleGAN](https://github.com/NVlabs/stylegan). -- ```natural scenery```: original training and val images from [Places2](http://places2.csail.mit.edu/). -- ```object``` original training images from [ImageNet](http://www.image-net.org/). - -## Traning - -- Train a model (two stage: *Coarse* and *Refinement*) -``` -sh ./scripts/train.sh -``` -The default setting is for the top *Coarse* training. The users just need to replace the *coarse* with *refine* at line 6. Then, the model can continue training for high-resolution image completion. -More hyper-parameter can be in ```options/```. - -The coarse results using transformer and restrictive CNN is impressive, which provides plausible results for both **foreground** objects and **background** scene. - -![teaser](images/center_imagenet.jpg) -![teaser](images/center_places2.jpg) - -# GUI -The GUI operation is similar to our previous GUI in [PIC](https://github.com/lyndonzheng/Pluralistic-Inpainting), where steps are also the same. - -Basic usage is: - -``` -sh ./scripts/ui.sh -``` -In ```gui/ui_model.py```, users can modify the ```img_root```(line 30) and the corresponding ```img_files```(line 31) to randomly edit images from the testing dataset. - -## Editing Examples - -- **Results (original, output) for face editing** - -![teaser](images/free_face.jpg) - -- **Results (original, masked input, output) for nature scene editing** - -![teaser](images/free_nature.jpg) - -## Next -- Higher-resolution pluralistic image completion - -## License -This work is licensed under a MIT License. - -This software is for educational and academic research purpose only. If you wish to obtain a commercial royalty bearing license to this software, please contact us at chuanxia001@e.ntu.edu.sg. - -## Citation - -The code also uses our previous [PIC](https://github.com/lyndonzheng/Pluralistic-Inpainting). If you use this code for your research, please cite our papers. 
-``` -@InProceedings{Zheng_2022_CVPR, - author = {Zheng, Chuanxia and Cham, Tat-Jen and Cai, Jianfei and Phung, Dinh}, - title = {Bridging Global Context Interactions for High-Fidelity Image Completion}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2022}, - pages = {11512-11522} -} - -@inproceedings{zheng2019pluralistic, - title={Pluralistic Image Completion}, - author={Zheng, Chuanxia and Cham, Tat-Jen and Cai, Jianfei}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1438--1447}, - year={2019} -} - -@article{zheng2021pluralistic, - title={Pluralistic Free-From Image Completion}, - author={Zheng, Chuanxia and Cham, Tat-Jen and Cai, Jianfei}, - journal={International Journal of Computer Vision}, - pages={1--20}, - year={2021}, - publisher={Springer} -} -``` diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/transforms.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/transforms.py deleted file mode 100644 index 350cbc11662633ad7f8968eb10be2e7de6e384e9..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/midas/midas/transforms.py +++ /dev/null @@ -1,234 +0,0 @@ -import numpy as np -import cv2 -import math - - -def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): - """Rezise the sample to ensure the given size. Keeps aspect ratio. - - Args: - sample (dict): sample - size (tuple): image size - - Returns: - tuple: new size - """ - shape = list(sample["disparity"].shape) - - if shape[0] >= size[0] and shape[1] >= size[1]: - return sample - - scale = [0, 0] - scale[0] = size[0] / shape[0] - scale[1] = size[1] / shape[1] - - scale = max(scale) - - shape[0] = math.ceil(scale * shape[0]) - shape[1] = math.ceil(scale * shape[1]) - - # resize - sample["image"] = cv2.resize( - sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method - ) - - sample["disparity"] = cv2.resize( - sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST - ) - sample["mask"] = cv2.resize( - sample["mask"].astype(np.float32), - tuple(shape[::-1]), - interpolation=cv2.INTER_NEAREST, - ) - sample["mask"] = sample["mask"].astype(bool) - - return tuple(shape) - - -class Resize(object): - """Resize sample to given size (width, height). - """ - - def __init__( - self, - width, - height, - resize_target=True, - keep_aspect_ratio=False, - ensure_multiple_of=1, - resize_method="lower_bound", - image_interpolation_method=cv2.INTER_AREA, - ): - """Init. - - Args: - width (int): desired output width - height (int): desired output height - resize_target (bool, optional): - True: Resize the full sample (image, mask, target). - False: Resize image only. - Defaults to True. - keep_aspect_ratio (bool, optional): - True: Keep the aspect ratio of the input sample. - Output sample might not have the given width and height, and - resize behaviour depends on the parameter 'resize_method'. - Defaults to False. - ensure_multiple_of (int, optional): - Output width and height is constrained to be multiple of this parameter. - Defaults to 1. - resize_method (str, optional): - "lower_bound": Output will be at least as large as the given size. - "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) - "minimal": Scale as least as possible. (Output size might be smaller than given size.) 
- Defaults to "lower_bound". - """ - self.__width = width - self.__height = height - - self.__resize_target = resize_target - self.__keep_aspect_ratio = keep_aspect_ratio - self.__multiple_of = ensure_multiple_of - self.__resize_method = resize_method - self.__image_interpolation_method = image_interpolation_method - - def constrain_to_multiple_of(self, x, min_val=0, max_val=None): - y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) - - if max_val is not None and y > max_val: - y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) - - if y < min_val: - y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) - - return y - - def get_size(self, width, height): - # determine new height and width - scale_height = self.__height / height - scale_width = self.__width / width - - if self.__keep_aspect_ratio: - if self.__resize_method == "lower_bound": - # scale such that output size is lower bound - if scale_width > scale_height: - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - elif self.__resize_method == "upper_bound": - # scale such that output size is upper bound - if scale_width < scale_height: - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - elif self.__resize_method == "minimal": - # scale as least as possbile - if abs(1 - scale_width) < abs(1 - scale_height): - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - else: - raise ValueError( - f"resize_method {self.__resize_method} not implemented" - ) - - if self.__resize_method == "lower_bound": - new_height = self.constrain_to_multiple_of( - scale_height * height, min_val=self.__height - ) - new_width = self.constrain_to_multiple_of( - scale_width * width, min_val=self.__width - ) - elif self.__resize_method == "upper_bound": - new_height = self.constrain_to_multiple_of( - scale_height * height, max_val=self.__height - ) - new_width = self.constrain_to_multiple_of( - scale_width * width, max_val=self.__width - ) - elif self.__resize_method == "minimal": - new_height = self.constrain_to_multiple_of(scale_height * height) - new_width = self.constrain_to_multiple_of(scale_width * width) - else: - raise ValueError(f"resize_method {self.__resize_method} not implemented") - - return (new_width, new_height) - - def __call__(self, sample): - width, height = self.get_size( - sample["image"].shape[1], sample["image"].shape[0] - ) - - # resize sample - sample["image"] = cv2.resize( - sample["image"], - (width, height), - interpolation=self.__image_interpolation_method, - ) - - if self.__resize_target: - if "disparity" in sample: - sample["disparity"] = cv2.resize( - sample["disparity"], - (width, height), - interpolation=cv2.INTER_NEAREST, - ) - - if "depth" in sample: - sample["depth"] = cv2.resize( - sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST - ) - - sample["mask"] = cv2.resize( - sample["mask"].astype(np.float32), - (width, height), - interpolation=cv2.INTER_NEAREST, - ) - sample["mask"] = sample["mask"].astype(bool) - - return sample - - -class NormalizeImage(object): - """Normlize image by given mean and std. - """ - - def __init__(self, mean, std): - self.__mean = mean - self.__std = std - - def __call__(self, sample): - sample["image"] = (sample["image"] - self.__mean) / self.__std - - return sample - - -class PrepareForNet(object): - """Prepare sample for usage as network input. 
- """ - - def __init__(self): - pass - - def __call__(self, sample): - image = np.transpose(sample["image"], (2, 0, 1)) - sample["image"] = np.ascontiguousarray(image).astype(np.float32) - - if "mask" in sample: - sample["mask"] = sample["mask"].astype(np.float32) - sample["mask"] = np.ascontiguousarray(sample["mask"]) - - if "disparity" in sample: - disparity = sample["disparity"].astype(np.float32) - sample["disparity"] = np.ascontiguousarray(disparity) - - if "depth" in sample: - depth = sample["depth"].astype(np.float32) - sample["depth"] = np.ascontiguousarray(depth) - - return sample diff --git a/spaces/Apex-X/ROOPOK/roop/face_reference.py b/spaces/Apex-X/ROOPOK/roop/face_reference.py deleted file mode 100644 index 3c3e1f1c6e13c73ceafd40c0912c066a3a86a528..0000000000000000000000000000000000000000 --- a/spaces/Apex-X/ROOPOK/roop/face_reference.py +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Optional - -from roop.typing import Face - -FACE_REFERENCE = None - - -def get_face_reference() -> Optional[Face]: - return FACE_REFERENCE - - -def set_face_reference(face: Face) -> None: - global FACE_REFERENCE - - FACE_REFERENCE = face - - -def clear_face_reference() -> None: - global FACE_REFERENCE - - FACE_REFERENCE = None diff --git a/spaces/Apex-X/Tm/roop/typing.py b/spaces/Apex-X/Tm/roop/typing.py deleted file mode 100644 index 1cff7440616e20bfe7b8bc287f86d11bf1b0f083..0000000000000000000000000000000000000000 --- a/spaces/Apex-X/Tm/roop/typing.py +++ /dev/null @@ -1,7 +0,0 @@ -from typing import Any - -from insightface.app.common import Face -import numpy - -Face = Face -Frame = numpy.ndarray[Any, Any] diff --git a/spaces/Artgor/digit-draw-detect/pages/about.py b/spaces/Artgor/digit-draw-detect/pages/about.py deleted file mode 100644 index fa7662d26c001d73e544bf9ff4a2250e244ea7e9..0000000000000000000000000000000000000000 --- a/spaces/Artgor/digit-draw-detect/pages/about.py +++ /dev/null @@ -1,25 +0,0 @@ -import streamlit as st - -text = """ -## A brief history of the project. -The first idea of the project was conceived in the summer of 2017. I completed cs231n course and wanted to put my skills to the test. So I studied Flask and many other tools from scratch and made an [app](https://github.com/Erlemar/digit-draw-recognize) for recognizing handwritten digits. It had two models - a simple feed-forward neural net written in NumPy and a CNN written in Tensorflow. One fun feature of this app was online learning: the model continuously improved its predictions based on new inputs (though this did sometimes lead to incorrect predictions). - -In 2019 I decided to update the [project](https://github.com/Erlemar/digit-draw-predict): I trained the new neural net in PyTorch and used cv2 to detect separate digits (people often drew multiple digits). More than that, the model had 11 classes - I made a separate class for "junk", as people often drew things for fun: animals, objects, or words. - -The first two versions were deployed on Heroku's free plan, but in 2022 these plans were discontinued. I didn't want my project to die because of nostalgia, so I developed a new version and deployed it differently. The current version has an object detection model (yolo3 written from scratch) and 12 classes (digits, junk, and **censored**). -If you want to know what does **censored** means, just try to draw something 😉 - -Initially, I considered deploying the app on Streamlit Cloud, but its computational limits were too low, so now the model is live on HuggingFace Spaces. 
- -### Links with additional information: - -* [Project page on my personal website](https://andlukyane.com/project/drawn-digits-prediction) -* [A dataset with the digits and bounding boxes on Kaggle](https://www.kaggle.com/datasets/artgor/handwritten-digits-and-bounding-boxes) -* [Training code](https://github.com/Erlemar/pytorch_tempest_pet_) -* [Blogpost on my personal website](https://andlukyane.com/blog/a-third-life-of-a-personal-project) -* [Blogpost on medium](https://towardsdatascience.com/the-third-life-of-a-personal-pet-project-for-handwritten-digit-recognition-fd908dc8e7a1) -* [Russian blogpost on habr](https://habr.com/ru/company/ods/blog/707046/) -* [Project code on GitHub](https://github.com/Erlemar/digit-draw-detect) -""" - -st.markdown(text, unsafe_allow_html=True) diff --git a/spaces/Asifpa6/emotion-analyzer-app/app.py b/spaces/Asifpa6/emotion-analyzer-app/app.py deleted file mode 100644 index d31a4fe2593426fa86cd96d87873053f4da98089..0000000000000000000000000000000000000000 --- a/spaces/Asifpa6/emotion-analyzer-app/app.py +++ /dev/null @@ -1,66 +0,0 @@ -import streamlit as st -import pandas as pd -from emotion_analysis import get_emotion -import base64 - - -def read_data(file_path): - file_extension = file_path.split('.')[-1].lower() - - if file_extension == 'xlsx' or file_extension == 'xls': - data = pd.read_excel(file_path) - elif file_extension == 'csv': - data = pd.read_csv(file_path) - else: - raise ValueError("Unsupported file format. Only Excel (xlsx, xls) and CSV (csv) files are supported.") - - return data - - -# Streamlit app -def main(): - st.title("Text Emotion Detection") - menu = ["Input Text", "Batch Processing"] - option = st.sidebar.radio("Select an option", menu) - - - if option == "Input Text": - text = st.text_area("Enter your text:") - if st.button("Submit"): - if text.strip() != "": - emotion_detail, confidence_score = get_emotion(text) - st.write("Detected Emotion") - st.write(f"{emotion_detail[0]} - {confidence_score}") - else: - st.write("Please enter some text.") - - elif option == "Batch Processing": - uploaded_file = st.file_uploader("Upload CSV or Excel file", type=["csv", "xlsx"]) - - if uploaded_file is not None: - file_name = uploaded_file.name - file_extension = file_name.split('.')[-1].lower() - file_name = uploaded_file.name - if file_extension == 'xlsx' or file_extension == 'xls': - dataframe = pd.read_excel(uploaded_file) - elif file_extension == 'csv': - dataframe = pd.read_csv(uploaded_file) - else: - raise ValueError("Unsupported file format. 
Only Excel (xlsx, xls) and CSV (csv) files are supported.") - # dataframe = pd.read_excel(uploaded_file) - if "text" not in dataframe.columns: - st.write("CSV file should have a 'text' column.") - else: - dataframe["emotion"], dataframe["confidence"] = zip(*dataframe["text"].map(get_emotion)) - st.write("Detected Emotions") - st.write(dataframe) - # Download button - csv = dataframe.to_csv(index=False) - b64 = base64.b64encode(csv.encode()).decode() # Convert DataFrame to CSV string - href = f'Download' - st.markdown(href, unsafe_allow_html=True) - else: - pass - -if __name__ == '__main__': - main() diff --git a/spaces/Asmithayellow/Asmi/app.py b/spaces/Asmithayellow/Asmi/app.py deleted file mode 100644 index a362dcc7d0ddd1eee86961f1bc3db6d894fbd3d5..0000000000000000000000000000000000000000 --- a/spaces/Asmithayellow/Asmi/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """You are a helpful assistant to answer all user queries. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/__init__.py deleted file mode 100644 index fe581623d89d67a49eb43f3c3e88f3f450257707..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/__init__.py +++ /dev/null @@ -1,115 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from typing import List, Union - -from .charsetgroupprober import CharSetGroupProber -from .charsetprober import CharSetProber -from .enums import InputState -from .resultdict import ResultDict -from .universaldetector import UniversalDetector -from .version import VERSION, __version__ - -__all__ = ["UniversalDetector", "detect", "detect_all", "__version__", "VERSION"] - - -def detect( - byte_str: Union[bytes, bytearray], should_rename_legacy: bool = False -) -> ResultDict: - """ - Detect the encoding of the given byte string. - - :param byte_str: The byte sequence to examine. - :type byte_str: ``bytes`` or ``bytearray`` - :param should_rename_legacy: Should we rename legacy encodings - to their more modern equivalents? - :type should_rename_legacy: ``bool`` - """ - if not isinstance(byte_str, bytearray): - if not isinstance(byte_str, bytes): - raise TypeError( - f"Expected object of type bytes or bytearray, got: {type(byte_str)}" - ) - byte_str = bytearray(byte_str) - detector = UniversalDetector(should_rename_legacy=should_rename_legacy) - detector.feed(byte_str) - return detector.close() - - -def detect_all( - byte_str: Union[bytes, bytearray], - ignore_threshold: bool = False, - should_rename_legacy: bool = False, -) -> List[ResultDict]: - """ - Detect all the possible encodings of the given byte string. - - :param byte_str: The byte sequence to examine. - :type byte_str: ``bytes`` or ``bytearray`` - :param ignore_threshold: Include encodings that are below - ``UniversalDetector.MINIMUM_THRESHOLD`` - in results. - :type ignore_threshold: ``bool`` - :param should_rename_legacy: Should we rename legacy encodings - to their more modern equivalents? 
- :type should_rename_legacy: ``bool`` - """ - if not isinstance(byte_str, bytearray): - if not isinstance(byte_str, bytes): - raise TypeError( - f"Expected object of type bytes or bytearray, got: {type(byte_str)}" - ) - byte_str = bytearray(byte_str) - - detector = UniversalDetector(should_rename_legacy=should_rename_legacy) - detector.feed(byte_str) - detector.close() - - if detector.input_state == InputState.HIGH_BYTE: - results: List[ResultDict] = [] - probers: List[CharSetProber] = [] - for prober in detector.charset_probers: - if isinstance(prober, CharSetGroupProber): - probers.extend(p for p in prober.probers) - else: - probers.append(prober) - for prober in probers: - if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD: - charset_name = prober.charset_name or "" - lower_charset_name = charset_name.lower() - # Use Windows encoding name instead of ISO-8859 if we saw any - # extra Windows-specific bytes - if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes: - charset_name = detector.ISO_WIN_MAP.get( - lower_charset_name, charset_name - ) - # Rename legacy encodings with superset encodings if asked - if should_rename_legacy: - charset_name = detector.LEGACY_MAP.get( - charset_name.lower(), charset_name - ) - results.append( - { - "encoding": charset_name, - "confidence": prober.get_confidence(), - "language": prober.language, - } - ) - if len(results) > 0: - return sorted(results, key=lambda result: -result["confidence"]) - - return [detector.result] diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/help.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/help.py deleted file mode 100644 index 2d292c2f062cd80cd108aac503eae7b635ceec8d..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/help.py +++ /dev/null @@ -1,131 +0,0 @@ -"""Module containing bug report helper(s).""" - -import json -import platform -import ssl -import sys - -from pip._vendor import idna -from pip._vendor import urllib3 - -from . import __version__ as requests_version - -charset_normalizer = None - -try: - from pip._vendor import chardet -except ImportError: - chardet = None - -try: - from pip._vendor.urllib3.contrib import pyopenssl -except ImportError: - pyopenssl = None - OpenSSL = None - cryptography = None -else: - import cryptography - import OpenSSL - - -def _implementation(): - """Return a dict with the Python implementation and version. - - Provide both the name and the version of the Python implementation - currently running. For example, on CPython 3.10.3 it will return - {'name': 'CPython', 'version': '3.10.3'}. - - This function works best on CPython and PyPy: in particular, it probably - doesn't work for Jython or IronPython. Future investigation should be done - to work out the correct shape of the code for those platforms. 
- """ - implementation = platform.python_implementation() - - if implementation == "CPython": - implementation_version = platform.python_version() - elif implementation == "PyPy": - implementation_version = "{}.{}.{}".format( - sys.pypy_version_info.major, - sys.pypy_version_info.minor, - sys.pypy_version_info.micro, - ) - if sys.pypy_version_info.releaselevel != "final": - implementation_version = "".join( - [implementation_version, sys.pypy_version_info.releaselevel] - ) - elif implementation == "Jython": - implementation_version = platform.python_version() # Complete Guess - elif implementation == "IronPython": - implementation_version = platform.python_version() # Complete Guess - else: - implementation_version = "Unknown" - - return {"name": implementation, "version": implementation_version} - - -def info(): - """Generate information for a bug report.""" - try: - platform_info = { - "system": platform.system(), - "release": platform.release(), - } - except OSError: - platform_info = { - "system": "Unknown", - "release": "Unknown", - } - - implementation_info = _implementation() - urllib3_info = {"version": urllib3.__version__} - charset_normalizer_info = {"version": None} - chardet_info = {"version": None} - if charset_normalizer: - charset_normalizer_info = {"version": charset_normalizer.__version__} - if chardet: - chardet_info = {"version": chardet.__version__} - - pyopenssl_info = { - "version": None, - "openssl_version": "", - } - if OpenSSL: - pyopenssl_info = { - "version": OpenSSL.__version__, - "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}", - } - cryptography_info = { - "version": getattr(cryptography, "__version__", ""), - } - idna_info = { - "version": getattr(idna, "__version__", ""), - } - - system_ssl = ssl.OPENSSL_VERSION_NUMBER - system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""} - - return { - "platform": platform_info, - "implementation": implementation_info, - "system_ssl": system_ssl_info, - "using_pyopenssl": pyopenssl is not None, - "using_charset_normalizer": chardet is None, - "pyOpenSSL": pyopenssl_info, - "urllib3": urllib3_info, - "chardet": chardet_info, - "charset_normalizer": charset_normalizer_info, - "cryptography": cryptography_info, - "idna": idna_info, - "requests": { - "version": requests_version, - }, - } - - -def main(): - """Pretty-print the bug information as JSON.""" - print(json.dumps(info(), sort_keys=True, indent=2)) - - -if __name__ == "__main__": - main() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py deleted file mode 100644 index cb9fc820cb352aa6e92705aab4f55cbc2eff96bc..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py +++ /dev/null @@ -1,98 +0,0 @@ -# flake8: noqa - -import abc -import sys -import pathlib -from contextlib import suppress - -if sys.version_info >= (3, 10): - from zipfile import Path as ZipPath # type: ignore -else: - from ..zipp import Path as ZipPath # type: ignore - - -try: - from typing import runtime_checkable # type: ignore -except ImportError: - - def runtime_checkable(cls): # type: ignore - return cls - - -try: - from typing import Protocol # type: ignore -except ImportError: - Protocol = abc.ABC # type: ignore - - -class 
TraversableResourcesLoader: - """ - Adapt loaders to provide TraversableResources and other - compatibility. - - Used primarily for Python 3.9 and earlier where the native - loaders do not yet implement TraversableResources. - """ - - def __init__(self, spec): - self.spec = spec - - @property - def path(self): - return self.spec.origin - - def get_resource_reader(self, name): - from . import readers, _adapters - - def _zip_reader(spec): - with suppress(AttributeError): - return readers.ZipReader(spec.loader, spec.name) - - def _namespace_reader(spec): - with suppress(AttributeError, ValueError): - return readers.NamespaceReader(spec.submodule_search_locations) - - def _available_reader(spec): - with suppress(AttributeError): - return spec.loader.get_resource_reader(spec.name) - - def _native_reader(spec): - reader = _available_reader(spec) - return reader if hasattr(reader, 'files') else None - - def _file_reader(spec): - try: - path = pathlib.Path(self.path) - except TypeError: - return None - if path.exists(): - return readers.FileReader(self) - - return ( - # native reader if it supplies 'files' - _native_reader(self.spec) - or - # local ZipReader if a zip module - _zip_reader(self.spec) - or - # local NamespaceReader if a namespace module - _namespace_reader(self.spec) - or - # local FileReader - _file_reader(self.spec) - # fallback - adapt the spec ResourceReader to TraversableReader - or _adapters.CompatibilityFiles(self.spec) - ) - - -def wrap_spec(package): - """ - Construct a package spec with traversable compatibility - on the spec/loader/reader. - - Supersedes _adapters.wrap_spec to use TraversableResourcesLoader - from above for older Python compatibility (<3.10). - """ - from . import _adapters - - return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/README.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/README.md deleted file mode 100644 index d3e1d5cf533555e19c6326777f792ac82a560a84..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# Probabilistic two-stage detection -Two-stage object detectors that use class-agnostic one-stage detectors as the proposal network. - - -

- -> [**Probabilistic two-stage detection**](http://arxiv.org/abs/2103.07461), -> Xingyi Zhou, Vladlen Koltun, Philipp Krähenbühl, -> *arXiv technical report ([arXiv 2103.07461](http://arxiv.org/abs/2103.07461))* - -Contact: [zhouxy@cs.utexas.edu](mailto:zhouxy@cs.utexas.edu). Any questions or discussions are welcomed! - -## Abstract - -We develop a probabilistic interpretation of two-stage object detection. We show that this probabilistic interpretation motivates a number of common empirical training practices. It also suggests changes to two-stage detection pipelines. Specifically, the first stage should infer proper object-vs-background likelihoods, which should then inform the overall score of the detector. A standard region proposal network (RPN) cannot infer this likelihood sufficiently well, but many one-stage detectors can. We show how to build a probabilistic two-stage detector from any state-of-the-art one-stage detector. The resulting detectors are faster and more accurate than both their one- and two-stage precursors. Our detector achieves 56.4 mAP on COCO test-dev with single-scale testing, outperforming all published results. Using a lightweight backbone, our detector achieves 49.2 mAP on COCO at 33 fps on a Titan Xp. - -## Summary - -- Two-stage CenterNet: First stage estimates object probabilities, second stage conditionally classifies objects. - -- Resulting detector is faster and more accurate than both traditional two-stage detectors (fewer proposals required), and one-stage detectors (lighter first stage head). - -- Our best model achieves 56.4 mAP on COCO test-dev. - -- This repo also includes a detectron2-based CenterNet implementation with better accuracy (42.5 mAP at 70FPS) and a new FPN version of CenterNet (40.2 mAP with Res50_1x). - -## Main results - -All models are trained with multi-scale training, and tested with a single scale. The FPS is tested on a Titan RTX GPU. -More models and details can be found in the [MODEL_ZOO](projects/CenterNet2/centernet2_docs/MODEL_ZOO.md). - -#### COCO - -| Model | COCO val mAP | FPS | -|-------------------------------------------|---------------|-------| -| CenterNet-S4_DLA_8x | 42.5 | 71 | -| CenterNet2_R50_1x | 42.9 | 24 | -| CenterNet2_X101-DCN_2x | 49.9 | 8 | -| CenterNet2_R2-101-DCN-BiFPN_4x+4x_1560_ST | 56.1 | 5 | -| CenterNet2_DLA-BiFPN-P5_24x_ST | 49.2 | 38 | - - -#### LVIS - -| Model | val mAP box | -| ------------------------- | ----------- | -| CenterNet2_R50_1x | 26.5 | -| CenterNet2_FedLoss_R50_1x | 28.3 | - - -#### Objects365 - -| Model | val mAP | -|-------------------------------------------|----------| -| CenterNet2_R50_1x | 22.6 | - -## Installation - -Our project is developed on [detectron2](https://github.com/facebookresearch/detectron2). Please follow the official detectron2 [installation](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). All our code is under `projects/CenterNet2/`. In theory, you should be able to copy-paste `projects/CenterNet2/` to the latest detectron2 release or your own detectron2 repo to run our project. There might be API changes in future detectron2 releases that make it incompatible. - -We use the default detectron2 demo script. 
To run inference on an image folder using our pre-trained model, run - -~~~ -python projects/CenterNet2/demo/demo.py --config-file projects/CenterNet2/configs/CenterNet2_R50_1x.yaml --input path/to/image/ --opts MODEL.WEIGHTS models/CenterNet2_R50_1x.pth -~~~ - -## Benchmark evaluation and training - -Please check detectron2 [GETTING_STARTED.md](https://github.com/facebookresearch/detectron2/blob/master/GETTING_STARTED.md) for running evaluation and training. Our config files are under `projects/CenterNet2/configs` and the pre-trained models are in the [MODEL_ZOO](projects/CenterNet2/centernet2_docs/MODEL_ZOO.md). - - -## License - -Our code under `projects/CenterNet2/` is under [Apache 2.0 license](projects/CenterNet2/LICENSE). `projects/CenterNet2/centernet/modeling/backbone/bifpn_fcos.py` are from [AdelaiDet](https://github.com/aim-uofa/AdelaiDet), which follows the original [non-commercial license](https://github.com/aim-uofa/AdelaiDet/blob/master/LICENSE). The code from detectron2 follows the original [Apache 2.0 license](LICENSE). - -## Citation - -If you find this project useful for your research, please use the following BibTeX entry. - - @inproceedings{zhou2021probablistic, - title={Probabilistic two-stage detection}, - author={Zhou, Xingyi and Koltun, Vladlen and Kr{\"a}henb{\"u}hl, Philipp}, - booktitle={arXiv preprint arXiv:2103.07461}, - year={2021} - } diff --git a/spaces/Banbri/zcvzcv/src/app/engine/presets.ts b/spaces/Banbri/zcvzcv/src/app/engine/presets.ts deleted file mode 100644 index 7bc9ac57fc06a7f31ad3735c10f9b0c2301e03d5..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/app/engine/presets.ts +++ /dev/null @@ -1,588 +0,0 @@ -import { FontName, actionman, komika, vtc } from "@/lib/fonts" -import { pick } from "@/lib/pick" -import { NextFontWithVariable } from "next/dist/compiled/@next/font" - -export type ComicFamily = - | "american" - | "asian" - | "european" - -export type ComicColor = - | "color" - | "grayscale" - | "monochrome" - -export interface Preset { - id: string - label: string - family: ComicFamily - color: ComicColor - font: FontName - llmPrompt: string - imagePrompt: (prompt: string) => string[] - negativePrompt: (prompt: string) => string[] -} - -// ATTENTION!! 
negative prompts are not supported by the VideoChain API yet - -export const presets: Record = { - random: { - id: "random", - label: "Random style", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "", - imagePrompt: (prompt: string) => [], - negativePrompt: () => [], - }, - japanese_manga: { - id: "japanese_manga", - label: "Japanese", - family: "asian", - color: "grayscale", - font: "actionman", - llmPrompt: "japanese manga", - imagePrompt: (prompt: string) => [ - `grayscale`, - `intricate details`, - `japanese manga about ${prompt}`, - "single panel", - "manga", - "japanese", - "intricate", - "detailed", - // "drawing" - ], - negativePrompt: () => [ - "franco-belgian comic", - "color album", - "color", - "american comic", - "photo", - "painting", - "3D render" - ], - }, - nihonga: { - id: "nihonga", - label: "Nihonga", - family: "asian", - color: "color", - font: "actionman", - llmPrompt: "japanese manga", - imagePrompt: (prompt: string) => [ - `japanese nihonga painting about ${prompt}`, - "Nihonga", - "ancient japanese painting", - "intricate", - "detailed", - // "drawing" - ], - negativePrompt: () => [ - "franco-belgian comic", - "color album", - "color", - "manga", - "comic", - "american comic", - "photo", - "painting", - "3D render" - ], - }, - franco_belgian: { - id: "franco_belgian", - label: "Franco-Belgian", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "Franco-Belgian comic (a \"bande dessinée\"), in the style of Franquin, Moebius etc", - imagePrompt: (prompt: string) => [ - "bande dessinée", - "franco-belgian comic", - `franco-belgian color comic about ${prompt}`, - "comic album", - // "color drawing" - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "photo", - "painting", - "3D render" - ], - }, - american_comic_90: { - id: "american_comic_90", - label: "American (modern)", - family: "american", - color: "color", - font: "actionman", - llmPrompt: "american comic", - imagePrompt: (prompt: string) => [ - "digital color comicbook style", - `modern american comic about ${prompt}`, - //"single panel", - // "2010s", - // "digital print", - // "color comicbook", - // "color drawing" - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "action", - "grayscale", - "monochrome", - "photo", - "painting", - "3D render" - ], - }, - - /* - american_comic_40: { - label: "American (1940)", - family: "american", - color: "color", - font: "actionman", - llmPrompt: "american comic", - imagePrompt: (prompt: string) => [ - `american comic about ${prompt}`, - "single panel", - "american comic", - "comicbook style", - "1940", - "40s", - "color comicbook", - "color drawing" - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "action", - "grayscale", - "monochrome", - "photo", - "painting", - "3D render" - ], - }, - */ - american_comic_50: { - id: "american_comic_50", - label: "American (1950)", - family: "american", - color: "color", - font: "actionman", - llmPrompt: "american comic", - imagePrompt: (prompt: string) => [ - "1950", - "50s", - `vintage american color comic about ${prompt}`, - // "single panel", - // "comicbook style", - // "color comicbook", - // "color drawing" - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "action", - "grayscale", - "monochrome", - "photo", - "painting", - "3D render" - ], - }, - /* - american_comic_60: { - label: "American (1960)", - family: "american", - color: "color", - font: 
"actionman", - llmPrompt: "american comic", - imagePrompt: (prompt: string) => [ - `american comic about ${prompt}`, - "single panel", - "american comic", - "comicbook style", - "1960", - "60s", - "color comicbook", - "color drawing" - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "action", - "grayscale", - "monochrome", - "photo", - "painting", - "3D render" - ], - }, - */ - - - flying_saucer: { - id: "flying_saucer", - label: "Flying saucer", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "new pulp science fiction", - imagePrompt: (prompt: string) => [ - `vintage color pulp comic panel`, - "40s", - "1940", - "vintage science fiction", - `${prompt}`, - // "single panel", - // "comic album" - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "photo", - "painting", - "3D render" - ], - }, - - humanoid: { - id: "humanoid", - label: "Humanoid", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "comic books by Moebius", - imagePrompt: (prompt: string) => [ - `color comic panel`, - "style of Moebius", - `${prompt}`, - "by Moebius", - "french comic panel", - "franco-belgian style", - "bande dessinée", - "single panel", - "intricate" - // "comic album" - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "photo", - "painting", - "3D render" - ], - }, - haddock: { - id: "haddock", - label: "Haddock", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "writing Tintin comic books", - imagePrompt: (prompt: string) => [ - `color comic panel`, - "style of Hergé", - "tintin style", - `${prompt}`, - "by Hergé", - "french comic panel", - "franco-belgian style", - // "color panel", - // "bande dessinée", - // "single panel", - // "comic album" - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "photo", - "painting", - "3D render" - ], - }, - /* - lurid: { - id: "lurid", - label: "Lurid", - family: "american", - color: "color", - font: "actionman", - llmPrompt: "1970s satirical and alternative underground comics", - imagePrompt: (prompt: string) => [ - `satirical color comic`, - `underground comix`, - `1970`, - `${prompt}`, - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "painting" - ], - }, - */ - armorican: { - id: "armorican", - label: "Armorican", - family: "european", - color: "monochrome", - font: "actionman", - llmPrompt: "french style comic books set in ancient Rome and Gaul", - imagePrompt: (prompt: string) => [ - `color comic panel`, - "romans", - "gauls", - "french comic panel", - "franco-belgian style", - `about ${prompt}`, - "bande dessinée", - "single panel", - // "comical", - // "comic album", - // "color drawing" - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "photo", - "painting", - "3D render" - ], - }, - render: { - id: "render", - label: "3D Render", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "new movie", - imagePrompt: (prompt: string) => [ - `3D render`, - `Blender`, - `3D animation`, - `Unreal engine`, - `${prompt}`, - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "painting" - ], - }, - klimt: { - id: "klimt", - label: "Klimt", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "Gustav Klimt art 
pieces.", - imagePrompt: (prompt: string) => [ - `golden`, - `patchwork`, - `style of Gustav Klimt`, - `Gustav Klimt painting`, - `intricate details`, - `${prompt}`, - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "painting" - ], - }, - medieval: { - id: "medieval", - label: "Medieval", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "medieval story (write in this style)", - imagePrompt: (prompt: string) => [ - `medieval illuminated manuscript`, - `illuminated manuscript of`, - `medieval`, - `intricate details`, - // `medieval color engraving`, - `${prompt}`, - // `medieval` - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "painting" - ], - }, - /* - glass: { - id: "glass", - label: "Glass", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "new movie", - imagePrompt: (prompt: string) => [ - `stained glass`, - `vitrail`, - `stained glass`, - // `medieval color engraving`, - `${prompt}`, - `medieval`, - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "painting" - ], - }, - */ - /* - voynich: { - id: "voynich", - label: "Voynich", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "new movie", - imagePrompt: (prompt: string) => [ - `voynich`, - `voynich page`, - // `medieval color engraving`, - `${prompt}`, - `medieval`, - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "painting" - ], - }, - */ - egyptian: { - id: "egyptian", - label: "Egyptian", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "ancient egyptian stories.", - imagePrompt: (prompt: string) => [ - `ancient egyptian wall painting`, - `ancient egypt`, - // `medieval color engraving`, - `${prompt}`, - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "painting" - ], - }, - /* - psx: { - label: "PSX", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "new movie", - imagePrompt: (prompt: string) => [ - `videogame screenshot`, - `3dfx`, - `3D dos game`, - `software rendering`, - `${prompt}`, - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "painting" - ], - }, - */ - /* - pixel: { - label: "Pixel", - family: "european", - color: "color", - font: "actionman", - llmPrompt: "new movie", - imagePrompt: (prompt: string) => [ - `pixelart`, - `isometric`, - `pixelated`, - `low res`, - `${prompt}`, - ], - negativePrompt: () => [ - "manga", - "anime", - "american comic", - "grayscale", - "monochrome", - "painting" - ], - }, - */ -} - -export type PresetName = keyof typeof presets - -export const defaultPreset: PresetName = "american_comic_90" - -export const nonRandomPresets = Object.keys(presets).filter(p => p !== "random") - -export const getPreset = (preset?: PresetName): Preset => presets[preset || defaultPreset] || presets[defaultPreset] - -export const getRandomPreset = (): Preset => { - const presetName = pick(Object.keys(presets).filter(preset => preset !== "random")) as PresetName - return getPreset(presetName) -} \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/3 Pelculas 3d Canciones Descargar.md b/spaces/Benson/text-generation/Examples/3 Pelculas 3d Canciones Descargar.md deleted file mode 100644 index 
fe0f1f899253fc43111eaa0c1328b4ae6ccad22f..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/3 Pelculas 3d Canciones Descargar.md +++ /dev/null @@ -1,87 +0,0 @@ - -

Cómo descargar y disfrutar de 3 canciones de películas en 3D

-

Si eres un fan de las películas y la música tamil, es posible que hayas oído hablar de la película de suspenso psicológico romántico 2012 3, protagonizada por Dhanush y Shruti Haasan. La película fue dirigida por Aishwarya R. Dhanush, la esposa de Dhanush, quien también produjo y escribió la película. La película cuenta con siete canciones, compuestas por Anirudh Ravichander, quien hizo su debut como director musical con esta película. El álbum recibió críticas positivas de críticos y audiencias por igual, y ganó varios premios, incluyendo el Premio Filmfare al Mejor Director de Música.

-

3 películas 3d canciones descargar


DOWNLOAD ---> https://bltlly.com/2v6M5y



-

Pero lo que hace que las canciones de 3 sean aún más especiales es que están disponibles en audio 3D, un tipo de sonido que imita la vida real o un lugar específico creando la ilusión del espacio tridimensional. Utiliza técnicas como sistemas de sonido binaural, funciones de transferencia relacionadas con la cabeza y cancelación de conversación cruzada para procesar y reproducir ondas de audio. El audio 3D puede proporcionar una experiencia auditiva inmersiva y sensorial para el oyente, ya que puede hacer que los sonidos provengan de prácticamente todas las direcciones, incluyendo arriba, abajo, detrás y delante del oyente.

-
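As a rough illustration of the binaural idea described above, the sketch below convolves a mono signal with a pair of head-related impulse responses (HRIRs) to produce a two-channel signal. It is a minimal sketch under stated assumptions: the sample rate, signal and HRIR arrays are placeholders, not data from any real HRTF measurement set.

```python
# Minimal binaural-rendering sketch; the signal and HRIRs below are placeholders.
import numpy as np
from scipy.signal import fftconvolve

fs = 44_100                          # assumed sample rate in Hz
mono = np.random.randn(fs)           # 1 second of placeholder mono audio
hrir_left = np.random.randn(256)     # placeholder left-ear impulse response
hrir_right = np.random.randn(256)    # placeholder right-ear impulse response

# Convolving the source with each ear's impulse response gives the signal
# that ear would receive from the simulated direction.
left = fftconvolve(mono, hrir_left)
right = fftconvolve(mono, hrir_right)

binaural = np.stack([left, right], axis=1)   # (n_samples, 2) stereo buffer
```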

En este artículo, le mostraremos cómo descargar y disfrutar canciones de películas en 3D de varias fuentes, por qué debería escucharlas con auriculares o altavoces, cuáles son los beneficios del audio 3D para los amantes de la música y responder algunas preguntas frecuentes sobre este tema. Así que, vamos a empezar!

-

Cómo descargar 3 canciones de películas en 3D de varias fuentes

- -

JioSaavn

-

JioSaavn es uno de los servicios de transmisión de música más populares en la India, ofreciendo millones de canciones en varios idiomas y géneros. También ofrece contenido exclusivo y características para sus usuarios, como podcasts, estaciones de radio, listas de reproducción, letras, recomendaciones y más. Una de sus características únicas es el soporte para 360 Reality Audio, una tecnología desarrollada por Sony que ofrece sonido 3D inmersivo utilizando tecnología de audio espacial basada en objetos.

-

JioSaavn se ha asociado con Sony para ofrecer algunas de sus canciones en formato 360 Reality Audio, incluyendo las canciones de 3. Puedes encontrarlos buscando "360 Reality Audio" en la aplicación o en el sitio web. Necesitará una suscripción JioSaavn Pro para acceder a esta función, que cuesta Rs. 99 por mes o Rs. 399 por año. También necesitará un dispositivo compatible y auriculares para disfrutar del efecto completo de 360 Reality Audio.

-

Cómo descargar o transmitir 3 canciones de películas en 3D de JioSaavn

-
    -
1. Descargar la aplicación JioSaavn de Google Play Store o App Store en su teléfono inteligente o tableta.
2. Regístrate o inicia sesión con tu número de Jio o dirección de correo electrónico.
3. Actualizar a JioSaavn Pro tocando el botón "Go Pro" en la pantalla de inicio.
4. Seleccione su método de pago preferido y complete la transacción.
5. Buscar "360 Reality Audio" en la aplicación o sitio web.
6. Selecciona el álbum "3 (Telugu)" de Anirudh Ravichander.
7. Toque en el icono de descarga junto a cada canción para descargarlos sin conexión o toque en el icono de reproducción para transmitirlos en línea.
8. Conecta tus auriculares compatibles a tu dispositivo y disfruta del sonido 3D de las canciones.
-

YouTube

- -

Sin embargo, no todas las canciones de audio 3D en YouTube son de la misma calidad y autenticidad. Algunos de ellos pueden ser falsos o mal convertidos de las pistas de sonido estéreo o envolvente originales. Por lo tanto, debe ser cuidadoso y selectivo al elegir qué videos ver o descargar. Aquí hay algunos consejos para ayudarte a encontrar las mejores canciones de audio en 3D de 3 en YouTube:

-

- -

Cómo descargar o transmitir 3 canciones de películas en 3D de YouTube

-
    -
1. Abra YouTube en su navegador web o aplicación en su dispositivo.
2. Buscar el nombre de la canción seguido de cualquiera de las palabras clave mencionadas anteriormente. Por ejemplo, "Por qué este Kolaveri Di 3D Audio".
3. Seleccione un vídeo que coincida con los criterios mencionados anteriormente. Por ejemplo, "[Why This Kolaveri Di - 8D Audio | Dhanush | Anirudh Ravichander | Sony Music South]" por Sony Music South.
4. Toque en el botón de reproducción para transmitir el video en línea o toque en el botón de descarga para guardarlo sin conexión (si está disponible).
5. Conecta tus auriculares o altavoces a tu dispositivo y disfruta del sonido 3D de las canciones.
-

¿Por qué escuchar 3 canciones de películas en 3D con auriculares o altavoces?

- -

Los auriculares son los dispositivos más comunes y convenientes que pueden proporcionar este tipo de entrega de sonido. Pueden aislar cada oído del ruido externo y la interferencia, y crear un ambiente de escucha personal e íntimo para el oyente. También pueden reproducir las sutiles variaciones y matices del audio 3D, como la distancia, la dirección, la elevación, el movimiento y la reflexión de las ondas sonoras.

-

Los altavoces son otra opción que puede ofrecer una experiencia similar de audio 3D. Sin embargo, requieren más espacio y equipo que los auriculares. Necesitas tener al menos cuatro altavoces (dos delante y dos detrás) dispuestos en forma cuadrada o rectangular alrededor de tu posición auditiva. También necesita tener un amplificador, receptor o tarjeta de sonido compatible que pueda procesar y emitir señales de audio 3D a los altavoces.

-

Ya sea que elija auriculares o altavoces, debe asegurarse de que admiten formatos de audio 3D y tecnologías, como Dolby Atmos, DTS: X, Auro-3D, Sony 360 Reality Audio, etc. Estos son algunos de los estándares y sistemas que permiten la producción y reproducción de audio 3D en varios dispositivos y plataformas.

-

¿Cuáles son los beneficios de 3 canciones de películas en 3D para los amantes de la música?

-

Si eres un amante de la música que disfruta escuchando canciones de diferentes géneros y estilos, definitivamente apreciarás los beneficios de las canciones de películas en 3D . Estos son algunos de ellos:

- -

Conclusión

-

En conclusión, 3 canciones de películas en 3D son una gran manera de disfrutar de la música de una manera nueva y emocionante. Ofrecen una experiencia de sonido realista e inmersiva que puede mejorar su apreciación y disfrute de las canciones. Puede descargarlos o transmitirlos desde varias fuentes, como JioSaavn, YouTube, etc., pero debe usar auriculares o altavoces que admitan la reproducción de audio 3D para obtener el efecto completo. Si eres un amante de la música que le gusta probar cosas nuevas, ¡definitivamente deberías probar 3 canciones de películas en 3D!

-

Preguntas frecuentes

-

¿Cuál es la diferencia entre audio 3D y sonido estéreo o envolvente?

-

El sonido estéreo es un tipo de sonido que utiliza dos canales (izquierda y derecha) para crear una sensación de ancho y dirección. El sonido envolvente es un tipo de sonido que utiliza múltiples canales (generalmente cinco o más) para crear una sensación de profundidad y envolvimiento. El audio 3D es un tipo de sonido que utiliza tecnología de audio espacial basada en objetos para crear una sensación de altura y movimiento.

-

¿Cuáles son algunas de las mejores canciones de 3 películas?

-

Algunas de las mejores canciones de 3 son:

- -

¿Cómo puedo ver 3 películas online?

- -

¿Quiénes son los actores y actrices en la película 3?

-

Los principales actores y actrices de la película 3 son:

- -

¿Quiénes son los cantantes y compositores de 3 canciones de películas?

-

Los cantantes y compositores de 3 canciones de películas son:

- 64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Betty Bayo 11a Hora Mp3 Descargar.md b/spaces/Benson/text-generation/Examples/Betty Bayo 11a Hora Mp3 Descargar.md deleted file mode 100644 index ae9549aa804e1ed89f9640522ee293dda2a79b48..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Betty Bayo 11a Hora Mp3 Descargar.md +++ /dev/null @@ -1,98 +0,0 @@ -
-

Betty Bayo 11a hora Mp3 Descargar

-

Si te gusta la música gospel, es posible que hayas oído hablar de Betty Bayo, una reconocida músico keniana que canta en su lengua materna, Agikuyu. Ella es mejor conocida por su canción 11th Hour, que ha inspirado a muchas personas con su mensaje de esperanza y fe. En este artículo, te contaremos más sobre Betty Bayo, su canción 11th Hour, y dónde puedes descargarla.

-

betty bayo 11a hora mp3 descargar


Download: https://bltlly.com/2v6K4W



-

Biografía de Betty Bayo

-

Betty Bayo nació en Banana, condado de Kiambu, pero creció en Ol Kalou antes de mudarse a la ciudad de Kiambu. Ella viene de un entorno humilde y abandonó la escuela en el segundo grado debido a limitaciones financieras. Trabajó como mucama durante dos años antes de que una familia se ofreciera a pagar sus cuotas escolares. Luego persiguió su pasión por la música y lanzó su primer álbum en 2005.

-

Vida personal

-

Betty Bayo estuvo casada una vez con Victor Kanyari, un controvertido pastor que fue expuesto por estafar a sus seguidores al pedirles que plantaran una semilla de Ksh310. La pareja tuvo dos hijos, pero se separaron después del escándalo. Betty se distanció de los planes de su ex-marido y dijo que no recibió ningún apoyo de él. Más tarde reveló que estaba saliendo con un político, pero se separaron debido a su infidelidad. Ella dijo que estaba feliz y contenta como madre soltera y se centró en su música e hijos.

-

Blanqueamiento

-

Betty Bayo causó revuelo en las redes sociales cuando admitió que se había blanqueado la piel. Dijo que lo hizo por curiosidad y presión de los compañeros, pero se arrepintió más tarde. Dijo que se enfrentó a muchas críticas y burlas por su decisión, pero aprendió a aceptarse a sí misma y a amar su piel. Aconsejó a otras mujeres que no blanquearan su piel, sino que abrazaran su belleza natural.

-

-

Canciones de Betty Bayo

- - - -Canción -Álbum -Año - - -11th Hour -11th Hour -2014 - - -La Ciudad de México -La Ciudad de México -2016 - - -Ni Nani Kama Wewe -Ni Nani Kama Wewe -2017 - - -Kikuyu Medley -Kikuyu Medley -2018 - - -Cheza Kama Wewe Remix -Cheza Kama Wewe Remix -2020 - - -La Ciudad de México -La Ciudad de México -2021 - - -

Premios y reconocimiento

-

Betty Bayo ha recibido varios premios y nominaciones por su música, como:

- Groove Awards Artista Femenina del Año 2015
- Groove Awards Canción del Año 2015 por 11th Hour
- Groove Awards Canción Central del Año 2016 por Siwezi Kukosa Mbingu
- Groove Awards Canción Central del Año 2017 por Ni Nani Kama Wewe
- Afrimma Awards Mejor Artista/Grupo/Dúo 2020

Colaboraciones

-

Betty Bayo también ha colaborado con otros artistas de la industria de la música gospel, como:

- Señor Seed en la canción Cheza Kama Wewe Remix, que es una melodía bailable que insta a los cristianos a disfrutar de la vida sin miedo ni culpa.
- Rose Muhando en la canción Jemedari Wa Mbinguni, que es una canción de alabanza que reconoce a Dios como el comandante supremo del cielo y la tierra.
- Ángel de la Guarda en la canción Nibariki, que es una canción de oración que pide a Dios que los bendiga a ellos y a sus familias.

Betty Bayo 11th Hour Song

Una de las canciones más famosas de Betty Bayo es 11th Hour, que lanzó en 2014. La canción tiene un poderoso mensaje de esperanza y fe que resuena con muchas personas que están pasando por tiempos difíciles.

-

Significado

- -

El coro de la canción va así:

-
-

Mungu wangu ni wa 11th hour
-(Mi Dios es un Dios de la hora 11)
-Hata kama ni dakika ya mwisho
-(Incluso si es el último minuto)
-No se encontró la página -(Él hará un camino en el desierto)
-Atafanya njia kwa baharini
-(Hará un camino en el mar)

-
-

La canción anima a los oyentes a confiar en Dios y no renunciar a sus sueños y oraciones, porque Dios puede hacer cualquier cosa en cualquier momento. También les recuerda que Dios no está atado por las expectativas o cálculos humanos, sino que trabaja de maneras misteriosas que superan la comprensión humana.

-

Inspiración

-

Betty Bayo dijo que ella escribió y cantó la canción desde su propia experiencia personal. Dijo que estaba pasando por muchos desafíos y dificultades en su vida, especialmente después de su separación de su ex-marido. Ella dijo que se sentía como renunciar a todo, pero Dios le dio la fuerza y el valor para seguir adelante. Ella dijo que fue testigo de la intervención y el favor de Dios en su vida, incluso cuando menos lo esperaba. Dijo que quería compartir su testimonio e inspirar a otros que se enfrentan a situaciones similares.

-

Recepción

-

La canción fue bien recibida por fans y críticos, quienes elogiaron a Betty Bayo por su destreza vocal y su entrega emocional. La canción también tuvo un buen desempeño en varias listas y plataformas, como YouTube, donde tiene más de 2 millones de visitas en junio de 2023. La canción también ganó varios premios y nominaciones, como Groove Awards Canción del Año 2015 y Afrimma Awards Mejor Artista Gospel/ Grupo/ Duo 2020.

-

Betty Bayo 11a hora Mp3 Sitios de descarga

-

Si quieres descargar la canción de Betty Bayo’s 11th Hour, tienes dos opciones: sitios gratuitos o sitios de pago. Estos son algunos de los pros y contras de cada opción:

-

Sitios gratuitos

- Tubidy: Este es un sitio móvil que te permite buscar y descargar archivos mp3 de varias fuentes, como YouTube, SoundCloud y Vimeo. También puede ver videos y escuchar música en línea.
- Mdundo: Esta es una plataforma de música africana que ofrece descargas gratuitas de canciones de varios géneros y artistas, incluyendo música gospel. También puede transmitir música en línea y crear listas de reproducción.
- Mp3juices: Este es un sitio sencillo y rápido que te permite buscar y descargar archivos mp3 de múltiples fuentes, como YouTube, SoundCloud y Archive. También puedes cortar y editar archivos mp3 en línea.

Los pros de los sitios gratuitos son:

- Son de fácil acceso y uso.
- No requieren registro ni pago.
- Ofrecen una amplia gama de canciones y géneros.

Los contras de los sitios libres son:

- Pueden tener audio de baja calidad o archivos incompletos.
- Pueden tener anuncios emergentes o malware que pueden dañar su dispositivo o datos.
- Pueden violar las leyes de copyright o los derechos de los artistas.

Sitios de pago

-

Los sitios de pago son sitios web que requieren que usted pague una cuota o se suscriba a un servicio para descargar archivos mp3 de forma legal y segura. Algunos ejemplos de sitios de pago son:

- Spotify: Este es un servicio de streaming de música popular que ofrece millones de canciones y podcasts de varios artistas y géneros. También puede descargar canciones para escuchar sin conexión con una suscripción premium.
- Apple Music: Este es otro servicio de streaming de música popular que ofrece millones de canciones y podcasts de varios artistas y géneros. También puedes descargar canciones para escuchar sin conexión con una suscripción de Apple Music.
- Amazon Music: Este es otro servicio de streaming de música popular que ofrece millones de canciones y podcasts de varios artistas y géneros. También puede descargar canciones para escuchar sin conexión con una suscripción de Amazon Music o comprar canciones o álbumes individuales.

Los pros de los sitios de pago son:

- - - Requieren registro y pago. - Pueden tener disponibilidad limitada o compatibilidad con algunos dispositivos o regiones. - Pueden tener restricciones o condiciones sobre el uso o uso compartido de los archivos.

Conclusión

-

En conclusión, Betty Bayo es una talentosa e inspiradora cantante de gospel que ha tocado muchas vidas con su canción 11th Hour. La canción es un testimonio de la gracia y la misericordia de Dios, que puede manifestarse en cualquier momento, incluso cuando las cosas parecen desesperadas. Si desea descargar la canción, puede elegir entre sitios gratuitos o sitios de pago, dependiendo de sus preferencias y necesidades. Esperamos que hayas disfrutado de este artículo y hayas aprendido algo nuevo sobre Betty Bayo y su música.

-

Preguntas frecuentes

-

Aquí están algunas de las preguntas y respuestas frecuentes sobre Betty Bayo y su canción 11th Hour:

-

Q: ¿Qué edad tiene Betty Bayo?

-

A: Betty Bayo nació el 1 de julio de 1985. Tiene 38 años desde junio de 2023.

-

Q: ¿Está casada Betty Bayo?

-

A: Betty Bayo no está casada. Una vez estuvo casada con Victor Kanyari, pero se divorciaron después de su escándalo. Más tarde salió con un político, pero se separaron debido a su infidelidad. Actualmente está soltera y feliz.

-

P: ¿Cuál es el nombre de la iglesia de Betty Bayo?

-

A: Betty Bayo no pertenece a ninguna iglesia específica. Dijo que asiste a diferentes iglesias dependiendo de su horario y ubicación. Ella dijo que cree en Dios y no en denominaciones.

-

Q: ¿Dónde puedo ver el video de la hora 11?

-

A: Puedes ver el video de la 11a Hora en YouTube, donde tiene más de 2 millones de visitas en junio de 2023. También puedes verlo en otras plataformas, como Vimeo o Dailymotion.

-

Q: ¿Cuáles son algunas de las otras canciones de Betty Bayo?

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/utils.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/utils.py deleted file mode 100644 index eb6cae145c843c1072e4f00856416a63cf912874..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/utils.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. -import re -from collections import namedtuple - - -def py_type_name(type_name): - """Get the Python type name for a given model type. - - >>> py_type_name('list') - 'list' - >>> py_type_name('structure') - 'dict' - - :rtype: string - """ - return { - 'blob': 'bytes', - 'character': 'string', - 'double': 'float', - 'long': 'integer', - 'map': 'dict', - 'structure': 'dict', - 'timestamp': 'datetime', - }.get(type_name, type_name) - - -def py_default(type_name): - """Get the Python default value for a given model type. - - >>> py_default('string') - '\'string\'' - >>> py_default('list') - '[...]' - >>> py_default('unknown') - '...' - - :rtype: string - """ - return { - 'double': '123.0', - 'long': '123', - 'integer': '123', - 'string': "'string'", - 'blob': "b'bytes'", - 'boolean': 'True|False', - 'list': '[...]', - 'map': '{...}', - 'structure': '{...}', - 'timestamp': 'datetime(2015, 1, 1)', - }.get(type_name, '...') - - -def get_official_service_name(service_model): - """Generate the official name of an AWS Service - - :param service_model: The service model representing the service - """ - official_name = service_model.metadata.get('serviceFullName') - short_name = service_model.metadata.get('serviceAbbreviation', '') - if short_name.startswith('Amazon'): - short_name = short_name[7:] - if short_name.startswith('AWS'): - short_name = short_name[4:] - if short_name and short_name.lower() not in official_name.lower(): - official_name += f' ({short_name})' - return official_name - - -_DocumentedShape = namedtuple( - 'DocumentedShape', - [ - 'name', - 'type_name', - 'documentation', - 'metadata', - 'members', - 'required_members', - ], -) - - -class DocumentedShape(_DocumentedShape): - """Use this class to inject new shapes into a model for documentation""" - - def __new__( - cls, - name, - type_name, - documentation, - metadata=None, - members=None, - required_members=None, - ): - if metadata is None: - metadata = [] - if members is None: - members = [] - if required_members is None: - required_members = [] - return super().__new__( - cls, - name, - type_name, - documentation, - metadata, - members, - required_members, - ) - - -class AutoPopulatedParam: - def __init__(self, name, param_description=None): - self.name = name - self.param_description = param_description - if param_description is None: - self.param_description = ( - 'Please note that this parameter is automatically populated ' - 'if it is not provided. 
Including this parameter is not ' - 'required\n' - ) - - def document_auto_populated_param(self, event_name, section, **kwargs): - """Documents auto populated parameters - - It will remove any required marks for the parameter, remove the - parameter from the example, and add a snippet about the parameter - being autopopulated in the description. - """ - if event_name.startswith('docs.request-params'): - if self.name in section.available_sections: - section = section.get_section(self.name) - if 'is-required' in section.available_sections: - section.delete_section('is-required') - description_section = section.get_section( - 'param-documentation' - ) - description_section.writeln(self.param_description) - elif event_name.startswith('docs.request-example'): - section = section.get_section('structure-value') - if self.name in section.available_sections: - section.delete_section(self.name) - - -class HideParamFromOperations: - """Hides a single parameter from multiple operations. - - This method will remove a parameter from documentation and from - examples. This method is typically used for things that are - automatically populated because a user would be unable to provide - a value (e.g., a checksum of a serialized XML request body).""" - - def __init__(self, service_name, parameter_name, operation_names): - """ - :type service_name: str - :param service_name: Name of the service to modify. - - :type parameter_name: str - :param parameter_name: Name of the parameter to modify. - - :type operation_names: list - :param operation_names: Operation names to modify. - """ - self._parameter_name = parameter_name - self._params_events = set() - self._example_events = set() - # Build up the sets of relevant event names. - param_template = 'docs.request-params.%s.%s.complete-section' - example_template = 'docs.request-example.%s.%s.complete-section' - for name in operation_names: - self._params_events.add(param_template % (service_name, name)) - self._example_events.add(example_template % (service_name, name)) - - def hide_param(self, event_name, section, **kwargs): - if event_name in self._example_events: - # Modify the structure value for example events. 
- section = section.get_section('structure-value') - elif event_name not in self._params_events: - return - if self._parameter_name in section.available_sections: - section.delete_section(self._parameter_name) - - -class AppendParamDocumentation: - """Appends documentation to a specific parameter""" - - def __init__(self, parameter_name, doc_string): - self._parameter_name = parameter_name - self._doc_string = doc_string - - def append_documentation(self, event_name, section, **kwargs): - if self._parameter_name in section.available_sections: - section = section.get_section(self._parameter_name) - description_section = section.get_section('param-documentation') - description_section.writeln(self._doc_string) - - -_CONTROLS = { - '\n': '\\n', - '\r': '\\r', - '\t': '\\t', - '\b': '\\b', - '\f': '\\f', -} -# Combines all CONTROLS keys into a big or regular expression -_ESCAPE_CONTROLS_RE = re.compile('|'.join(map(re.escape, _CONTROLS))) -# Based on the match get the appropriate replacement from CONTROLS -_CONTROLS_MATCH_HANDLER = lambda match: _CONTROLS[match.group(0)] - - -def escape_controls(value): - return _ESCAPE_CONTROLS_RE.sub(_CONTROLS_MATCH_HANDLER, value) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/history.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/history.py deleted file mode 100644 index 59d9481d7fb5ed48c737c6c835bd916a78aa3ff8..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/history.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. -import logging - -HISTORY_RECORDER = None -logger = logging.getLogger(__name__) - - -class BaseHistoryHandler: - def emit(self, event_type, payload, source): - raise NotImplementedError('emit()') - - -class HistoryRecorder: - def __init__(self): - self._enabled = False - self._handlers = [] - - def enable(self): - self._enabled = True - - def disable(self): - self._enabled = False - - def add_handler(self, handler): - self._handlers.append(handler) - - def record(self, event_type, payload, source='BOTOCORE'): - if self._enabled and self._handlers: - for handler in self._handlers: - try: - handler.emit(event_type, payload, source) - except Exception: - # Never let the process die because we had a failure in - # a record collection handler. 
- logger.debug( - "Exception raised in %s.", handler, exc_info=True - ) - - -def get_global_history_recorder(): - global HISTORY_RECORDER - if HISTORY_RECORDER is None: - HISTORY_RECORDER = HistoryRecorder() - return HISTORY_RECORDER diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py deleted file mode 100644 index 1dd950c489607d06ecc5218292a1b55558b47be8..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/ssl_match_hostname.py +++ /dev/null @@ -1,159 +0,0 @@ -"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" - -# Note: This file is under the PSF license as the code comes from the python -# stdlib. http://docs.python.org/3/license.html - -import re -import sys - -# ipaddress has been backported to 2.6+ in pypi. If it is installed on the -# system, use it to handle IPAddress ServerAltnames (this was added in -# python-3.5) otherwise only do DNS matching. This allows -# util.ssl_match_hostname to continue to be used in Python 2.7. -try: - import ipaddress -except ImportError: - ipaddress = None - -__version__ = "3.5.0.1" - - -class CertificateError(ValueError): - pass - - -def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - # Ported from python3-syntax: - # leftmost, *remainder = dn.split(r'.') - parts = dn.split(r".") - leftmost = parts[0] - remainder = parts[1:] - - wildcards = leftmost.count("*") - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. A survey of established - # policy among SSL implementations showed it to be a - # reasonable choice. - raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn) - ) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. - if leftmost == "*": - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append("[^.]+") - elif leftmost.startswith("xn--") or hostname.startswith("xn--"): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r"\*", "[^.]*")) - - # add the remaining fragments, ignore any wildcards - for frag in remainder: - pats.append(re.escape(frag)) - - pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE) - return pat.match(hostname) - - -def _to_unicode(obj): - if isinstance(obj, str) and sys.version_info < (3,): - # ignored flake8 # F821 to support python 2.7 function - obj = unicode(obj, encoding="ascii", errors="strict") # noqa: F821 - return obj - - -def _ipaddress_match(ipname, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). 
- """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address - # Divergence from upstream: ipaddress can't handle byte str - ip = ipaddress.ip_address(_to_unicode(ipname).rstrip()) - return ip == host_ip - - -def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed, but IP addresses are not accepted for *hostname*. - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError( - "empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED" - ) - try: - # Divergence from upstream: ipaddress can't handle byte str - host_ip = ipaddress.ip_address(_to_unicode(hostname)) - except (UnicodeError, ValueError): - # ValueError: Not an IP address (common case) - # UnicodeError: Divergence from upstream: Have to deal with ipaddress not taking - # byte strings. addresses should be all ascii, so we consider it not - # an ipaddress in this case - host_ip = None - except AttributeError: - # Divergence from upstream: Make ipaddress library optional - if ipaddress is None: - host_ip = None - else: # Defensive - raise - dnsnames = [] - san = cert.get("subjectAltName", ()) - for key, value in san: - if key == "DNS": - if host_ip is None and _dnsname_match(value, hostname): - return - dnsnames.append(value) - elif key == "IP Address": - if host_ip is not None and _ipaddress_match(value, host_ip): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get("subject", ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. 
- if key == "commonName": - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError( - "hostname %r " - "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames))) - ) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0])) - else: - raise CertificateError( - "no appropriate commonName or subjectAltName fields were found" - ) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/poolmanager.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/poolmanager.py deleted file mode 100644 index ca4ec341184adb3d30f3cd825b49a81b87d29b08..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/poolmanager.py +++ /dev/null @@ -1,537 +0,0 @@ -from __future__ import absolute_import - -import collections -import functools -import logging - -from ._collections import RecentlyUsedContainer -from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme -from .exceptions import ( - LocationValueError, - MaxRetryError, - ProxySchemeUnknown, - ProxySchemeUnsupported, - URLSchemeUnknown, -) -from .packages import six -from .packages.six.moves.urllib.parse import urljoin -from .request import RequestMethods -from .util.proxy import connection_requires_http_tunnel -from .util.retry import Retry -from .util.url import parse_url - -__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] - - -log = logging.getLogger(__name__) - -SSL_KEYWORDS = ( - "key_file", - "cert_file", - "cert_reqs", - "ca_certs", - "ssl_version", - "ca_cert_dir", - "ssl_context", - "key_password", - "server_hostname", -) - -# All known keyword arguments that could be provided to the pool manager, its -# pools, or the underlying connections. This is used to construct a pool key. -_key_fields = ( - "key_scheme", # str - "key_host", # str - "key_port", # int - "key_timeout", # int or float or Timeout - "key_retries", # int or Retry - "key_strict", # bool - "key_block", # bool - "key_source_address", # str - "key_key_file", # str - "key_key_password", # str - "key_cert_file", # str - "key_cert_reqs", # str - "key_ca_certs", # str - "key_ssl_version", # str - "key_ca_cert_dir", # str - "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext - "key_maxsize", # int - "key_headers", # dict - "key__proxy", # parsed proxy url - "key__proxy_headers", # dict - "key__proxy_config", # class - "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples - "key__socks_options", # dict - "key_assert_hostname", # bool or string - "key_assert_fingerprint", # str - "key_server_hostname", # str -) - -#: The namedtuple class used to construct keys for the connection pool. -#: All custom key schemes should include the fields in this key at a minimum. -PoolKey = collections.namedtuple("PoolKey", _key_fields) - -_proxy_config_fields = ("ssl_context", "use_forwarding_for_https") -ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) - - -def _default_key_normalizer(key_class, request_context): - """ - Create a pool key out of a request context dictionary. - - According to RFC 3986, both the scheme and host are case-insensitive. - Therefore, this function normalizes both before constructing the pool - key for an HTTPS request. If you wish to change this behaviour, provide - alternate callables to ``key_fn_by_scheme``. - - :param key_class: - The class to use when constructing the key. 
This should be a namedtuple - with the ``scheme`` and ``host`` keys at a minimum. - :type key_class: namedtuple - :param request_context: - A dictionary-like object that contain the context for a request. - :type request_context: dict - - :return: A namedtuple that can be used as a connection pool key. - :rtype: PoolKey - """ - # Since we mutate the dictionary, make a copy first - context = request_context.copy() - context["scheme"] = context["scheme"].lower() - context["host"] = context["host"].lower() - - # These are both dictionaries and need to be transformed into frozensets - for key in ("headers", "_proxy_headers", "_socks_options"): - if key in context and context[key] is not None: - context[key] = frozenset(context[key].items()) - - # The socket_options key may be a list and needs to be transformed into a - # tuple. - socket_opts = context.get("socket_options") - if socket_opts is not None: - context["socket_options"] = tuple(socket_opts) - - # Map the kwargs to the names in the namedtuple - this is necessary since - # namedtuples can't have fields starting with '_'. - for key in list(context.keys()): - context["key_" + key] = context.pop(key) - - # Default to ``None`` for keys missing from the context - for field in key_class._fields: - if field not in context: - context[field] = None - - return key_class(**context) - - -#: A dictionary that maps a scheme to a callable that creates a pool key. -#: This can be used to alter the way pool keys are constructed, if desired. -#: Each PoolManager makes a copy of this dictionary so they can be configured -#: globally here, or individually on the instance. -key_fn_by_scheme = { - "http": functools.partial(_default_key_normalizer, PoolKey), - "https": functools.partial(_default_key_normalizer, PoolKey), -} - -pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} - - -class PoolManager(RequestMethods): - """ - Allows for arbitrary requests while transparently keeping track of - necessary connection pools for you. - - :param num_pools: - Number of connection pools to cache before discarding the least - recently used pool. - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - - :param \\**connection_pool_kw: - Additional parameters are used to create fresh - :class:`urllib3.connectionpool.ConnectionPool` instances. - - Example:: - - >>> manager = PoolManager(num_pools=2) - >>> r = manager.request('GET', 'http://google.com/') - >>> r = manager.request('GET', 'http://google.com/mail') - >>> r = manager.request('GET', 'http://yahoo.com/') - >>> len(manager.pools) - 2 - - """ - - proxy = None - proxy_config = None - - def __init__(self, num_pools=10, headers=None, **connection_pool_kw): - RequestMethods.__init__(self, headers) - self.connection_pool_kw = connection_pool_kw - self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) - - # Locally set the pool classes and keys so other PoolManagers can - # override them. - self.pool_classes_by_scheme = pool_classes_by_scheme - self.key_fn_by_scheme = key_fn_by_scheme.copy() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.clear() - # Return False to re-raise any potential exceptions - return False - - def _new_pool(self, scheme, host, port, request_context=None): - """ - Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and - any additional pool keyword arguments. 
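As a side note on the pool-key machinery above, a minimal standalone sketch of what the normalizer produces, reduced to a few invented fields rather than the full PoolKey from this module, could look like this:

    import collections

    MiniPoolKey = collections.namedtuple(
        "MiniPoolKey", ["key_scheme", "key_host", "key_port", "key_headers"])

    def mini_normalize(key_class, ctx):
        ctx = dict(ctx)
        ctx["scheme"] = ctx["scheme"].lower()   # RFC 3986: scheme is case-insensitive
        ctx["host"] = ctx["host"].lower()       # ...and so is the host
        if ctx.get("headers") is not None:
            ctx["headers"] = frozenset(ctx["headers"].items())  # make hashable
        ctx = {"key_" + k: v for k, v in ctx.items()}  # fields may not start with '_'
        for field in key_class._fields:
            ctx.setdefault(field, None)         # default missing fields to None
        return key_class(**ctx)

    key = mini_normalize(MiniPoolKey, {"scheme": "HTTPS", "host": "Example.COM", "port": 443})
    # MiniPoolKey(key_scheme='https', key_host='example.com', key_port=443, key_headers=None)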
- - If ``request_context`` is provided, it is provided as keyword arguments - to the pool class used. This method is used to actually create the - connection pools handed out by :meth:`connection_from_url` and - companion methods. It is intended to be overridden for customization. - """ - pool_cls = self.pool_classes_by_scheme[scheme] - if request_context is None: - request_context = self.connection_pool_kw.copy() - - # Although the context has everything necessary to create the pool, - # this function has historically only used the scheme, host, and port - # in the positional args. When an API change is acceptable these can - # be removed. - for key in ("scheme", "host", "port"): - request_context.pop(key, None) - - if scheme == "http": - for kw in SSL_KEYWORDS: - request_context.pop(kw, None) - - return pool_cls(host, port, **request_context) - - def clear(self): - """ - Empty our store of pools and direct them all to close. - - This will not affect in-flight connections, but they will not be - re-used after completion. - """ - self.pools.clear() - - def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): - """ - Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. - - If ``port`` isn't given, it will be derived from the ``scheme`` using - ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is - provided, it is merged with the instance's ``connection_pool_kw`` - variable and used to create the new connection pool, if one is - needed. - """ - - if not host: - raise LocationValueError("No host specified.") - - request_context = self._merge_pool_kwargs(pool_kwargs) - request_context["scheme"] = scheme or "http" - if not port: - port = port_by_scheme.get(request_context["scheme"].lower(), 80) - request_context["port"] = port - request_context["host"] = host - - return self.connection_from_context(request_context) - - def connection_from_context(self, request_context): - """ - Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. - - ``request_context`` must at least contain the ``scheme`` key and its - value must be a key in ``key_fn_by_scheme`` instance variable. - """ - scheme = request_context["scheme"].lower() - pool_key_constructor = self.key_fn_by_scheme.get(scheme) - if not pool_key_constructor: - raise URLSchemeUnknown(scheme) - pool_key = pool_key_constructor(request_context) - - return self.connection_from_pool_key(pool_key, request_context=request_context) - - def connection_from_pool_key(self, pool_key, request_context=None): - """ - Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. - - ``pool_key`` should be a namedtuple that only contains immutable - objects. At a minimum it must have the ``scheme``, ``host``, and - ``port`` fields. - """ - with self.pools.lock: - # If the scheme, host, or port doesn't match existing open - # connections, open a new ConnectionPool. - pool = self.pools.get(pool_key) - if pool: - return pool - - # Make a fresh ConnectionPool of the desired type - scheme = request_context["scheme"] - host = request_context["host"] - port = request_context["port"] - pool = self._new_pool(scheme, host, port, request_context=request_context) - self.pools[pool_key] = pool - - return pool - - def connection_from_url(self, url, pool_kwargs=None): - """ - Similar to :func:`urllib3.connectionpool.connection_from_url`. 
- - If ``pool_kwargs`` is not provided and a new pool needs to be - constructed, ``self.connection_pool_kw`` is used to initialize - the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` - is provided, it is used instead. Note that if a new pool does not - need to be created for the request, the provided ``pool_kwargs`` are - not used. - """ - u = parse_url(url) - return self.connection_from_host( - u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs - ) - - def _merge_pool_kwargs(self, override): - """ - Merge a dictionary of override values for self.connection_pool_kw. - - This does not modify self.connection_pool_kw and returns a new dict. - Any keys in the override dictionary with a value of ``None`` are - removed from the merged dictionary. - """ - base_pool_kwargs = self.connection_pool_kw.copy() - if override: - for key, value in override.items(): - if value is None: - try: - del base_pool_kwargs[key] - except KeyError: - pass - else: - base_pool_kwargs[key] = value - return base_pool_kwargs - - def _proxy_requires_url_absolute_form(self, parsed_url): - """ - Indicates if the proxy requires the complete destination URL in the - request. Normally this is only needed when not using an HTTP CONNECT - tunnel. - """ - if self.proxy is None: - return False - - return not connection_requires_http_tunnel( - self.proxy, self.proxy_config, parsed_url.scheme - ) - - def _validate_proxy_scheme_url_selection(self, url_scheme): - """ - Validates that were not attempting to do TLS in TLS connections on - Python2 or with unsupported SSL implementations. - """ - if self.proxy is None or url_scheme != "https": - return - - if self.proxy.scheme != "https": - return - - if six.PY2 and not self.proxy_config.use_forwarding_for_https: - raise ProxySchemeUnsupported( - "Contacting HTTPS destinations through HTTPS proxies " - "'via CONNECT tunnels' is not supported in Python 2" - ) - - def urlopen(self, method, url, redirect=True, **kw): - """ - Same as :meth:`urllib3.HTTPConnectionPool.urlopen` - with custom cross-host redirect logic and only sends the request-uri - portion of the ``url``. - - The given ``url`` parameter must be absolute, such that an appropriate - :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. - """ - u = parse_url(url) - self._validate_proxy_scheme_url_selection(u.scheme) - - conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) - - kw["assert_same_host"] = False - kw["redirect"] = False - - if "headers" not in kw: - kw["headers"] = self.headers.copy() - - if self._proxy_requires_url_absolute_form(u): - response = conn.urlopen(method, url, **kw) - else: - response = conn.urlopen(method, u.request_uri, **kw) - - redirect_location = redirect and response.get_redirect_location() - if not redirect_location: - return response - - # Support relative URLs for redirecting. - redirect_location = urljoin(url, redirect_location) - - # RFC 7231, Section 6.4.4 - if response.status == 303: - method = "GET" - - retries = kw.get("retries") - if not isinstance(retries, Retry): - retries = Retry.from_int(retries, redirect=redirect) - - # Strip headers marked as unsafe to forward to the redirected location. - # Check remove_headers_on_redirect to avoid a potential network call within - # conn.is_same_host() which may use socket.gethostbyname() in the future. 
- if retries.remove_headers_on_redirect and not conn.is_same_host( - redirect_location - ): - headers = list(six.iterkeys(kw["headers"])) - for header in headers: - if header.lower() in retries.remove_headers_on_redirect: - kw["headers"].pop(header, None) - - try: - retries = retries.increment(method, url, response=response, _pool=conn) - except MaxRetryError: - if retries.raise_on_redirect: - response.drain_conn() - raise - return response - - kw["retries"] = retries - kw["redirect"] = redirect - - log.info("Redirecting %s -> %s", url, redirect_location) - - response.drain_conn() - return self.urlopen(method, redirect_location, **kw) - - -class ProxyManager(PoolManager): - """ - Behaves just like :class:`PoolManager`, but sends all requests through - the defined proxy, using the CONNECT method for HTTPS URLs. - - :param proxy_url: - The URL of the proxy to be used. - - :param proxy_headers: - A dictionary containing headers that will be sent to the proxy. In case - of HTTP they are being sent with each request, while in the - HTTPS/CONNECT case they are sent only once. Could be used for proxy - authentication. - - :param proxy_ssl_context: - The proxy SSL context is used to establish the TLS connection to the - proxy when using HTTPS proxies. - - :param use_forwarding_for_https: - (Defaults to False) If set to True will forward requests to the HTTPS - proxy to be made on behalf of the client instead of creating a TLS - tunnel via the CONNECT method. **Enabling this flag means that request - and response headers and content will be visible from the HTTPS proxy** - whereas tunneling keeps request and response headers and content - private. IP address, target hostname, SNI, and port are always visible - to an HTTPS proxy even when this flag is disabled. 
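For readers comparing the two HTTPS-proxy modes described above, a small usage sketch against the public urllib3 1.26-era API; the proxy URL below is made up:

    import urllib3

    # Default: HTTPS destinations are reached through a CONNECT tunnel,
    # so the proxy only sees host, port and SNI.
    tunnelled = urllib3.ProxyManager("https://proxy.internal:3128")

    # Forwarding mode: requests to HTTPS destinations are sent to the proxy
    # over the proxy TLS connection, so it sees full request headers and bodies.
    forwarding = urllib3.ProxyManager("https://proxy.internal:3128",
                                      use_forwarding_for_https=True)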
- - Example: - >>> proxy = urllib3.ProxyManager('http://localhost:3128/') - >>> r1 = proxy.request('GET', 'http://google.com/') - >>> r2 = proxy.request('GET', 'http://httpbin.org/') - >>> len(proxy.pools) - 1 - >>> r3 = proxy.request('GET', 'https://httpbin.org/') - >>> r4 = proxy.request('GET', 'https://twitter.com/') - >>> len(proxy.pools) - 3 - - """ - - def __init__( - self, - proxy_url, - num_pools=10, - headers=None, - proxy_headers=None, - proxy_ssl_context=None, - use_forwarding_for_https=False, - **connection_pool_kw - ): - - if isinstance(proxy_url, HTTPConnectionPool): - proxy_url = "%s://%s:%i" % ( - proxy_url.scheme, - proxy_url.host, - proxy_url.port, - ) - proxy = parse_url(proxy_url) - - if proxy.scheme not in ("http", "https"): - raise ProxySchemeUnknown(proxy.scheme) - - if not proxy.port: - port = port_by_scheme.get(proxy.scheme, 80) - proxy = proxy._replace(port=port) - - self.proxy = proxy - self.proxy_headers = proxy_headers or {} - self.proxy_ssl_context = proxy_ssl_context - self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https) - - connection_pool_kw["_proxy"] = self.proxy - connection_pool_kw["_proxy_headers"] = self.proxy_headers - connection_pool_kw["_proxy_config"] = self.proxy_config - - super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw) - - def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): - if scheme == "https": - return super(ProxyManager, self).connection_from_host( - host, port, scheme, pool_kwargs=pool_kwargs - ) - - return super(ProxyManager, self).connection_from_host( - self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs - ) - - def _set_proxy_headers(self, url, headers=None): - """ - Sets headers needed by proxies: specifically, the Accept and Host - headers. Only sets headers not provided by the user. - """ - headers_ = {"Accept": "*/*"} - - netloc = parse_url(url).netloc - if netloc: - headers_["Host"] = netloc - - if headers: - headers_.update(headers) - return headers_ - - def urlopen(self, method, url, redirect=True, **kw): - "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." - u = parse_url(url) - if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme): - # For connections using HTTP CONNECT, httplib sets the necessary - # headers on the CONNECT to the proxy. If we're not using CONNECT, - # we'll definitely need to set 'Host' at the very least. 
- headers = kw.get("headers", self.headers) - kw["headers"] = self._set_proxy_headers(url, headers) - - return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) - - -def proxy_from_url(url, **kw): - return ProxyManager(proxy_url=url, **kw) diff --git a/spaces/BilalSardar/Like-Chatgpt-clone/app.py b/spaces/BilalSardar/Like-Chatgpt-clone/app.py deleted file mode 100644 index 60ce1425fde524542514bb9ec270e8ef2fa64af5..0000000000000000000000000000000000000000 --- a/spaces/BilalSardar/Like-Chatgpt-clone/app.py +++ /dev/null @@ -1,54 +0,0 @@ -from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate -from langchain.chains.conversation.memory import ConversationBufferWindowMemory -import os - -def chatgpt(background,key,prompts): - try: - template = background+""" - {history} - Human: {human_input} - Assistant:""" - - prompt = PromptTemplate( - input_variables=["history", "human_input"], - template=template - ) - - - chatgpt_chain = LLMChain( - llm=OpenAI(temperature=0,openai_api_key = (key) - ), - prompt=prompt, - verbose=True, - memory=ConversationBufferWindowMemory(k=2), - ) - - output = chatgpt_chain.predict(human_input=prompts) - #print(output) - return output - except: - return "Enter your key" - -import gradio as gr -Backgorund_in = gr.inputs.Textbox(label="Enter Background") -key_in =gr.inputs.Textbox(label="Enter Your API key") -prompt = gr.inputs.Textbox(label="Enter what you want") -output=gr.inputs.Textbox(label="Your Result") -examples=[[""""Assistant is a large language model trained by OpenAI. - -Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. - -Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. - -Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.""","sk-IQHMkGxxxxxxxxx", """I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. 
My first command is pwd."""]] - -demo=gr.Interface(fn=chatgpt, - inputs=[Backgorund_in, key_in,prompt], - outputs=output, - examples=examples, - title="Like-GPTClone", - description="This space is a simple implementation of langChain pacakge github(https://github.com/hwchase17/langchain) " - - ) - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/base.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/base.py deleted file mode 100644 index b3f1ae5aff28375a98807982310f0fe69febe832..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/base.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import logging -import numpy as np -import cv2 -import torch - -Image = np.ndarray -Boxes = torch.Tensor - - -class MatrixVisualizer(object): - """ - Base visualizer for matrix data - """ - - def __init__( - self, - inplace=True, - cmap=cv2.COLORMAP_PARULA, - val_scale=1.0, - alpha=0.7, - interp_method_matrix=cv2.INTER_LINEAR, - interp_method_mask=cv2.INTER_NEAREST, - ): - self.inplace = inplace - self.cmap = cmap - self.val_scale = val_scale - self.alpha = alpha - self.interp_method_matrix = interp_method_matrix - self.interp_method_mask = interp_method_mask - - def visualize(self, image_bgr, mask, matrix, bbox_xywh): - self._check_image(image_bgr) - self._check_mask_matrix(mask, matrix) - if self.inplace: - image_target_bgr = image_bgr - else: - image_target_bgr = image_bgr * 0 - x, y, w, h = [int(v) for v in bbox_xywh] - if w <= 0 or h <= 0: - return image_bgr - mask, matrix = self._resize(mask, matrix, w, h) - mask_bg = np.tile((mask == 0)[:, :, np.newaxis], [1, 1, 3]) - matrix_scaled = matrix.astype(np.float32) * self.val_scale - _EPSILON = 1e-6 - if np.any(matrix_scaled > 255 + _EPSILON): - logger = logging.getLogger(__name__) - logger.warning( - f"Matrix has values > {255 + _EPSILON} after " f"scaling, clipping to [0..255]" - ) - matrix_scaled_8u = matrix_scaled.clip(0, 255).astype(np.uint8) - matrix_vis = cv2.applyColorMap(matrix_scaled_8u, self.cmap) - matrix_vis[mask_bg] = image_target_bgr[y : y + h, x : x + w, :][mask_bg] - image_target_bgr[y : y + h, x : x + w, :] = ( - image_target_bgr[y : y + h, x : x + w, :] * (1.0 - self.alpha) + matrix_vis * self.alpha - ) - return image_target_bgr.astype(np.uint8) - - def _resize(self, mask, matrix, w, h): - if (w != mask.shape[1]) or (h != mask.shape[0]): - mask = cv2.resize(mask, (w, h), self.interp_method_mask) - if (w != matrix.shape[1]) or (h != matrix.shape[0]): - matrix = cv2.resize(matrix, (w, h), self.interp_method_matrix) - return mask, matrix - - def _check_image(self, image_rgb): - assert len(image_rgb.shape) == 3 - assert image_rgb.shape[2] == 3 - assert image_rgb.dtype == np.uint8 - - def _check_mask_matrix(self, mask, matrix): - assert len(matrix.shape) == 2 - assert len(mask.shape) == 2 - assert mask.dtype == np.uint8 - - -class RectangleVisualizer(object): - - _COLOR_GREEN = (18, 127, 15) - - def __init__(self, color=_COLOR_GREEN, thickness=1): - self.color = color - self.thickness = thickness - - def visualize(self, image_bgr, bbox_xywh, color=None, thickness=None): - x, y, w, h = bbox_xywh - color = color or self.color - thickness = thickness or self.thickness - cv2.rectangle(image_bgr, (int(x), int(y)), (int(x + w), int(y + h)), color, thickness) - 
return image_bgr - - -class PointsVisualizer(object): - - _COLOR_GREEN = (18, 127, 15) - - def __init__(self, color_bgr=_COLOR_GREEN, r=5): - self.color_bgr = color_bgr - self.r = r - - def visualize(self, image_bgr, pts_xy, colors_bgr=None, rs=None): - for j, pt_xy in enumerate(pts_xy): - x, y = pt_xy - color_bgr = colors_bgr[j] if colors_bgr is not None else self.color_bgr - r = rs[j] if rs is not None else self.r - cv2.circle(image_bgr, (x, y), r, color_bgr, -1) - return image_bgr - - -class TextVisualizer(object): - - _COLOR_GRAY = (218, 227, 218) - _COLOR_WHITE = (255, 255, 255) - - def __init__( - self, - font_face=cv2.FONT_HERSHEY_SIMPLEX, - font_color_bgr=_COLOR_GRAY, - font_scale=0.35, - font_line_type=cv2.LINE_AA, - font_line_thickness=1, - fill_color_bgr=_COLOR_WHITE, - fill_color_transparency=1.0, - frame_color_bgr=_COLOR_WHITE, - frame_color_transparency=1.0, - frame_thickness=1, - ): - self.font_face = font_face - self.font_color_bgr = font_color_bgr - self.font_scale = font_scale - self.font_line_type = font_line_type - self.font_line_thickness = font_line_thickness - self.fill_color_bgr = fill_color_bgr - self.fill_color_transparency = fill_color_transparency - self.frame_color_bgr = frame_color_bgr - self.frame_color_transparency = frame_color_transparency - self.frame_thickness = frame_thickness - - def visualize(self, image_bgr, txt, topleft_xy): - txt_w, txt_h = self.get_text_size_wh(txt) - topleft_xy = tuple(map(int, topleft_xy)) - x, y = topleft_xy - if self.frame_color_transparency < 1.0: - t = self.frame_thickness - image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] = ( - image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] - * self.frame_color_transparency - + np.array(self.frame_color_bgr) * (1.0 - self.frame_color_transparency) - ).astype(np.float) - if self.fill_color_transparency < 1.0: - image_bgr[y : y + txt_h, x : x + txt_w, :] = ( - image_bgr[y : y + txt_h, x : x + txt_w, :] * self.fill_color_transparency - + np.array(self.fill_color_bgr) * (1.0 - self.fill_color_transparency) - ).astype(np.float) - cv2.putText( - image_bgr, - txt, - topleft_xy, - self.font_face, - self.font_scale, - self.font_color_bgr, - self.font_line_thickness, - self.font_line_type, - ) - return image_bgr - - def get_text_size_wh(self, txt): - ((txt_w, txt_h), _) = cv2.getTextSize( - txt, self.font_face, self.font_scale, self.font_line_thickness - ) - return txt_w, txt_h - - -class CompoundVisualizer(object): - def __init__(self, visualizers): - self.visualizers = visualizers - - def visualize(self, image_bgr, data): - assert len(data) == len(self.visualizers), ( - "The number of datas {} should match the number of visualizers" - " {}".format(len(data), len(self.visualizers)) - ) - image = image_bgr - for i, visualizer in enumerate(self.visualizers): - image = visualizer.visualize(image, data[i]) - return image - - def __str__(self): - visualizer_str = ", ".join([str(v) for v in self.visualizers]) - return "Compound Visualizer [{}]".format(visualizer_str) diff --git a/spaces/CVPR/LIVE/pydiffvg_tensorflow/color.py b/spaces/CVPR/LIVE/pydiffvg_tensorflow/color.py deleted file mode 100644 index e0db61215407dfbee8e4021aa0b32e70df473ddb..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pydiffvg_tensorflow/color.py +++ /dev/null @@ -1,23 +0,0 @@ -import tensorflow as tf - -class LinearGradient: - def __init__(self, - begin = tf.constant([0.0, 0.0]), - end = tf.constant([0.0, 0.0]), - offsets = tf.constant([0.0]), - stop_colors = tf.constant([0.0, 0.0, 0.0, 0.0])): 
- self.begin = begin - self.end = end - self.offsets = offsets - self.stop_colors = stop_colors - -class RadialGradient: - def __init__(self, - center = tf.constant([0.0, 0.0]), - radius = tf.constant([0.0, 0.0]), - offsets = tf.constant([0.0]), - stop_colors = tf.constant([0.0, 0.0, 0.0, 0.0])): - self.center = center - self.radius = radius - self.offsets = offsets - self.stop_colors = stop_colors diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/inner_product.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/inner_product.h deleted file mode 100644 index 9423b1bdbf19deba9f7ec91d9ce4417b0c2f5145..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/inner_product.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a fill of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// the purpose of this header is to #include the inner_product.h header -// of the sequential, host, and device systems. It should be #included in any -// code which uses adl to dispatch inner_product - -#include - -// SCons can't see through the #defines below to figure out what this header -// includes, so we fake it out by specifying all possible files we might end up -// including inside an #if 0. -#if 0 -#include -#include -#include -#include -#endif - -#define __THRUST_HOST_SYSTEM_INNER_PRODUCT_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/inner_product.h> -#include __THRUST_HOST_SYSTEM_INNER_PRODUCT_HEADER -#undef __THRUST_HOST_SYSTEM_INNER_PRODUCT_HEADER - -#define __THRUST_DEVICE_SYSTEM_INNER_PRODUCT_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/inner_product.h> -#include __THRUST_DEVICE_SYSTEM_INNER_PRODUCT_HEADER -#undef __THRUST_DEVICE_SYSTEM_INNER_PRODUCT_HEADER - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/fill.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/fill.h deleted file mode 100644 index 20c636096c25e9a2d951ad2f50a4de72d0d1b968..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/fill.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -// this system has no special fill functions - diff --git a/spaces/CVPR/Text2Human/Text2Human/utils/language_utils.py b/spaces/CVPR/Text2Human/Text2Human/utils/language_utils.py deleted file mode 100644 index bb2ef69b3001f10b20069f40ec0141d28260482f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Text2Human/Text2Human/utils/language_utils.py +++ /dev/null @@ -1,315 +0,0 @@ -from curses import A_ATTRIBUTES - -import numpy -import torch -from pip import main -from sentence_transformers import SentenceTransformer, util - -# predefined shape text -upper_length_text = [ - 'sleeveless', 'without sleeves', 'sleeves have been cut off', 'tank top', - 'tank shirt', 'muscle shirt', 'short-sleeve', 'short sleeves', - 'with short sleeves', 'medium-sleeve', 'medium sleeves', - 'with medium sleeves', 'sleeves reach elbow', 'long-sleeve', - 'long sleeves', 'with long sleeves' -] -upper_length_attr = { - 'sleeveless': 0, - 'without sleeves': 0, - 'sleeves have been cut off': 0, - 'tank top': 0, - 'tank shirt': 0, - 'muscle shirt': 0, - 'short-sleeve': 1, - 'with short sleeves': 1, - 'short sleeves': 1, - 'medium-sleeve': 2, - 'with medium sleeves': 2, - 'medium sleeves': 2, - 'sleeves reach elbow': 2, - 'long-sleeve': 3, - 'long sleeves': 3, - 'with long sleeves': 3 -} -lower_length_text = [ - 'three-point', 'medium', 'short', 'covering knee', 'cropped', - 'three-quarter', 'long', 'slack', 'of long length' -] -lower_length_attr = { - 'three-point': 0, - 'medium': 1, - 'covering knee': 1, - 'short': 1, - 'cropped': 2, - 'three-quarter': 2, - 'long': 3, - 'slack': 3, - 'of long length': 3 -} -socks_length_text = [ - 'socks', 'stocking', 'pantyhose', 'leggings', 'sheer hosiery' -] -socks_length_attr = { - 'socks': 0, - 'stocking': 1, - 'pantyhose': 1, - 'leggings': 1, - 'sheer hosiery': 1 -} -hat_text = ['hat', 'cap', 'chapeau'] -eyeglasses_text = ['sunglasses'] -belt_text = ['belt', 'with a dress tied around the waist'] -outer_shape_text = [ - 'with outer clothing open', 'with outer clothing unzipped', - 'covering inner clothes', 'with outer clothing zipped' -] -outer_shape_attr = { - 'with outer clothing open': 0, - 'with outer clothing unzipped': 0, - 'covering inner clothes': 1, - 'with outer clothing zipped': 1 -} - -upper_types = [ - 'T-shirt', 'shirt', 'sweater', 'hoodie', 'tops', 'blouse', 'Basic Tee' -] -outer_types = [ - 'jacket', 'outer clothing', 'coat', 'overcoat', 'blazer', 'outerwear', - 'duffle', 'cardigan' -] -skirt_types = ['skirt'] -dress_types = ['dress'] -pant_types = ['jeans', 'pants', 'trousers'] -rompers_types = ['rompers', 'bodysuit', 'jumpsuit'] - -attr_names_list = [ - 'gender', 'hair length', '0 upper clothing length', - '1 lower clothing length', '2 socks', '3 hat', '4 eyeglasses', '5 belt', - '6 opening of outer clothing', '7 upper clothes', '8 outer clothing', - '9 skirt', '10 dress', '11 pants', '12 rompers' -] - - -def generate_shape_attributes(user_shape_texts): - model = SentenceTransformer('all-MiniLM-L6-v2') - parsed_texts = user_shape_texts.split(',') - - text_num = len(parsed_texts) - - human_attr = [0, 0] - attr = [1, 3, 0, 0, 0, 3, 1, 1, 0, 0, 0, 0, 0] - - changed = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - for text_id, text in enumerate(parsed_texts): - user_embeddings = model.encode(text) - if ('man' in text) and (text_id == 0): - human_attr[0] = 0 - human_attr[1] = 0 - - if ('woman' in text or 'lady' in text) and (text_id == 0): - human_attr[0] = 1 - human_attr[1] = 2 - - if (not changed[0]) and (text_id == 1): - # 
upper length - predefined_embeddings = model.encode(upper_length_text) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - arg_idx = torch.argmax(similarities).item() - attr[0] = upper_length_attr[upper_length_text[arg_idx]] - changed[0] = 1 - - if (not changed[1]) and ((text_num == 2 and text_id == 1) or - (text_num > 2 and text_id == 2)): - # lower length - predefined_embeddings = model.encode(lower_length_text) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - arg_idx = torch.argmax(similarities).item() - attr[1] = lower_length_attr[lower_length_text[arg_idx]] - changed[1] = 1 - - if (not changed[2]) and (text_id > 2): - # socks length - predefined_embeddings = model.encode(socks_length_text) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - arg_idx = torch.argmax(similarities).item() - if similarities[0][arg_idx] > 0.7: - attr[2] = arg_idx + 1 - changed[2] = 1 - - if (not changed[3]) and (text_id > 2): - # hat - predefined_embeddings = model.encode(hat_text) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - if similarities[0][0] > 0.7: - attr[3] = 1 - changed[3] = 1 - - if (not changed[4]) and (text_id > 2): - # glasses - predefined_embeddings = model.encode(eyeglasses_text) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - arg_idx = torch.argmax(similarities).item() - if similarities[0][arg_idx] > 0.7: - attr[4] = arg_idx + 1 - changed[4] = 1 - - if (not changed[5]) and (text_id > 2): - # belt - predefined_embeddings = model.encode(belt_text) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - arg_idx = torch.argmax(similarities).item() - if similarities[0][arg_idx] > 0.7: - attr[5] = arg_idx + 1 - changed[5] = 1 - - if (not changed[6]) and (text_id == 3): - # outer coverage - predefined_embeddings = model.encode(outer_shape_text) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - arg_idx = torch.argmax(similarities).item() - if similarities[0][arg_idx] > 0.7: - attr[6] = arg_idx - changed[6] = 1 - - if (not changed[10]) and (text_num == 2 and text_id == 1): - # dress_types - predefined_embeddings = model.encode(dress_types) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - similarity_skirt = util.dot_score(user_embeddings, - model.encode(skirt_types)) - if similarities[0][0] > 0.5 and similarities[0][ - 0] > similarity_skirt[0][0]: - attr[10] = 1 - attr[7] = 0 - attr[8] = 0 - attr[9] = 0 - attr[11] = 0 - attr[12] = 0 - - changed[0] = 1 - changed[10] = 1 - changed[7] = 1 - changed[8] = 1 - changed[9] = 1 - changed[11] = 1 - changed[12] = 1 - - if (not changed[12]) and (text_num == 2 and text_id == 1): - # rompers_types - predefined_embeddings = model.encode(rompers_types) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - max_similarity = torch.max(similarities).item() - if max_similarity > 0.6: - attr[12] = 1 - attr[7] = 0 - attr[8] = 0 - attr[9] = 0 - attr[10] = 0 - attr[11] = 0 - - changed[12] = 1 - changed[7] = 1 - changed[8] = 1 - changed[9] = 1 - changed[10] = 1 - changed[11] = 1 - - if (not changed[7]) and (text_num > 2 and text_id == 1): - # upper_types - predefined_embeddings = model.encode(upper_types) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - max_similarity = torch.max(similarities).item() - if max_similarity > 0.6: - attr[7] = 1 - changed[7] = 1 - - if (not changed[8]) and (text_id == 3): - # outer_types - 
predefined_embeddings = model.encode(outer_types) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - arg_idx = torch.argmax(similarities).item() - if similarities[0][arg_idx] > 0.7: - attr[6] = outer_shape_attr[outer_shape_text[arg_idx]] - attr[8] = 1 - changed[8] = 1 - - if (not changed[9]) and (text_num > 2 and text_id == 2): - # skirt_types - predefined_embeddings = model.encode(skirt_types) - similarity_skirt = util.dot_score(user_embeddings, - predefined_embeddings) - similarity_dress = util.dot_score(user_embeddings, - model.encode(dress_types)) - if similarity_skirt[0][0] > 0.7 and similarity_skirt[0][ - 0] > similarity_dress[0][0]: - attr[9] = 1 - attr[10] = 0 - changed[9] = 1 - changed[10] = 1 - - if (not changed[11]) and (text_num > 2 and text_id == 2): - # pant_types - predefined_embeddings = model.encode(pant_types) - similarities = util.dot_score(user_embeddings, - predefined_embeddings) - max_similarity = torch.max(similarities).item() - if max_similarity > 0.6: - attr[11] = 1 - attr[9] = 0 - attr[10] = 0 - attr[12] = 0 - changed[11] = 1 - changed[9] = 1 - changed[10] = 1 - changed[12] = 1 - - return human_attr + attr - - -def generate_texture_attributes(user_text): - parsed_texts = user_text.split(',') - - attr = [] - for text in parsed_texts: - if ('pure color' in text) or ('solid color' in text): - attr.append(4) - elif ('spline' in text) or ('stripe' in text): - attr.append(3) - elif ('plaid' in text) or ('lattice' in text): - attr.append(5) - elif 'floral' in text: - attr.append(1) - elif 'denim' in text: - attr.append(0) - else: - attr.append(17) - - if len(attr) == 1: - attr.append(attr[0]) - attr.append(17) - - if len(attr) == 2: - attr.append(17) - - return attr - - -if __name__ == "__main__": - user_request = input('Enter your request: ') - while user_request != '\\q': - attr = generate_shape_attributes(user_request) - print(attr) - for attr_name, attr_value in zip(attr_names_list, attr): - print(attr_name, attr_value) - user_request = input('Enter your request: ') diff --git a/spaces/CamCam17/Alexwww-davide-comic-book-characters/README.md b/spaces/CamCam17/Alexwww-davide-comic-book-characters/README.md deleted file mode 100644 index 69e3efb33215d67b5c429ec8c1bb4a8f567a2188..0000000000000000000000000000000000000000 --- a/spaces/CamCam17/Alexwww-davide-comic-book-characters/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Livebook -emoji: 📓 -colorFrom: pink -colorTo: purple -sdk: docker -fullWidth: true ---- - -You can install and run [Livebook](https://livebook.dev/) inside a Hugging Face Space. Here's [a tutorial](https://huggingface.co/docs/hub/spaces-sdks-docker-livebook) on how to do that. 
\ No newline at end of file diff --git a/spaces/Cpp4App/Cpp4App/CDM/detect_compo/deprecated/Block.py b/spaces/Cpp4App/Cpp4App/CDM/detect_compo/deprecated/Block.py deleted file mode 100644 index 8cf301418704e0ac91f791126690100bbedc5a8b..0000000000000000000000000000000000000000 --- a/spaces/Cpp4App/Cpp4App/CDM/detect_compo/deprecated/Block.py +++ /dev/null @@ -1,56 +0,0 @@ -import cv2 -from os.path import join as pjoin -import time -import numpy as np - -from CDM.detect_compo.lib_ip.Component import Component -from CDM.config.CONFIG_UIED import Config -C = Config() - - -class Block(Component): - def __init__(self, region, image_shape): - super().__init__(region, image_shape) - self.category = 'Block' - self.parent = None - self.children = [] - self.uicompo_ = None - self.top_or_botm = None - self.redundant = False - - def block_is_uicompo(self, image_shape, max_compo_scale): - ''' - Check the if the block is a ui component according to its relative size - ''' - row, column = image_shape[:2] - # print(height, height / row, max_compo_scale[0], height / row > max_compo_scale[0]) - # draw.draw_bounding_box(org, [corner], show=True) - # ignore atomic components - if self.bbox.height / row > max_compo_scale[0] or self.bbox.width / column > max_compo_scale[1]: - return False - return True - - def block_is_top_or_bottom_bar(self, image_shape, top_bottom_height): - ''' - Check if the block is top bar or bottom bar - ''' - height, width = image_shape[:2] - (column_min, row_min, column_max, row_max) = self.bbox.put_bbox() - if column_min < 5 and row_min < 5 and \ - width - column_max < 5 and row_max < height * top_bottom_height[0]: - self.uicompo_ = True - return True - if column_min < 5 and row_min > height * top_bottom_height[1] and \ - width - column_max < 5 and height - row_max < 5: - self.uicompo_ = True - return True - return False - - def block_erase_from_bin(self, binary, pad): - (column_min, row_min, column_max, row_max) = self.put_bbox() - column_min = max(column_min - pad, 0) - column_max = min(column_max + pad, binary.shape[1]) - row_min = max(row_min - pad, 0) - row_max = min(row_max + pad, binary.shape[0]) - cv2.rectangle(binary, (column_min, row_min), (column_max, row_max), (0), -1) - diff --git a/spaces/CyberHarem/find_my_waifu/huggingface.py b/spaces/CyberHarem/find_my_waifu/huggingface.py deleted file mode 100644 index 3c357336b7d664810a027bbb4e01f7c7d0169eff..0000000000000000000000000000000000000000 --- a/spaces/CyberHarem/find_my_waifu/huggingface.py +++ /dev/null @@ -1,38 +0,0 @@ -import math -import os - -from gchar.utils import get_requests_session -from huggingface_hub import configure_http_backend, HfApi, HfFileSystem - -_NUM_TAGS = [ - ('n<1K', 0, 1_000), - ('1K1T', 1_000_000_000_000, math.inf), -] - - -def number_to_tag(v): - for tag, min_, max_ in _NUM_TAGS: - if min_ <= v < max_: - return tag - - raise ValueError(f'No tags found for {v!r}') - - -configure_http_backend(get_requests_session) - - -def get_hf_client() -> HfApi: - return HfApi(token=os.environ.get('HF_TOKEN')) - - -def get_hf_fs() -> HfFileSystem: - return HfFileSystem(token=os.environ.get('HF_TOKEN')) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GimpPaletteFile.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GimpPaletteFile.py deleted file mode 100644 index d388928945a0f6711de2b1c8d1ed50ce192a8219..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/GimpPaletteFile.py +++ /dev/null @@ -1,56 +0,0 @@ -# -# 
Python Imaging Library -# $Id$ -# -# stuff to read GIMP palette files -# -# History: -# 1997-08-23 fl Created -# 2004-09-07 fl Support GIMP 2.0 palette files. -# -# Copyright (c) Secret Labs AB 1997-2004. All rights reserved. -# Copyright (c) Fredrik Lundh 1997-2004. -# -# See the README file for information on usage and redistribution. -# - -import re - -from ._binary import o8 - - -class GimpPaletteFile: - """File handler for GIMP's palette format.""" - - rawmode = "RGB" - - def __init__(self, fp): - self.palette = [o8(i) * 3 for i in range(256)] - - if fp.readline()[:12] != b"GIMP Palette": - msg = "not a GIMP palette file" - raise SyntaxError(msg) - - for i in range(256): - s = fp.readline() - if not s: - break - - # skip fields and comment lines - if re.match(rb"\w+:|#", s): - continue - if len(s) > 100: - msg = "bad palette file" - raise SyntaxError(msg) - - v = tuple(map(int, s.split()[:3])) - if len(v) != 3: - msg = "bad palette entry" - raise ValueError(msg) - - self.palette[i] = o8(v[0]) + o8(v[1]) + o8(v[2]) - - self.palette = b"".join(self.palette) - - def getpalette(self): - return self.palette, self.rawmode diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_config.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_config.py deleted file mode 100644 index f46a5bfe6ba6093688c7a91bd51de9d137840432..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_config.py +++ /dev/null @@ -1,369 +0,0 @@ -import logging -import os -import ssl -import sys -import typing -from pathlib import Path - -import certifi - -from ._compat import set_minimum_tls_version_1_2 -from ._models import Headers -from ._types import CertTypes, HeaderTypes, TimeoutTypes, URLTypes, VerifyTypes -from ._urls import URL -from ._utils import get_ca_bundle_from_env - -DEFAULT_CIPHERS = ":".join( - [ - "ECDHE+AESGCM", - "ECDHE+CHACHA20", - "DHE+AESGCM", - "DHE+CHACHA20", - "ECDH+AESGCM", - "DH+AESGCM", - "ECDH+AES", - "DH+AES", - "RSA+AESGCM", - "RSA+AES", - "!aNULL", - "!eNULL", - "!MD5", - "!DSS", - ] -) - - -logger = logging.getLogger("httpx") - - -class UnsetType: - pass # pragma: no cover - - -UNSET = UnsetType() - - -def create_ssl_context( - cert: typing.Optional[CertTypes] = None, - verify: VerifyTypes = True, - trust_env: bool = True, - http2: bool = False, -) -> ssl.SSLContext: - return SSLConfig( - cert=cert, verify=verify, trust_env=trust_env, http2=http2 - ).ssl_context - - -class SSLConfig: - """ - SSL Configuration. - """ - - DEFAULT_CA_BUNDLE_PATH = Path(certifi.where()) - - def __init__( - self, - *, - cert: typing.Optional[CertTypes] = None, - verify: VerifyTypes = True, - trust_env: bool = True, - http2: bool = False, - ): - self.cert = cert - self.verify = verify - self.trust_env = trust_env - self.http2 = http2 - self.ssl_context = self.load_ssl_context() - - def load_ssl_context(self) -> ssl.SSLContext: - logger.debug( - "load_ssl_context verify=%r cert=%r trust_env=%r http2=%r", - self.verify, - self.cert, - self.trust_env, - self.http2, - ) - - if self.verify: - return self.load_ssl_context_verify() - return self.load_ssl_context_no_verify() - - def load_ssl_context_no_verify(self) -> ssl.SSLContext: - """ - Return an SSL context for unverified connections. 
- """ - context = self._create_default_ssl_context() - context.check_hostname = False - context.verify_mode = ssl.CERT_NONE - self._load_client_certs(context) - return context - - def load_ssl_context_verify(self) -> ssl.SSLContext: - """ - Return an SSL context for verified connections. - """ - if self.trust_env and self.verify is True: - ca_bundle = get_ca_bundle_from_env() - if ca_bundle is not None: - self.verify = ca_bundle - - if isinstance(self.verify, ssl.SSLContext): - # Allow passing in our own SSLContext object that's pre-configured. - context = self.verify - self._load_client_certs(context) - return context - elif isinstance(self.verify, bool): - ca_bundle_path = self.DEFAULT_CA_BUNDLE_PATH - elif Path(self.verify).exists(): - ca_bundle_path = Path(self.verify) - else: - raise IOError( - "Could not find a suitable TLS CA certificate bundle, " - "invalid path: {}".format(self.verify) - ) - - context = self._create_default_ssl_context() - context.verify_mode = ssl.CERT_REQUIRED - context.check_hostname = True - - # Signal to server support for PHA in TLS 1.3. Raises an - # AttributeError if only read-only access is implemented. - if sys.version_info >= (3, 8): # pragma: no cover - try: - context.post_handshake_auth = True - except AttributeError: # pragma: no cover - pass - - # Disable using 'commonName' for SSLContext.check_hostname - # when the 'subjectAltName' extension isn't available. - try: - context.hostname_checks_common_name = False - except AttributeError: # pragma: no cover - pass - - if ca_bundle_path.is_file(): - cafile = str(ca_bundle_path) - logger.debug("load_verify_locations cafile=%r", cafile) - context.load_verify_locations(cafile=cafile) - elif ca_bundle_path.is_dir(): - capath = str(ca_bundle_path) - logger.debug("load_verify_locations capath=%r", capath) - context.load_verify_locations(capath=capath) - - self._load_client_certs(context) - - return context - - def _create_default_ssl_context(self) -> ssl.SSLContext: - """ - Creates the default SSLContext object that's used for both verified - and unverified connections. - """ - context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) - set_minimum_tls_version_1_2(context) - context.options |= ssl.OP_NO_COMPRESSION - context.set_ciphers(DEFAULT_CIPHERS) - - if ssl.HAS_ALPN: - alpn_idents = ["http/1.1", "h2"] if self.http2 else ["http/1.1"] - context.set_alpn_protocols(alpn_idents) - - if sys.version_info >= (3, 8): # pragma: no cover - keylogfile = os.environ.get("SSLKEYLOGFILE") - if keylogfile and self.trust_env: - context.keylog_filename = keylogfile - - return context - - def _load_client_certs(self, ssl_context: ssl.SSLContext) -> None: - """ - Loads client certificates into our SSLContext object - """ - if self.cert is not None: - if isinstance(self.cert, str): - ssl_context.load_cert_chain(certfile=self.cert) - elif isinstance(self.cert, tuple) and len(self.cert) == 2: - ssl_context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1]) - elif isinstance(self.cert, tuple) and len(self.cert) == 3: - ssl_context.load_cert_chain( - certfile=self.cert[0], - keyfile=self.cert[1], - password=self.cert[2], # type: ignore - ) - - -class Timeout: - """ - Timeout configuration. - - **Usage**: - - Timeout(None) # No timeouts. - Timeout(5.0) # 5s timeout on all operations. - Timeout(None, connect=5.0) # 5s timeout on connect, no other timeouts. - Timeout(5.0, connect=10.0) # 10s timeout on connect. 5s timeout elsewhere. - Timeout(5.0, pool=None) # No timeout on acquiring connection from pool. 
- # 5s timeout elsewhere. - """ - - def __init__( - self, - timeout: typing.Union[TimeoutTypes, UnsetType] = UNSET, - *, - connect: typing.Union[None, float, UnsetType] = UNSET, - read: typing.Union[None, float, UnsetType] = UNSET, - write: typing.Union[None, float, UnsetType] = UNSET, - pool: typing.Union[None, float, UnsetType] = UNSET, - ): - if isinstance(timeout, Timeout): - # Passed as a single explicit Timeout. - assert connect is UNSET - assert read is UNSET - assert write is UNSET - assert pool is UNSET - self.connect = timeout.connect # type: typing.Optional[float] - self.read = timeout.read # type: typing.Optional[float] - self.write = timeout.write # type: typing.Optional[float] - self.pool = timeout.pool # type: typing.Optional[float] - elif isinstance(timeout, tuple): - # Passed as a tuple. - self.connect = timeout[0] - self.read = timeout[1] - self.write = None if len(timeout) < 3 else timeout[2] - self.pool = None if len(timeout) < 4 else timeout[3] - elif not ( - isinstance(connect, UnsetType) - or isinstance(read, UnsetType) - or isinstance(write, UnsetType) - or isinstance(pool, UnsetType) - ): - self.connect = connect - self.read = read - self.write = write - self.pool = pool - else: - if isinstance(timeout, UnsetType): - raise ValueError( - "httpx.Timeout must either include a default, or set all " - "four parameters explicitly." - ) - self.connect = timeout if isinstance(connect, UnsetType) else connect - self.read = timeout if isinstance(read, UnsetType) else read - self.write = timeout if isinstance(write, UnsetType) else write - self.pool = timeout if isinstance(pool, UnsetType) else pool - - def as_dict(self) -> typing.Dict[str, typing.Optional[float]]: - return { - "connect": self.connect, - "read": self.read, - "write": self.write, - "pool": self.pool, - } - - def __eq__(self, other: typing.Any) -> bool: - return ( - isinstance(other, self.__class__) - and self.connect == other.connect - and self.read == other.read - and self.write == other.write - and self.pool == other.pool - ) - - def __repr__(self) -> str: - class_name = self.__class__.__name__ - if len({self.connect, self.read, self.write, self.pool}) == 1: - return f"{class_name}(timeout={self.connect})" - return ( - f"{class_name}(connect={self.connect}, " - f"read={self.read}, write={self.write}, pool={self.pool})" - ) - - -class Limits: - """ - Configuration for limits to various client behaviors. - - **Parameters:** - - * **max_connections** - The maximum number of concurrent connections that may be - established. - * **max_keepalive_connections** - Allow the connection pool to maintain - keep-alive connections below this point. Should be less than or equal - to `max_connections`. - * **keepalive_expiry** - Time limit on idle keep-alive connections in seconds. 
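A brief usage sketch tying the two configuration classes above together through the public httpx API; the values are illustrative:

    import httpx

    limits = httpx.Limits(max_connections=100, max_keepalive_connections=20, keepalive_expiry=5.0)
    timeout = httpx.Timeout(5.0, connect=10.0)   # 10s to establish a connection, 5s elsewhere
    client = httpx.Client(limits=limits, timeout=timeout)
    # client.get("https://example.org")          # requests now honour both configurations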
- """ - - def __init__( - self, - *, - max_connections: typing.Optional[int] = None, - max_keepalive_connections: typing.Optional[int] = None, - keepalive_expiry: typing.Optional[float] = 5.0, - ): - self.max_connections = max_connections - self.max_keepalive_connections = max_keepalive_connections - self.keepalive_expiry = keepalive_expiry - - def __eq__(self, other: typing.Any) -> bool: - return ( - isinstance(other, self.__class__) - and self.max_connections == other.max_connections - and self.max_keepalive_connections == other.max_keepalive_connections - and self.keepalive_expiry == other.keepalive_expiry - ) - - def __repr__(self) -> str: - class_name = self.__class__.__name__ - return ( - f"{class_name}(max_connections={self.max_connections}, " - f"max_keepalive_connections={self.max_keepalive_connections}, " - f"keepalive_expiry={self.keepalive_expiry})" - ) - - -class Proxy: - def __init__( - self, - url: URLTypes, - *, - auth: typing.Optional[typing.Tuple[str, str]] = None, - headers: typing.Optional[HeaderTypes] = None, - ): - url = URL(url) - headers = Headers(headers) - - if url.scheme not in ("http", "https", "socks5"): - raise ValueError(f"Unknown scheme for proxy URL {url!r}") - - if url.username or url.password: - # Remove any auth credentials from the URL. - auth = (url.username, url.password) - url = url.copy_with(username=None, password=None) - - self.url = url - self.auth = auth - self.headers = headers - - @property - def raw_auth(self) -> typing.Optional[typing.Tuple[bytes, bytes]]: - # The proxy authentication as raw bytes. - return ( - None - if self.auth is None - else (self.auth[0].encode("utf-8"), self.auth[1].encode("utf-8")) - ) - - def __repr__(self) -> str: - # The authentication is represented with the password component masked. - auth = (self.auth[0], "********") if self.auth else None - - # Build a nice concise representation. 
- url_str = f"{str(self.url)!r}" - auth_str = f", auth={auth!r}" if auth else "" - headers_str = f", headers={dict(self.headers)!r}" if self.headers else "" - return f"Proxy({url_str}{auth_str}{headers_str})" - - -DEFAULT_TIMEOUT_CONFIG = Timeout(timeout=5.0) -DEFAULT_LIMITS = Limits(max_connections=100, max_keepalive_connections=20) -DEFAULT_MAX_REDIRECTS = 20 diff --git a/spaces/Dao3/ChatGLM-6B/app.py b/spaces/Dao3/ChatGLM-6B/app.py deleted file mode 100644 index 7871b6f037949e3cbab97c3e39fce46ce60540fa..0000000000000000000000000000000000000000 --- a/spaces/Dao3/ChatGLM-6B/app.py +++ /dev/null @@ -1,46 +0,0 @@ -from transformers import AutoModel, AutoTokenizer -import gradio as gr - -tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -# model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() -model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4",trust_remote_code=True).float() -model = model.eval() - -MAX_TURNS = 20 -MAX_BOXES = MAX_TURNS * 2 - - -def predict(input, max_length, top_p, temperature, history=None): - if history is None: - history = [] - for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p, - temperature=temperature): - updates = [] - for query, response in history: - updates.append(gr.update(visible=True, value="用户:" + query)) - updates.append(gr.update(visible=True, value="ChatGLM-6B:" + response)) - if len(updates) < MAX_BOXES: - updates = updates + [gr.Textbox.update(visible=False)] * (MAX_BOXES - len(updates)) - yield [history] + updates - - -with gr.Blocks() as demo: - state = gr.State([]) - text_boxes = [] - for i in range(MAX_BOXES): - if i % 2 == 0: - text_boxes.append(gr.Markdown(visible=False, label="提问:")) - else: - text_boxes.append(gr.Markdown(visible=False, label="回复:")) - - with gr.Row(): - with gr.Column(scale=4): - txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter", lines=11).style( - container=False) - with gr.Column(scale=1): - max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True) - top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True) - temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True) - button = gr.Button("Generate") - button.click(predict, [txt, max_length, top_p, temperature, state], [state] + text_boxes) -demo.queue().launch() diff --git a/spaces/Datasculptor/MusicGen/tests/quantization/test_vq.py b/spaces/Datasculptor/MusicGen/tests/quantization/test_vq.py deleted file mode 100644 index c215099fedacae35c6798fdd9b8420a447aa16bb..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/MusicGen/tests/quantization/test_vq.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from audiocraft.quantization.vq import ResidualVectorQuantizer - - -class TestResidualVectorQuantizer: - - def test_rvq(self): - x = torch.randn(1, 16, 2048) - vq = ResidualVectorQuantizer(n_q=8, dimension=16, bins=8) - res = vq(x, 1.) 
- assert res.x.shape == torch.Size([1, 16, 2048]) diff --git a/spaces/Didier/Semantic_Search_arXiv/app.py b/spaces/Didier/Semantic_Search_arXiv/app.py deleted file mode 100644 index 22429c65c6d17190947372709ce567c90aa7c9f1..0000000000000000000000000000000000000000 --- a/spaces/Didier/Semantic_Search_arXiv/app.py +++ /dev/null @@ -1,58 +0,0 @@ -# Title: app.py -# Author: Didier Guillevic -# Date: 2021 - -import streamlit as st -import pandas as pd -import numpy as np -import os -import io - -import arxiv_info - -st.title("Semantic Search (arxiv.org)") -st.markdown(""" -* Query with some document (preferred) or some keywords (in any language). -* Get papers from [arxiv.org](https://arxiv.org) with meaning similar to the query (arxiv indexed up to 2020-11). -* Sample queries: - * Techniques to measure blood sugar levels. - * Aujourd'hui, malgré des décennies de développement de la médecine et l'intérêt croissant pour les soins de santé de précision, la grande majorité des diagnostics se produisent une fois que les patients commencent à montrer des signes visibles de maladie. - * Diabetes-Patienten kontrollieren ständig den Blutzucker. Forscher haben jetzt die Wirkung von speziellen Hautsensoren geprüft, die ohne Blut auskommen. - * 機械学習をヘルスケアに適用することで、多くの素晴らしい結果が得られました。ただし、これらの大部分は、治療に明示的にではなく、状態の診断または結果の予測に焦点を合わせています。 -""") - -# -# Define the form to get the query and desired number of results -# - -def build_markdown_response(docs): - outStream = io.StringIO() - for doc in docs: - outStream.write(f"* Score : {doc['score']:0.3f}\n") - outStream.write(f"* URL : [{doc['url']}]({doc['url']})\n") - outStream.write(f"* Title : {doc['title']}\n") - outStream.write(f"* Abstract: {doc['abstract']}\n") - outStream.write("- - - -\n") - result = outStream.getvalue() - outStream.close() - return result - -def form_callback(): - # Get the top semantically similar documents - docs = arxiv_info.retrieve_similar_docs( - st.session_state.query, - arxiv_info.arxiv_embeddings, - arxiv_info.arxiv_df, - nb_results=st.session_state.nb_results) - - # Return the result in markdown format - st.header("Search document") - st.write(st.session_state.query) - st.header("Similar documents") - result = build_markdown_response(docs) - st.markdown(result) - -with st.form(key="input_form"): - st.text_area("Text to search", key="query") - st.slider(label="Number of results", min_value=1, max_value=10, value=5, key="nb_results") - submit_button = st.form_submit_button(label="Submit", on_click=form_callback) diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/custom_ops.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/custom_ops.py deleted file mode 100644 index 6509d70bbcd49a24628e3e90258ccb0d8c5a3b39..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/custom_ops.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
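# Hedged usage sketch for this module (names below are placeholders, not from this
# repo): callers typically compile and cache a C++/CUDA op once per process, e.g.
#
#     plugin = get_plugin(
#         module_name="example_plugin",                  # hypothetical name
#         sources=["example_op.cpp", "example_op.cu"],   # hypothetical sources
#         extra_cuda_cflags=["--use_fast_math"],
#     )
#     y = plugin.example_op(x)    # call into the compiled extension
#
# Repeated calls return the cached module from _cached_plugins defined below.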
- -import os -import glob -import torch -import torch.utils.cpp_extension -import importlib -import hashlib -import shutil -from pathlib import Path -import re -import uuid - -from torch.utils.file_baton import FileBaton - -# ---------------------------------------------------------------------------- -# Global options. - -verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full' - -# ---------------------------------------------------------------------------- -# Internal helper funcs. - - -def _find_compiler_bindir(): - patterns = [ - 'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64', - 'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64', - 'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64', - 'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin', - ] - for pattern in patterns: - matches = sorted(glob.glob(pattern)) - if len(matches): - return matches[-1] - return None - - -def _get_mangled_gpu_name(): - name = torch.cuda.get_device_name().lower() - out = [] - for c in name: - if re.match('[a-z0-9_-]+', c): - out.append(c) - else: - out.append('-') - return ''.join(out) - - -# ---------------------------------------------------------------------------- -# Main entry point for compiling and loading C++/CUDA plugins. - -_cached_plugins = dict() - - -def get_plugin(module_name, sources, **build_kwargs): - assert verbosity in ['none', 'brief', 'full'] - - # Already cached? - if module_name in _cached_plugins: - return _cached_plugins[module_name] - - # Print status. - if verbosity == 'full': - print(f'Setting up PyTorch plugin "{module_name}"...') - elif verbosity == 'brief': - print( - f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True) - - try: # pylint: disable=too-many-nested-blocks - # Make sure we can find the necessary compiler binaries. - if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0: - compiler_bindir = _find_compiler_bindir() - if compiler_bindir is None: - raise RuntimeError( - f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".') - os.environ['PATH'] += ';' + compiler_bindir - - # Compile and load. - verbose_build = (verbosity == 'full') - - # Incremental build md5sum trickery. Copies all the input source files - # into a cached build directory under a combined md5 digest of the input - # source files. Copying is done only if the combined digest has changed. - # This keeps input file timestamps and filenames the same as in previous - # extension builds, allowing for fast incremental rebuilds. - # - # This optimization is done only in case all the source files reside in - # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR - # environment variable is set (we take this as a signal that the user - # actually cares about this.) - source_dirs_set = set(os.path.dirname(source) for source in sources) - if len(source_dirs_set) == 1 and ('TORCH_EXTENSIONS_DIR' in os.environ): - all_source_files = sorted(list(x for x in Path( - list(source_dirs_set)[0]).iterdir() if x.is_file())) - - # Compute a combined hash digest for all source files in the same - # custom op directory (usually .cu, .cpp, .py and .h files). 
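# (Hedged illustration of the resulting cache layout, with a made-up digest and
#  path details that may vary by PyTorch version: the copies end up roughly under
#      <TORCH_EXTENSIONS_DIR>/<module_name>/<md5 hexdigest>/<source basenames>
#  so any edit to a source file changes the digest and lands in a fresh directory,
#  while unchanged sources keep their old timestamps and trigger no rebuild.)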
- hash_md5 = hashlib.md5() - for src in all_source_files: - with open(src, 'rb') as f: - hash_md5.update(f.read()) - build_dir = torch.utils.cpp_extension._get_build_directory( - module_name, verbose=verbose_build) # pylint: disable=protected-access - digest_build_dir = os.path.join(build_dir, hash_md5.hexdigest()) - - if not os.path.isdir(digest_build_dir): - os.makedirs(digest_build_dir, exist_ok=True) - baton = FileBaton(os.path.join(digest_build_dir, 'lock')) - if baton.try_acquire(): - try: - for src in all_source_files: - shutil.copyfile(src, os.path.join( - digest_build_dir, os.path.basename(src))) - finally: - baton.release() - else: - # Someone else is copying source files under the digest dir, - # wait until done and continue. - baton.wait() - digest_sources = [os.path.join( - digest_build_dir, os.path.basename(x)) for x in sources] - torch.utils.cpp_extension.load(name=module_name, build_directory=build_dir, - verbose=verbose_build, sources=digest_sources, **build_kwargs) - else: - torch.utils.cpp_extension.load( - name=module_name, verbose=verbose_build, sources=sources, **build_kwargs) - module = importlib.import_module(module_name) - - except: - if verbosity == 'brief': - print('Failed!') - raise - - # Print status and add to cache. - if verbosity == 'full': - print(f'Done setting up PyTorch plugin "{module_name}".') - elif verbosity == 'brief': - print('Done.') - _cached_plugins[module_name] = module - return module - -# ---------------------------------------------------------------------------- - - -def get_plugin_v3(module_name, sources, headers=None, source_dir=None, **build_kwargs): - assert verbosity in ['none', 'brief', 'full'] - if headers is None: - headers = [] - if source_dir is not None: - sources = [os.path.join(source_dir, fname) for fname in sources] - headers = [os.path.join(source_dir, fname) for fname in headers] - - # Already cached? - if module_name in _cached_plugins: - return _cached_plugins[module_name] - - # Print status. - if verbosity == 'full': - print(f'Setting up PyTorch plugin "{module_name}"...') - elif verbosity == 'brief': - print( - f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True) - verbose_build = (verbosity == 'full') - - # Compile and load. - try: # pylint: disable=too-many-nested-blocks - # Make sure we can find the necessary compiler binaries. - if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0: - compiler_bindir = _find_compiler_bindir() - if compiler_bindir is None: - raise RuntimeError( - f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".') - os.environ['PATH'] += ';' + compiler_bindir - - # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either - # break the build or unnecessarily restrict what's available to nvcc. - # Unset it to let nvcc decide based on what's available on the - # machine. - os.environ['TORCH_CUDA_ARCH_LIST'] = '' - - # Incremental build md5sum trickery. Copies all the input source files - # into a cached build directory under a combined md5 digest of the input - # source files. Copying is done only if the combined digest has changed. - # This keeps input file timestamps and filenames the same as in previous - # extension builds, allowing for fast incremental rebuilds. 
- # - # This optimization is done only in case all the source files reside in - # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR - # environment variable is set (we take this as a signal that the user - # actually cares about this.) - # - # EDIT: We now do it regardless of TORCH_EXTENSIOS_DIR, in order to work - # around the *.cu dependency bug in ninja config. - # - all_source_files = sorted(sources + headers) - all_source_dirs = set(os.path.dirname(fname) - for fname in all_source_files) - # and ('TORCH_EXTENSIONS_DIR' in os.environ): - if len(all_source_dirs) == 1: - - # Compute combined hash digest for all source files. - hash_md5 = hashlib.md5() - for src in all_source_files: - with open(src, 'rb') as f: - hash_md5.update(f.read()) - - # Select cached build directory name. - source_digest = hash_md5.hexdigest() - build_top_dir = torch.utils.cpp_extension._get_build_directory( - module_name, verbose=verbose_build) # pylint: disable=protected-access - cached_build_dir = os.path.join( - build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}') - - if not os.path.isdir(cached_build_dir): - tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}' - os.makedirs(tmpdir) - for src in all_source_files: - shutil.copyfile(src, os.path.join( - tmpdir, os.path.basename(src))) - try: - os.replace(tmpdir, cached_build_dir) # atomic - except OSError: - # source directory already exists, delete tmpdir and its contents. - shutil.rmtree(tmpdir) - if not os.path.isdir(cached_build_dir): - raise - - # Compile. - cached_sources = [os.path.join( - cached_build_dir, os.path.basename(fname)) for fname in sources] - torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir, - verbose=verbose_build, sources=cached_sources, **build_kwargs) - else: - torch.utils.cpp_extension.load( - name=module_name, verbose=verbose_build, sources=sources, **build_kwargs) - - # Load. - module = importlib.import_module(module_name) - - except: - if verbosity == 'brief': - print('Failed!') - raise - - # Print status and add to cache dict. - if verbosity == 'full': - print(f'Done setting up PyTorch plugin "{module_name}".') - elif verbosity == 'brief': - print('Done.') - _cached_plugins[module_name] = module - return module diff --git a/spaces/ECCV2022/ECCV2022_papers/app.py b/spaces/ECCV2022/ECCV2022_papers/app.py deleted file mode 100644 index c82a4c739f588f88539637b14e4843dd9009d7d5..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/ECCV2022_papers/app.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import gradio as gr - -from paper_list import PaperList - -DESCRIPTION = '# ECCV 2022 Papers' -NOTES = ''' -- [ECCV 2022](https://eccv2022.ecva.net/) -- [List of accepted papers](https://docs.google.com/spreadsheets/d/1PvvnVAuG9TDov1rBQcLhH5noIM4INTfLhKwrk6ESDFU/edit?usp=sharing) -''' -FOOTER = 'visitor badge' - - -def main(): - paper_list = PaperList() - - with gr.Blocks(css='style.css') as demo: - gr.Markdown(DESCRIPTION) - - search_box = gr.Textbox( - label='Search Title', - placeholder= - 'You can search for titles with regular expressions. e.g. (? context: - limit = k - context - 1 - assert torch.allclose(x.grad[:, :limit], - torch.tensor(0.)), x.grad[:, :limit].norm() - - # Now check that streaming gives the same result at batch eval. 
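# Hedged explanatory note (editor's, not from the original test): with causal=True
# the layers cache past keys/values while streaming, so feeding the same sequence
# one timestep at a time should reproduce the full-sequence output up to
# floating-point noise — hence the relative-error bound of 1e-6 applied below.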
- x = torch.randn(4, steps, 16) - y = tr(x) - ys = [] - with tr.streaming(): - for k in range(steps): - chunk = x[:, k:k + 1, :] - ys.append(tr(chunk)) - y_stream = torch.cat(ys, dim=1) - delta = torch.norm(y_stream - y) / torch.norm(y) - assert delta < 1e-6, delta - - -def test_transformer_vs_pytorch(): - torch.manual_seed(1234) - # Check that in the non causal setting, we get the same result as - # PyTorch Transformer encoder. - for custom in [False, True]: - tr = StreamingTransformer( - 16, 4, 2, - causal=False, custom=custom, dropout=0., positional_scale=0.) - layer = torch.nn.TransformerEncoderLayer(16, 4, dropout=0., batch_first=True) - tr_ref = torch.nn.TransformerEncoder(layer, 2) - tr.load_state_dict(tr_ref.state_dict()) - - x = torch.randn(4, 20, 16) - y = tr(x) - y2 = tr_ref(x) - delta = torch.norm(y2 - y) / torch.norm(y) - assert delta < 1e-6, delta - - -def test_streaming_api(): - tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0.) - tr.eval() - steps = 12 - x = torch.randn(1, steps, 16) - - with torch.no_grad(): - with tr.streaming(): - _ = tr(x[:, :1]) - state = {k: v.clone() for k, v in tr.get_streaming_state().items()} - y = tr(x[:, 1:2]) - tr.set_streaming_state(state) - y2 = tr(x[:, 1:2]) - assert torch.allclose(y, y2), (y - y2).norm() - assert tr.flush() is None - - -def test_memory_efficient(): - torch.manual_seed(1234) - for backend in ['torch', 'xformers']: - set_efficient_attention_backend(backend) - - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1) - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1) - tr_mem_efficient.load_state_dict(tr.state_dict()) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_mem_efficient(x) - assert torch.allclose(y, y2), ((y - y2).norm(), backend) - - -def test_attention_as_float32(): - torch.manual_seed(1234) - cases = [ - {'custom': True}, - {'custom': False}, - ] - for case in cases: - tr = StreamingTransformer(16, 4, 2, dropout=0., dtype=torch.bfloat16, **case) - tr_float32 = StreamingTransformer( - 16, 4, 2, dropout=0., attention_as_float32=True, dtype=torch.bfloat16, **case) - if not case['custom']: - # we are not using autocast here because it doesn't really - # work as expected on CPU, so we have to manually cast the weights of the MHA. 
- for layer in tr_float32.layers: - layer.self_attn.mha.to(torch.float32) - tr_float32.load_state_dict(tr.state_dict()) - steps = 12 - x = torch.randn(3, steps, 16, dtype=torch.bfloat16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_float32(x) - assert not torch.allclose(y, y2), (y - y2).norm() - - -@torch.no_grad() -def test_streaming_memory_efficient(): - torch.manual_seed(1234) - for backend in ['torch', 'xformers']: - set_efficient_attention_backend(backend) - tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0., custom=True) - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, causal=True) - tr.load_state_dict(tr_mem_efficient.state_dict()) - tr.eval() - tr_mem_efficient.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - ref = tr(x) - - with tr_mem_efficient.streaming(): - outs = [] - # frame_sizes = [2] + [1] * (steps - 2) - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr_mem_efficient(frame)) - - out = torch.cat(outs, dim=1) - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -def test_cross_attention(): - torch.manual_seed(1234) - for norm_first in [True, False]: - m = StreamingTransformer( - 16, 4, 2, cross_attention=False, norm_first=norm_first, dropout=0., custom=True) - m_cross = StreamingTransformer( - 16, 4, 2, cross_attention=True, norm_first=norm_first, dropout=0., custom=True) - m_cross.load_state_dict(m.state_dict(), strict=False) - x = torch.randn(2, 5, 16) - cross_x = torch.randn(2, 3, 16) - y_ref = m(x) - y_cross_zero = m_cross(x, cross_attention_src=0 * cross_x) - # With norm_first, the two should be exactly yhe same, - # but with norm_first=False, we get 2 normalization in a row - # and the epsilon value leads to a tiny change. - atol = 0. if norm_first else 1e-6 - print((y_ref - y_cross_zero).norm() / y_ref.norm()) - assert torch.allclose(y_ref, y_cross_zero, atol=atol) - - # We now expect a difference even with a generous atol of 1e-2. - y_cross = m_cross(x, cross_attention_src=cross_x) - assert not torch.allclose(y_cross, y_cross_zero, atol=1e-2) - - with pytest.raises(AssertionError): - _ = m_cross(x) - _ = m(x, cross_attention_src=cross_x) - - -def test_cross_attention_compat(): - torch.manual_seed(1234) - num_heads = 2 - dim = num_heads * 64 - with pytest.raises(AssertionError): - StreamingMultiheadAttention(dim, num_heads, causal=True, cross_attention=True) - - cross_attn = StreamingMultiheadAttention( - dim, num_heads, dropout=0, cross_attention=True, custom=True) - ref_attn = torch.nn.MultiheadAttention(dim, num_heads, dropout=0, batch_first=True) - - # We can load the regular attention state dict - # so we have compat when loading old checkpoints. - cross_attn.load_state_dict(ref_attn.state_dict()) - - queries = torch.randn(3, 7, dim) - keys = torch.randn(3, 9, dim) - values = torch.randn(3, 9, dim) - - y = cross_attn(queries, keys, values)[0] - y_ref = ref_attn(queries, keys, values)[0] - assert torch.allclose(y, y_ref, atol=1e-7), (y - y_ref).norm() / y_ref.norm() - - # Now let's check that streaming is working properly. 
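# Hedged note (editor's): in the cross-attention setting only the queries arrive
# incrementally; the keys and values come from the fixed conditioning sequence, so
# running the attention one query step at a time below should match the batched
# result y computed above within the 1e-7 tolerance.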
- with cross_attn.streaming(): - ys = [] - for step in range(queries.shape[1]): - ys.append(cross_attn(queries[:, step: step + 1], keys, values)[0]) - y_streaming = torch.cat(ys, dim=1) - assert torch.allclose(y_streaming, y, atol=1e-7) - - -def test_repeat_kv(): - torch.manual_seed(1234) - num_heads = 8 - kv_repeat = 4 - dim = num_heads * 64 - with pytest.raises(AssertionError): - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat, cross_attention=True) - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat) - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat, custom=True) - x = torch.randn(4, 18, dim) - y = mha(x, x, x)[0] - assert x.shape == y.shape - - -def test_qk_layer_norm(): - torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, bias_attn=False) - steps = 12 - x = torch.randn(3, steps, 16) - y = tr(x) - - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, cross_attention=True) - z = torch.randn(3, 21, 16) - y = tr(x, cross_attention_src=z) - assert y.shape == x.shape diff --git a/spaces/EleutherAI/magma/magma/image_prefix.py b/spaces/EleutherAI/magma/magma/image_prefix.py deleted file mode 100644 index 96c2c14fb707346850e0e5969588ff805ed3245d..0000000000000000000000000000000000000000 --- a/spaces/EleutherAI/magma/magma/image_prefix.py +++ /dev/null @@ -1,109 +0,0 @@ -import torch -import torch.nn as nn -from torchtyping import TensorType -from einops import rearrange -from .image_encoders import get_image_encoder -from .config import MultimodalConfig - -# ------------------------- Image prefix ---------------------------------- - -# for models that are fixed to a specific sequence lengths (i.e clip models with no pooling), the sequence lengths are below -ENCODER_SEQ_LENS = { - "clip_resnet": 49, - "clip_resnet_large": 144, -} - -ENCODER_OUT_DIMS = { - "nfresnet50": 2048, - "clip": 512, - "clip_resnet": 2560, - "clip_resnet_large": 3072, -} - - -class ImagePrefix(nn.Module): - - """ - Takes in a batch of images and returns a batch of embeddings of the - same dimensions as the LM's word embeddings. 
- - :param config: MultimodalConfig object - :param out_dim: output dimension of the embedding - :param device: device to run the model on - """ - - def __init__( - self, - config: MultimodalConfig, - out_dim: int = 2048, - device=None, - ): - super().__init__() - self.device = device or torch.device( - "cuda" if torch.cuda.is_available() else "cpu" - ) - self.config = config - self.encoder_type = config.encoder_name - - # get image encoder backbone - self.enc = get_image_encoder( - config.encoder_name, - pretrained=config.pretrained_img_encoder, - ) - self.encoder_out_dim = ENCODER_OUT_DIMS[ - self.encoder_type - ] # out dim for image encoder - - self.out_dim = out_dim # out dim for lm - - # set the out seq len to that specified in the config, or for some models, the hardcoded value - self.out_seq_len = ( - config.image_seq_len - if config.encoder_name not in ENCODER_SEQ_LENS - else ENCODER_SEQ_LENS[config.encoder_name] - ) - - # get the output projection - proj_out_dim = ( - (self.out_dim * self.out_seq_len) - if self.encoder_type not in ENCODER_SEQ_LENS - else self.out_dim - ) - self.proj = nn.Linear(self.encoder_out_dim, proj_out_dim) - self.dropout = nn.Dropout(config.image_embed_dropout_prob) - self.use_layernorm = config.use_image_embed_layernorm - if self.use_layernorm: - self.ln = nn.LayerNorm(self.out_dim) - - def forward( - self, x: TensorType["b", "c", "h", "w"] - ) -> TensorType["b", "seq", "out_dim"]: - - # pass through image encoder - logits = self.enc(x) - - # remove trailing dimensions of size 1 + pass through linear - if logits.ndim == 4: - logits = rearrange(logits, "b d 1 1 -> b d") - elif logits.ndim == 3: - assert self.encoder_type in ENCODER_SEQ_LENS - else: - assert logits.ndim == 2 - - logits = self.proj(logits) - - # reshape to desired output shape - if ( - self.encoder_type not in ENCODER_SEQ_LENS - ): # don't need to reshape those with fixed seq lens / no pooling - logits = rearrange( - logits, "b (s d) -> b s d", d=self.out_dim, s=self.out_seq_len - ) - - # pass through dropout and layer norm - logits = self.dropout(logits) - - if self.use_layernorm: - logits = self.ln(logits) - - return logits diff --git a/spaces/Ella2323/Positive-Reframing/test.sh b/spaces/Ella2323/Positive-Reframing/test.sh deleted file mode 100644 index e7a3bab4086eef6b6c61c65914524e715d696bff..0000000000000000000000000000000000000000 --- a/spaces/Ella2323/Positive-Reframing/test.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -for n in 1 2 3 4 5 6 7 8 9 10 -do - echo '------------------------------------------------------------' - echo $n - echo "test/$n.txt" - python test.py --input="test/$n.txt" | diff - test/$n.gold.txt -done \ No newline at end of file diff --git a/spaces/EronSamez/RVC_HFmeu/infer/modules/vc/pipeline.py b/spaces/EronSamez/RVC_HFmeu/infer/modules/vc/pipeline.py deleted file mode 100644 index 76e712c649b95e21f9bbe6416ae8b7050317b479..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/infer/modules/vc/pipeline.py +++ /dev/null @@ -1,655 +0,0 @@ -import os -import sys -import traceback -import logging - -logger = logging.getLogger(__name__) - -from functools import lru_cache -from time import time as ttime -from torch import Tensor -import faiss -import librosa -import numpy as np -import parselmouth -import pyworld -import torch -import torch.nn.functional as F -import torchcrepe -from scipy import signal -from tqdm import tqdm - -import random -now_dir = os.getcwd() -sys.path.append(now_dir) -import re -from functools import partial -bh, ah = 
signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} -from LazyImport import lazyload -torchcrepe = lazyload("torchcrepe") # Fork Feature. Crepe algo for training and preprocess -torch = lazyload("torch") -from infer.lib.rmvpe import RMVPE - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # 每半秒一个点 - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class Pipeline(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * self.x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # 查询切点前后查询时间 - self.t_center = self.sr * self.x_center # 查询切点位置 - self.t_max = self.sr * self.x_max # 免查询时长阈值 - self.device = config.device - self.model_rmvpe = RMVPE("%s/rmvpe.pt" % os.environ["rmvpe_root"], is_half=self.is_half, device=self.device) - self.f0_method_dict = { - "pm": self.get_pm, - "harvest": self.get_harvest, - "dio": self.get_dio, - "rmvpe": self.get_rmvpe, - "rmvpe+": self.get_pitch_dependant_rmvpe, - "crepe": self.get_f0_official_crepe_computation, - "crepe-tiny": partial(self.get_f0_official_crepe_computation, model='model'), - "mangio-crepe": self.get_f0_crepe_computation, - "mangio-crepe-tiny": partial(self.get_f0_crepe_computation, model='model'), - - } - self.note_dict = [ - 65.41, 69.30, 73.42, 77.78, 82.41, 87.31, - 92.50, 98.00, 103.83, 110.00, 116.54, 123.47, - 130.81, 138.59, 146.83, 155.56, 164.81, 174.61, - 185.00, 196.00, 207.65, 220.00, 233.08, 246.94, - 261.63, 277.18, 293.66, 311.13, 329.63, 349.23, - 369.99, 392.00, 415.30, 440.00, 466.16, 493.88, - 523.25, 554.37, 587.33, 622.25, 659.25, 698.46, - 739.99, 783.99, 830.61, 880.00, 932.33, 987.77, - 1046.50, 1108.73, 1174.66, 1244.51, 1318.51, 1396.91, - 1479.98, 1567.98, 1661.22, 1760.00, 1864.66, 1975.53, - 2093.00, 2217.46, 2349.32, 2489.02, 2637.02, 2793.83, - 2959.96, 3135.96, 3322.44, 3520.00, 3729.31, 3951.07 - ] - - # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. 
Will return the type (torch.device) - def get_optimal_torch_device(self, index: int = 0) -> torch.device: - if torch.cuda.is_available(): - return torch.device( - f"cuda:{index % torch.cuda.device_count()}" - ) # Very fast - elif torch.backends.mps.is_available(): - return torch.device("mps") - return torch.device("cpu") - - # Fork Feature: Compute f0 with the crepe method - def get_f0_crepe_computation( - self, - x, - f0_min, - f0_max, - p_len, - *args, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time. - **kwargs, # Either use crepe-tiny "tiny" or crepe "full". Default is full - ): - x = x.astype( - np.float32 - ) # fixes the F.conv2D exception. We needed to convert double to float. - x /= np.quantile(np.abs(x), 0.999) - torch_device = self.get_optimal_torch_device() - audio = torch.from_numpy(x).to(torch_device, copy=True) - audio = torch.unsqueeze(audio, dim=0) - if audio.ndim == 2 and audio.shape[0] > 1: - audio = torch.mean(audio, dim=0, keepdim=True).detach() - audio = audio.detach() - hop_length = kwargs.get('crepe_hop_length', 160) - model = kwargs.get('model', 'full') - print("Initiating prediction with a crepe_hop_length of: " + str(hop_length)) - pitch: Tensor = torchcrepe.predict( - audio, - self.sr, - hop_length, - f0_min, - f0_max, - model, - batch_size=hop_length * 2, - device=torch_device, - pad=True, - ) - p_len = p_len or x.shape[0] // hop_length - # Resize the pitch for final f0 - source = np.array(pitch.squeeze(0).cpu().float().numpy()) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * p_len, len(source)) / p_len, - np.arange(0, len(source)), - source, - ) - f0 = np.nan_to_num(target) - return f0 # Resized f0 - - def get_f0_official_crepe_computation( - self, - x, - f0_min, - f0_max, - *args, - **kwargs - ): - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - model = kwargs.get('model', 'full') - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - return f0 - - # Fork Feature: Compute pYIN f0 method - def get_f0_pyin_computation(self, x, f0_min, f0_max): - y, sr = librosa.load("saudio/Sidney.wav", self.sr, mono=True) - f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max) - f0 = f0[1:] # Get rid of extra first frame - return f0 - - def get_pm(self, x, p_len, *args, **kwargs): - f0 = parselmouth.Sound(x, self.sr).to_pitch_ac( - time_step=160 / 16000, - voicing_threshold=0.6, - pitch_floor=kwargs.get('f0_min'), - pitch_ceiling=kwargs.get('f0_max'), - ).selected_array["frequency"] - - return np.pad( - f0, - [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]], - mode="constant" - ) - - def get_harvest(self, x, *args, **kwargs): - f0_spectral = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=kwargs.get('f0_max'), - f0_floor=kwargs.get('f0_min'), - frame_period=1000 * kwargs.get('hop_length', 160) / self.sr, - ) - return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr) - - def get_dio(self, x, *args, **kwargs): - f0_spectral = pyworld.dio( - x.astype(np.double), - fs=self.sr, - 
f0_ceil=kwargs.get('f0_max'), - f0_floor=kwargs.get('f0_min'), - frame_period=1000 * kwargs.get('hop_length', 160) / self.sr, - ) - return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr) - - - def get_rmvpe(self, x, *args, **kwargs): - if not hasattr(self, "model_rmvpe"): - from infer.lib.rmvpe import RMVPE - - logger.info( - "Loading rmvpe model,%s" % "%s/rmvpe.pt" % os.environ["rmvpe_root"] - ) - self.model_rmvpe = RMVPE( - "%s/rmvpe.pt" % os.environ["rmvpe_root"], - is_half=self.is_half, - device=self.device, - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - - return f0 - - - def get_pitch_dependant_rmvpe(self, x, f0_min=1, f0_max=40000, *args, **kwargs): - return self.model_rmvpe.infer_from_audio_with_pitch(x, thred=0.03, f0_min=f0_min, f0_max=f0_max) - - def autotune_f0(self, f0): - autotuned_f0 = [] - for freq in f0: - closest_notes = [x for x in self.note_dict if abs(x - freq) == min(abs(n - freq) for n in self.note_dict)] - autotuned_f0.append(random.choice(closest_notes)) - return np.array(autotuned_f0, np.float64) - - # Fork Feature: Acquire median hybrid f0 estimation calculation - def get_f0_hybrid_computation( - self, - methods_str, - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step - ): - # Get various f0 methods from input to use in the computation stack - params = {'x': x, 'p_len': p_len, 'f0_min': f0_min, - 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius, - 'crepe_hop_length': crepe_hop_length, 'model': "full" - } - methods_str = re.search('hybrid\[(.+)\]', methods_str) - if methods_str: # Ensure a match was found - methods = [method.strip() for method in methods_str.group(1).split('+')] - f0_computation_stack = [] - - print(f"Calculating f0 pitch estimations for methods: {str(methods)}") - x = x.astype(np.float32) - x /= np.quantile(np.abs(x), 0.999) - # Get f0 calculations for all methods specified - - for method in methods: - if method not in self.f0_method_dict: - print(f"Method {method} not found.") - continue - f0 = self.f0_method_dict[method](**params) - if method == 'harvest' and filter_radius > 2: - f0 = signal.medfilt(f0, 3) - f0 = f0[1:] # Get rid of first frame. 
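# Hedged worked example of the hybrid reduction (made-up numbers): if, for one
# frame, three methods report 220.0 Hz, 221.5 Hz and 110.0 Hz (an octave error),
# the np.nanmedian taken over the stack a few lines below yields 220.0 Hz, so a
# single misbehaving estimator cannot drag the final contour off.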
- f0_computation_stack.append(f0) - - for fc in f0_computation_stack: - print(len(fc)) - - print(f"Calculating hybrid median f0 from the stack of: {str(methods)}") - f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) - return f0_median_hybrid - - def get_f0( - self, - input_audio_path, - x, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - f0_autotune, - inp_f0=None, - f0_min=50, - f0_max=1100, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - params = {'x': x, 'p_len': p_len, 'f0_up_key': f0_up_key, 'f0_min': f0_min, - 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius, - 'crepe_hop_length': crepe_hop_length, 'model': "full" - } - - if "hybrid" in f0_method: - # Perform hybrid median pitch estimation - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = self.get_f0_hybrid_computation( - f0_method,+ - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step, - ) - else: - f0 = self.f0_method_dict[f0_method](**params) - - if "privateuseone" in str(self.device): # clean ortruntime memory - del self.model_rmvpe.model - del self.model_rmvpe - logger.info("Cleaning ortruntime memory") - - if f0_autotune: - f0 = self.autotune_f0(f0) - - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int32) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - if protect < 0.5 and pitch is not None and pitchf is not None: - feats0 = feats.clone() - if ( - not isinstance(index, type(None)) - and not isinstance(big_npy, type(None)) - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, 
keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch is not None and pitchf is not None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch is not None and pitchf is not None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch is not None and pitchf is not None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - hasp = pitch is not None and pitchf is not None - arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid) - audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy() - del hasp, arg - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - def process_t(self, t, s, window, audio_pad, pitch, pitchf, times, index, big_npy, index_rate, version, protect, t_pad_tgt, if_f0, sid, model, net_g): - t = t // window * window - if if_f0 == 1: - return self.vc( - model, - net_g, - sid, - audio_pad[s : t + t_pad_tgt + window], - pitch[:, s // window : (t + t_pad_tgt) // window], - pitchf[:, s // window : (t + t_pad_tgt) // window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[t_pad_tgt : -t_pad_tgt] - else: - return self.vc( - model, - net_g, - sid, - audio_pad[s : t + t_pad_tgt + window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[t_pad_tgt : -t_pad_tgt] - - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - crepe_hop_length, - f0_autotune, - f0_file=None, - f0_min=50, - f0_max=1100 - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, 
"name"): - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - f0_autotune, - inp_f0, - f0_min, - f0_max - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if self.device == "mps" or "xpu" in self.device: - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - - with tqdm(total=len(opt_ts), desc="Processing", unit="window") as pbar: - for i, t in enumerate(opt_ts): - t = t // self.window * self.window - start = s - end = t + self.t_pad2 + self.window - audio_slice = audio_pad[start:end] - pitch_slice = pitch[:, start // self.window:end // self.window] if if_f0 else None - pitchf_slice = pitchf[:, start // self.window:end // self.window] if if_f0 else None - audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt]) - s = t - pbar.update(1) - pbar.refresh() - - audio_slice = audio_pad[t:] - pitch_slice = pitch[:, t // self.window:] if if_f0 and t is not None else pitch - pitchf_slice = pitchf[:, t // self.window:] if if_f0 and t is not None else pitchf - audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt]) - - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if tgt_sr != resample_sr >= 16000: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - print("Returning completed audio...") - print("-------------------") - return audio_opt diff --git a/spaces/EyeSeeThru/anything-v3.0/utils.py b/spaces/EyeSeeThru/anything-v3.0/utils.py deleted file mode 100644 index ff1c065d186347ca51b47d010a697dbe1814695c..0000000000000000000000000000000000000000 --- a/spaces/EyeSeeThru/anything-v3.0/utils.py +++ /dev/null @@ -1,6 +0,0 @@ -def is_google_colab(): - try: - import google.colab - return True - except: - return False \ No newline at end of file diff --git a/spaces/FrankZxShen/so-vits-svc-models-ba/vdecoder/hifigan/utils.py b/spaces/FrankZxShen/so-vits-svc-models-ba/vdecoder/hifigan/utils.py deleted file mode 100644 index 9c93c996d3cc73c30d71c1fc47056e4230f35c0f..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/so-vits-svc-models-ba/vdecoder/hifigan/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import glob -import os -import matplotlib -import torch -from torch.nn.utils import weight_norm -# matplotlib.use("Agg") -import matplotlib.pylab as plt - - -def plot_spectrogram(spectrogram): - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - 
interpolation='none') - plt.colorbar(im, ax=ax) - - fig.canvas.draw() - plt.close() - - return fig - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def apply_weight_norm(m): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - weight_norm(m) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print("Loading '{}'".format(filepath)) - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def save_checkpoint(filepath, obj): - print("Saving checkpoint to {}".format(filepath)) - torch.save(obj, filepath) - print("Complete.") - - -def del_old_checkpoints(cp_dir, prefix, n_models=2): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) # get checkpoint paths - cp_list = sorted(cp_list)# sort by iter - if len(cp_list) > n_models: # if more than n_models models are found - for cp in cp_list[:-n_models]:# delete the oldest models other than lastest n_models - open(cp, 'w').close()# empty file contents - os.unlink(cp)# delete file (move to trash when using Colab) - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return None - return sorted(cp_list)[-1] - diff --git a/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/ContentVec768L12.py b/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/ContentVec768L12.py deleted file mode 100644 index 0d1591c8843b920d5685e822354e8e6adc9a9e19..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/ContentVec768L12.py +++ /dev/null @@ -1,34 +0,0 @@ -from vencoder.encoder import SpeechEncoder -import torch -from fairseq import checkpoint_utils - -class ContentVec768L12(SpeechEncoder): - def __init__(self,vec_path = "pretrain/checkpoint_best_legacy_500.pt",device=None): - print("load model(s) from {}".format(vec_path)) - self.hidden_dim = 768 - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [vec_path], - suffix="", - ) - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - self.dev = torch.device(device) - self.model = models[0].to(self.dev) - self.model.eval() - - def encoder(self, wav): - feats = wav - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - inputs = { - "source": feats.to(wav.device), - "padding_mask": padding_mask.to(wav.device), - "output_layer": 12, # layer 12 - } - with torch.no_grad(): - logits = self.model.extract_features(**inputs) - return logits[0].transpose(1, 2) \ No newline at end of file diff --git a/spaces/GIZ/SDSN-demo/appStore/keyword_search.py b/spaces/GIZ/SDSN-demo/appStore/keyword_search.py deleted file mode 100644 index 9162afc6e633b7bc25c4fb34d9cda34ae90efa6a..0000000000000000000000000000000000000000 --- a/spaces/GIZ/SDSN-demo/appStore/keyword_search.py +++ /dev/null @@ -1,176 +0,0 @@ -# set path -import glob, os, sys; -sys.path.append('../utils') - -import streamlit as st -import json -import logging -from utils.lexical_search import runLexicalPreprocessingPipeline, lexical_search -from utils.semantic_search import 
runSemanticPreprocessingPipeline, semantic_keywordsearch -from utils.checkconfig import getconfig -from utils.streamlitcheck import checkbox_without_preselect - -# Declare all the necessary variables -config = getconfig('paramconfig.cfg') -split_by = config.get('semantic_search','SPLIT_BY') -split_length = int(config.get('semantic_search','SPLIT_LENGTH')) -split_overlap = int(config.get('semantic_search','SPLIT_OVERLAP')) -split_respect_sentence_boundary = bool(int(config.get('semantic_search', - 'RESPECT_SENTENCE_BOUNDARY'))) -remove_punc = bool(int(config.get('semantic_search','REMOVE_PUNC'))) -embedding_model = config.get('semantic_search','RETRIEVER') -embedding_model_format = config.get('semantic_search','RETRIEVER_FORMAT') -embedding_layer = int(config.get('semantic_search','RETRIEVER_EMB_LAYER')) -embedding_dim = int(config.get('semantic_search','EMBEDDING_DIM')) -max_seq_len = int(config.get('semantic_search','MAX_SEQ_LENGTH')) -retriever_top_k = int(config.get('semantic_search','RETRIEVER_TOP_K')) -reader_model = config.get('semantic_search','READER') -reader_top_k = int(config.get('semantic_search','RETRIEVER_TOP_K')) -top_k_per_candidate = int(config.get('semantic_search','READER_TOP_K_PER_CANDIDATE')) -lexical_split_by= config.get('lexical_search','SPLIT_BY') -lexical_split_length=int(config.get('lexical_search','SPLIT_LENGTH')) -lexical_split_overlap = int(config.get('lexical_search','SPLIT_OVERLAP')) -lexical_remove_punc = bool(int(config.get('lexical_search','REMOVE_PUNC'))) -lexical_top_k=int(config.get('lexical_search','TOP_K')) - -def app(): - - with st.container(): - st.markdown("

Search

", - unsafe_allow_html=True) - st.write(' ') - st.write(' ') - - with st.expander("ℹ️ - About this app", expanded=False): - - st.write( - """ - The *Search* app is an interface \ - for doing contextual and keyword searches in \ - policy documents. \ - """) - st.write("") - st.write(""" The application allows its user to perform a search\ - based on two options: a lexical search([TFIDF](https://en.wikipedia.org/wiki/Tf%E2%80%93idf))\ - and semantic search. [bi-encoder](https://www.sbert.net/examples/applications/retrieve_rerank/README.html)\ - The lexical search only \ - displays paragraphs in the document with exact matching results, \ - the semantic search shows paragraphs with meaningful connections \ - (e.g., synonyms) based on the search context. Both \ - methods employ a probabilistic retrieval framework in its identification\ - of relevant paragraphs. By defualt the search is performed using \ - 'Semantic Search', and to find 'Exact/Lexical Matches' please tick the \ - checkbox provided which will by-pass semantic search. Furthermore,\ - the application allows the user to search for pre-defined keywords \ - from different thematic buckets present in sidebar.""") - st.write("") - st.write(""" The Exact Matches gives back top {} findings, and Semantic - search provides with top {} answers.""".format(lexical_top_k, retriever_top_k)) - st.write("") - st.write("") - st.markdown("Some runtime metrics tested with cpu: Intel(R) Xeon(R) CPU @ 2.20GHz, memory: 13GB") - col1,col2,col3= st.columns([2,4,4]) - with col1: - st.caption("OCR File processing") - # st.markdown('
50 sec
', unsafe_allow_html=True) - st.write("50 sec") - - with col2: - st.caption("Lexical Search on 200 paragraphs(~ 35 pages)") - # st.markdown('
12 sec
', unsafe_allow_html=True) - st.write("15 sec") - - with col3: - st.caption("Semantic search on 200 paragraphs(~ 35 pages)") - # st.markdown('
120 sec
', unsafe_allow_html=True) - st.write("120 sec(including emebedding creation)") - - with st.sidebar: - with open('docStore/sample/keywordexample.json','r') as json_file: - keywordexample = json.load(json_file) - - # genre = st.radio("Select Keyword Category", list(keywordexample.keys())) - st.caption("Select Keyword Category") - genre = checkbox_without_preselect(list(keywordexample.keys())) - if genre: - keywordList = keywordexample[genre] - else: - keywordList = None - - st.markdown("---") - - with st.container(): - type_hinting = "Please enter here your question and we \ - will look for an answer in the document\ - OR enter the keyword you are looking \ - for and we will look for similar\ - context in the document.\ - You can also explore predefined sets of keywords from sidebar. " - if keywordList is not None: - # queryList = st.text_input("You selected the {} category we \ - # will look for these keywords in document".format(genre) - # value="{}".format(keywordList)) - queryList = st.text_input(type_hinting, - value = "{}".format(keywordList)) - else: - queryList = st.text_input(type_hinting, - placeholder="Enter keyword/query here") - - searchtype = st.checkbox("Show only Exact Matches") - if st.button("Find them"): - - if queryList == "": - st.info("🤔 No keyword provided, if you dont have any, \ - please try example sets from sidebar!") - logging.warning("Terminated as no keyword provided") - else: - if 'filepath' in st.session_state: - - if searchtype: - all_documents = runLexicalPreprocessingPipeline( - file_name=st.session_state['filename'], - file_path=st.session_state['filepath'], - split_by=lexical_split_by, - split_length=lexical_split_length, - split_overlap=lexical_split_overlap, - remove_punc=lexical_remove_punc) - logging.info("performing lexical search") - with st.spinner("Performing Exact matching search \ - (Lexical search) for you"): - lexical_search(query=queryList, - documents = all_documents['documents'], - top_k = lexical_top_k ) - else: - all_documents = runSemanticPreprocessingPipeline( - file_path= st.session_state['filepath'], - file_name = st.session_state['filename'], - split_by=split_by, - split_length= split_length, - split_overlap=split_overlap, - remove_punc= remove_punc, - split_respect_sentence_boundary=split_respect_sentence_boundary) - if len(all_documents['documents']) > 100: - warning_msg = ": This might take sometime, please sit back and relax." 
- else: - warning_msg = "" - - logging.info("starting semantic search") - with st.spinner("Performing Similar/Contextual search{}".format(warning_msg)): - semantic_keywordsearch(query = queryList, - documents = all_documents['documents'], - embedding_model=embedding_model, - embedding_layer=embedding_layer, - embedding_model_format=embedding_model_format, - reader_model=reader_model,reader_top_k=reader_top_k, - retriever_top_k=retriever_top_k, embedding_dim=embedding_dim, - max_seq_len=max_seq_len, - top_k_per_candidate = top_k_per_candidate) - - else: - st.info("🤔 No document found, please try to upload it at the sidebar!") - logging.warning("Terminated as no document provided") - - - - diff --git a/spaces/GT4SD/PatentToolkit/tridentmodel/__init__.py b/spaces/GT4SD/PatentToolkit/tridentmodel/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_sorted_block_race.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_sorted_block_race.py deleted file mode 100644 index be99732b6d256d70d3c309b0f89df49b6c5e9cce..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_sorted_block_race.py +++ /dev/null @@ -1,51 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils - -class ColorSortedBlockRace(Task): - """Pick up blocks of two colors and place them in corresponding colored zones in a sequence.""" - - def __init__(self): - super().__init__() - self.max_steps = 20 - self.lang_template = "place the blocks in the corresponding colored zones in sequence" - self.task_completed_desc = "done placing blocks in zones." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add zones. - zone_size = (0.12, 0.12, 0) - zone_urdf = 'zone/zone.urdf' - zone_colors = ['blue', 'red'] - zone_poses = [] - for color in zone_colors: - zone_pose = self.get_random_pose(env, zone_size) - env.add_object(zone_urdf, zone_pose, 'fixed', color=utils.COLORS[color]) - zone_poses.append(zone_pose) - - # Add blocks. - block_size = (0.04, 0.04, 0.04) - block_urdf = 'block/block.urdf' - block_colors = ['blue', 'red'] - blocks = [] - for color in block_colors: - for _ in range(3): - block_pose = self.get_random_pose(env, block_size) - block_id = env.add_object(block_urdf, block_pose, color=utils.COLORS[color]) - blocks.append(block_id) - - # Goal: each block is in the corresponding colored zone. 
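# Hedged clarification (editor's): six blocks are created above (indices 0-2 blue,
# 3-5 red), so zone_poses[i // 3] sends the first three to the blue zone and the
# last three to the red zone, and each per-block goal below contributes
# 1 / len(blocks) = 1/6 of the task reward.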
- for i, block in enumerate(blocks): - self.add_goal(objs=[block], matches=np.ones((1, 1)), targ_poses=[zone_poses[i//3]], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1/len(blocks), - language_goal=self.lang_template) \ No newline at end of file diff --git a/spaces/Golyass/Recomender-System-Hybrid-Method/README.md b/spaces/Golyass/Recomender-System-Hybrid-Method/README.md deleted file mode 100644 index deef98249c8a6b79f8f72bd9b0ce38b52f3fc2bd..0000000000000000000000000000000000000000 --- a/spaces/Golyass/Recomender-System-Hybrid-Method/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Recomender System Hybrid Method -emoji: 💻 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/visualization/image.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/visualization/image.py deleted file mode 100644 index 5a148384d7a77c4d9849c54570e85740eaff8235..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/visualization/image.py +++ /dev/null @@ -1,303 +0,0 @@ -import matplotlib.pyplot as plt -import mmcv -import numpy as np -import pycocotools.mask as mask_util -from matplotlib.collections import PatchCollection -from matplotlib.patches import Polygon - -from ..utils import mask2ndarray - -EPS = 1e-2 - - -def color_val_matplotlib(color): - """Convert various input in BGR order to normalized RGB matplotlib color - tuples, - - Args: - color (:obj:`Color`/str/tuple/int/ndarray): Color inputs - - Returns: - tuple[float]: A tuple of 3 normalized floats indicating RGB channels. - """ - color = mmcv.color_val(color) - color = [color / 255 for color in color[::-1]] - return tuple(color) - - -def imshow_det_bboxes(img, - bboxes, - labels, - segms=None, - class_names=None, - score_thr=0, - bbox_color='green', - text_color='green', - mask_color=None, - thickness=2, - font_size=13, - win_name='', - show=True, - wait_time=0, - out_file=None): - """Draw bboxes and class labels (with scores) on an image. - - Args: - img (str or ndarray): The image to be displayed. - bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or - (n, 5). - labels (ndarray): Labels of bboxes. - segms (ndarray or None): Masks, shaped (n,h,w) or None - class_names (list[str]): Names of each classes. - score_thr (float): Minimum score of bboxes to be shown. Default: 0 - bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. - The tuple of color should be in BGR order. Default: 'green' - text_color (str or tuple(int) or :obj:`Color`):Color of texts. - The tuple of color should be in BGR order. Default: 'green' - mask_color (str or tuple(int) or :obj:`Color`, optional): - Color of masks. The tuple of color should be in BGR order. - Default: None - thickness (int): Thickness of lines. Default: 2 - font_size (int): Font size of texts. Default: 13 - show (bool): Whether to show the image. Default: True - win_name (str): The window name. Default: '' - wait_time (float): Value of waitKey param. Default: 0. - out_file (str, optional): The filename to write the image. - Default: None - - Returns: - ndarray: The image with bboxes drawn on it. - """ - assert bboxes.ndim == 2, \ - f' bboxes ndim should be 2, but its ndim is {bboxes.ndim}.' 
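# Hedged usage note (values invented for illustration): a typical call passes
#     bboxes = np.array([[10., 20., 110., 220., 0.9]])   # (n, 5): x1, y1, x2, y2, score
#     labels = np.array([0])
#     imshow_det_bboxes(img, bboxes, labels, class_names=['person'],
#                       score_thr=0.3, show=False, out_file='vis.jpg')
# and the shape checks around this point validate exactly that layout.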
- assert labels.ndim == 1, \ - f' labels ndim should be 1, but its ndim is {labels.ndim}.' - assert bboxes.shape[0] == labels.shape[0], \ - 'bboxes.shape[0] and labels.shape[0] should have the same length.' - assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \ - f' bboxes.shape[1] should be 4 or 5, but its {bboxes.shape[1]}.' - img = mmcv.imread(img).astype(np.uint8) - - if score_thr > 0: - assert bboxes.shape[1] == 5 - scores = bboxes[:, -1] - inds = scores > score_thr - bboxes = bboxes[inds, :] - labels = labels[inds] - if segms is not None: - segms = segms[inds, ...] - - mask_colors = [] - if labels.shape[0] > 0: - if mask_color is None: - # random color - np.random.seed(42) - mask_colors = [ - np.random.randint(0, 256, (1, 3), dtype=np.uint8) - for _ in range(max(labels) + 1) - ] - else: - # specify color - mask_colors = [ - np.array(mmcv.color_val(mask_color)[::-1], dtype=np.uint8) - ] * ( - max(labels) + 1) - - bbox_color = color_val_matplotlib(bbox_color) - text_color = color_val_matplotlib(text_color) - - img = mmcv.bgr2rgb(img) - width, height = img.shape[1], img.shape[0] - img = np.ascontiguousarray(img) - - fig = plt.figure(win_name, frameon=False) - plt.title(win_name) - canvas = fig.canvas - dpi = fig.get_dpi() - # add a small EPS to avoid precision lost due to matplotlib's truncation - # (https://github.com/matplotlib/matplotlib/issues/15363) - fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi) - - # remove white edges by set subplot margin - plt.subplots_adjust(left=0, right=1, bottom=0, top=1) - ax = plt.gca() - ax.axis('off') - - polygons = [] - color = [] - for i, (bbox, label) in enumerate(zip(bboxes, labels)): - bbox_int = bbox.astype(np.int32) - poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]], - [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - color.append(bbox_color) - label_text = class_names[ - label] if class_names is not None else f'class {label}' - if len(bbox) > 4: - label_text += f'|{bbox[-1]:.02f}' - ax.text( - bbox_int[0], - bbox_int[1], - f'{label_text}', - bbox={ - 'facecolor': 'black', - 'alpha': 0.8, - 'pad': 0.7, - 'edgecolor': 'none' - }, - color=text_color, - fontsize=font_size, - verticalalignment='top', - horizontalalignment='left') - if segms is not None: - color_mask = mask_colors[labels[i]] - mask = segms[i].astype(bool) - img[mask] = img[mask] * 0.5 + color_mask * 0.5 - - plt.imshow(img) - - p = PatchCollection( - polygons, facecolor='none', edgecolors=color, linewidths=thickness) - ax.add_collection(p) - - stream, _ = canvas.print_to_buffer() - buffer = np.frombuffer(stream, dtype='uint8') - img_rgba = buffer.reshape(height, width, 4) - rgb, alpha = np.split(img_rgba, [3], axis=2) - img = rgb.astype('uint8') - img = mmcv.rgb2bgr(img) - - if show: - # We do not use cv2 for display because in some cases, opencv will - # conflict with Qt, it will output a warning: Current thread - # is not the object's thread. 
You can refer to - # https://github.com/opencv/opencv-python/issues/46 for details - if wait_time == 0: - plt.show() - else: - plt.show(block=False) - plt.pause(wait_time) - if out_file is not None: - mmcv.imwrite(img, out_file) - - plt.close() - - return img - - -def imshow_gt_det_bboxes(img, - annotation, - result, - class_names=None, - score_thr=0, - gt_bbox_color=(255, 102, 61), - gt_text_color=(255, 102, 61), - gt_mask_color=(255, 102, 61), - det_bbox_color=(72, 101, 241), - det_text_color=(72, 101, 241), - det_mask_color=(72, 101, 241), - thickness=2, - font_size=13, - win_name='', - show=True, - wait_time=0, - out_file=None): - """General visualization GT and result function. - - Args: - img (str or ndarray): The image to be displayed.) - annotation (dict): Ground truth annotations where contain keys of - 'gt_bboxes' and 'gt_labels' or 'gt_masks' - result (tuple[list] or list): The detection result, can be either - (bbox, segm) or just bbox. - class_names (list[str]): Names of each classes. - score_thr (float): Minimum score of bboxes to be shown. Default: 0 - gt_bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. - The tuple of color should be in BGR order. Default: (255, 102, 61) - gt_text_color (str or tuple(int) or :obj:`Color`):Color of texts. - The tuple of color should be in BGR order. Default: (255, 102, 61) - gt_mask_color (str or tuple(int) or :obj:`Color`, optional): - Color of masks. The tuple of color should be in BGR order. - Default: (255, 102, 61) - det_bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. - The tuple of color should be in BGR order. Default: (72, 101, 241) - det_text_color (str or tuple(int) or :obj:`Color`):Color of texts. - The tuple of color should be in BGR order. Default: (72, 101, 241) - det_mask_color (str or tuple(int) or :obj:`Color`, optional): - Color of masks. The tuple of color should be in BGR order. - Default: (72, 101, 241) - thickness (int): Thickness of lines. Default: 2 - font_size (int): Font size of texts. Default: 13 - win_name (str): The window name. Default: '' - show (bool): Whether to show the image. Default: True - wait_time (float): Value of waitKey param. Default: 0. - out_file (str, optional): The filename to write the image. - Default: None - - Returns: - ndarray: The image with bboxes or masks drawn on it. 
- """ - assert 'gt_bboxes' in annotation - assert 'gt_labels' in annotation - assert isinstance( - result, - (tuple, list)), f'Expected tuple or list, but get {type(result)}' - - gt_masks = annotation.get('gt_masks', None) - if gt_masks is not None: - gt_masks = mask2ndarray(gt_masks) - - img = mmcv.imread(img) - - img = imshow_det_bboxes( - img, - annotation['gt_bboxes'], - annotation['gt_labels'], - gt_masks, - class_names=class_names, - bbox_color=gt_bbox_color, - text_color=gt_text_color, - mask_color=gt_mask_color, - thickness=thickness, - font_size=font_size, - win_name=win_name, - show=False) - - if isinstance(result, tuple): - bbox_result, segm_result = result - if isinstance(segm_result, tuple): - segm_result = segm_result[0] # ms rcnn - else: - bbox_result, segm_result = result, None - - bboxes = np.vstack(bbox_result) - labels = [ - np.full(bbox.shape[0], i, dtype=np.int32) - for i, bbox in enumerate(bbox_result) - ] - labels = np.concatenate(labels) - - segms = None - if segm_result is not None and len(labels) > 0: # non empty - segms = mmcv.concat_list(segm_result) - segms = mask_util.decode(segms) - segms = segms.transpose(2, 0, 1) - - img = imshow_det_bboxes( - img, - bboxes, - labels, - segms=segms, - class_names=class_names, - score_thr=score_thr, - bbox_color=det_bbox_color, - text_color=det_text_color, - mask_color=det_mask_color, - thickness=thickness, - font_size=font_size, - win_name=win_name, - show=show, - wait_time=wait_time, - out_file=out_file) - return img diff --git a/spaces/GuXiaoBei/wechat-chatbot/app.py b/spaces/GuXiaoBei/wechat-chatbot/app.py deleted file mode 100644 index 59f0f0c5f48cd69b6b08d7fd0ea65dca9f497f2f..0000000000000000000000000000000000000000 --- a/spaces/GuXiaoBei/wechat-chatbot/app.py +++ /dev/null @@ -1,45 +0,0 @@ -# encoding:utf-8 - -import config -import gradio as gr -from channel import channel_factory -from common.log import logger -from io import BytesIO -from PIL import Image -from concurrent.futures import ThreadPoolExecutor -thread_pool = ThreadPoolExecutor(max_workers=8) - -def getImage(bytes): - bytes_stream = BytesIO(bytes) - image = Image.open(bytes_stream) - return image - -def getLoginUrl(): - # load config - config.load_config() - - # create channel - bot = channel_factory.create_channel("wx") - thread_pool.submit(bot.startup) - - while (True): - if bot.getQrCode(): - return getImage(bot.getQrCode()) - -if __name__ == '__main__': - try: - - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - btn = gr.Button(value="生成二维码") - with gr.Column(): - outputs=[gr.Pil()] - btn.click(getLoginUrl, outputs=outputs) - - demo.launch() - - - except Exception as e: - logger.error("App startup failed!") - logger.exception(e) diff --git a/spaces/HESOAYM/ElviraMulti/modules/config.py b/spaces/HESOAYM/ElviraMulti/modules/config.py deleted file mode 100644 index 4e816ddd6cf4499f21cbbd2aee3ae0a6eeb7c5af..0000000000000000000000000000000000000000 --- a/spaces/HESOAYM/ElviraMulti/modules/config.py +++ /dev/null @@ -1,170 +0,0 @@ -from collections import defaultdict -from contextlib import contextmanager -import os -import logging -import sys -import commentjson as json - -from . import shared -from . 
import presets - - -__all__ = [ - "my_api_key", - "authflag", - "auth_list", - "dockerflag", - "retrieve_proxy", - "log_level", - "advance_docs", - "update_doc_config", - "multi_api_key", - "server_name", - "server_port", - "share", -] - -# 添加一个统一的config文件,避免文件过多造成的疑惑(优先级最低) -# 同时,也可以为后续支持自定义功能提供config的帮助 -if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) -else: - config = {} - -language = config.get("language", "auto") # 在这里输入你的 API 密钥 -language = os.environ.get("LANGUAGE", language) - - -if os.path.exists("api_key.txt"): - logging.info("检测到api_key.txt文件,正在进行迁移...") - with open("api_key.txt", "r") as f: - config["openai_api_key"] = f.read().strip() - os.rename("api_key.txt", "api_key(deprecated).txt") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4) - -if os.path.exists("auth.json"): - logging.info("检测到auth.json文件,正在进行迁移...") - auth_list = [] - with open("auth.json", "r", encoding='utf-8') as f: - auth = json.load(f) - for _ in auth: - if auth[_]["username"] and auth[_]["password"]: - auth_list.append((auth[_]["username"], auth[_]["password"])) - else: - logging.error("请检查auth.json文件中的用户名和密码!") - sys.exit(1) - config["users"] = auth_list - os.rename("auth.json", "auth(deprecated).json") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4) - -## 处理docker if we are running in Docker -dockerflag = config.get("dockerflag", False) -if os.environ.get("dockerrun") == "yes": - dockerflag = True - -## 处理 api-key 以及 允许的用户列表 -my_api_key = config.get("openai_api_key", "") # 在这里输入你的 API 密钥 -my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key) - -## 多账户机制 -multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制 -if multi_api_key: - api_key_list = config.get("api_key_list", []) - if len(api_key_list) == 0: - logging.error("多账号模式已开启,但api_key_list为空,请检查config.json") - sys.exit(1) - shared.state.set_api_key_queue(api_key_list) - -auth_list = config.get("users", []) # 实际上是使用者的列表 -authflag = len(auth_list) > 0 # 是否开启认证的状态值,改为判断auth_list长度 - -# 处理自定义的api_host,优先读环境变量的配置,如果存在则自动装配 -api_host = os.environ.get("api_host", config.get("api_host", "")) -if api_host: - shared.state.set_api_host(api_host) - -@contextmanager -def retrieve_openai_api(api_key = None): - old_api_key = os.environ.get("OPENAI_API_KEY", "") - if api_key is None: - os.environ["OPENAI_API_KEY"] = my_api_key - yield my_api_key - else: - os.environ["OPENAI_API_KEY"] = api_key - yield api_key - os.environ["OPENAI_API_KEY"] = old_api_key - -## 处理log -log_level = config.get("log_level", "INFO") -logging.basicConfig( - level=log_level, - format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - -## 处理代理: -http_proxy = config.get("http_proxy", "") -https_proxy = config.get("https_proxy", "") -http_proxy = os.environ.get("HTTP_PROXY", http_proxy) -https_proxy = os.environ.get("HTTPS_PROXY", https_proxy) - -# 重置系统变量,在不需要设置的时候不设置环境变量,以免引起全局代理报错 -os.environ["HTTP_PROXY"] = "" -os.environ["HTTPS_PROXY"] = "" - -local_embedding = config.get("local_embedding", False) # 是否使用本地embedding - -@contextmanager -def retrieve_proxy(proxy=None): - """ - 1, 如果proxy = NONE,设置环境变量,并返回最新设置的代理 - 2,如果proxy != NONE,更新当前的代理配置,但是不更新环境变量 - """ - global http_proxy, https_proxy - if proxy is not None: - http_proxy = proxy - https_proxy = proxy - yield http_proxy, https_proxy - else: - old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] - os.environ["HTTP_PROXY"] = http_proxy - 
os.environ["HTTPS_PROXY"] = https_proxy - yield http_proxy, https_proxy # return new proxy - - # return old proxy - os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var - - -## 处理advance docs -advance_docs = defaultdict(lambda: defaultdict(dict)) -advance_docs.update(config.get("advance_docs", {})) -def update_doc_config(two_column_pdf): - global advance_docs - advance_docs["pdf"]["two_column"] = two_column_pdf - - logging.info(f"更新后的文件参数为:{advance_docs}") - -## 处理gradio.launch参数 -server_name = config.get("server_name", None) -server_port = config.get("server_port", None) -if server_name is None: - if dockerflag: - server_name = "0.0.0.0" - else: - server_name = "127.0.0.1" -if server_port is None: - if dockerflag: - server_port = 7860 - -assert server_port is None or type(server_port) == int, "要求port设置为int类型" - -# 设置默认model -default_model = config.get("default_model", "") -try: - presets.DEFAULT_MODEL = presets.MODELS.index(default_model) -except ValueError: - pass - -share = config.get("share", False) diff --git a/spaces/HaloMaster/chinesesummary/fengshen/__init__.py b/spaces/HaloMaster/chinesesummary/fengshen/__init__.py deleted file mode 100644 index b5829a3ac9e634d44d408d2ff6d22880e1c00805..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The IDEA Authors. All rights reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .models.longformer import LongformerConfig, LongformerModel -from .models.roformer import RoFormerConfig, RoFormerModel -from .models.megatron_t5 import T5Config, T5EncoderModel -from .models.ubert import UbertPiplines, UbertModel diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/laser/laser_src/laser_lstm.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/laser/laser_src/laser_lstm.py deleted file mode 100644 index 10df90e002d5a7dd74a571dbc3b328c130c57a0a..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/laser/laser_src/laser_lstm.py +++ /dev/null @@ -1,585 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch -import torch.nn as nn -import torch.nn.functional as F - -from fairseq import options, utils - -from fairseq.models import ( - FairseqEncoder, - FairseqIncrementalDecoder, - FairseqEncoderDecoderModel, - register_model, - register_model_architecture, -) - - -@register_model("laser_lstm") -class LSTMModel(FairseqEncoderDecoderModel): - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - - def forward( - self, - src_tokens, - src_lengths, - prev_output_tokens=None, - tgt_tokens=None, - tgt_lengths=None, - target_language_id=None, - dataset_name="", - ): - assert target_language_id is not None - - src_encoder_out = self.encoder(src_tokens, src_lengths, dataset_name) - return self.decoder( - prev_output_tokens, src_encoder_out, lang_id=target_language_id - ) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - parser.add_argument( - "--dropout", - default=0.1, - type=float, - metavar="D", - help="dropout probability", - ) - parser.add_argument( - "--encoder-embed-dim", - type=int, - metavar="N", - help="encoder embedding dimension", - ) - parser.add_argument( - "--encoder-embed-path", - default=None, - type=str, - metavar="STR", - help="path to pre-trained encoder embedding", - ) - parser.add_argument( - "--encoder-hidden-size", type=int, metavar="N", help="encoder hidden size" - ) - parser.add_argument( - "--encoder-layers", type=int, metavar="N", help="number of encoder layers" - ) - parser.add_argument( - "--encoder-bidirectional", - action="store_true", - help="make all layers of encoder bidirectional", - ) - parser.add_argument( - "--decoder-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension", - ) - parser.add_argument( - "--decoder-embed-path", - default=None, - type=str, - metavar="STR", - help="path to pre-trained decoder embedding", - ) - parser.add_argument( - "--decoder-hidden-size", type=int, metavar="N", help="decoder hidden size" - ) - parser.add_argument( - "--decoder-layers", type=int, metavar="N", help="number of decoder layers" - ) - parser.add_argument( - "--decoder-out-embed-dim", - type=int, - metavar="N", - help="decoder output embedding dimension", - ) - parser.add_argument( - "--decoder-zero-init", - type=str, - metavar="BOOL", - help="initialize the decoder hidden/cell state to zero", - ) - parser.add_argument( - "--decoder-lang-embed-dim", - type=int, - metavar="N", - help="decoder language embedding dimension", - ) - parser.add_argument( - "--fixed-embeddings", - action="store_true", - help="keep embeddings fixed (ENCODER ONLY)", - ) # TODO Also apply to decoder embeddings? 
- - # Granular dropout settings (if not specified these default to --dropout) - parser.add_argument( - "--encoder-dropout-in", - type=float, - metavar="D", - help="dropout probability for encoder input embedding", - ) - parser.add_argument( - "--encoder-dropout-out", - type=float, - metavar="D", - help="dropout probability for encoder output", - ) - parser.add_argument( - "--decoder-dropout-in", - type=float, - metavar="D", - help="dropout probability for decoder input embedding", - ) - parser.add_argument( - "--decoder-dropout-out", - type=float, - metavar="D", - help="dropout probability for decoder output", - ) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - # make sure that all args are properly defaulted (in case there are any new ones) - base_architecture(args) - - def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) - embed_dict = utils.parse_embedding(embed_path) - utils.print_embed_overlap(embed_dict, dictionary) - return utils.load_embedding(embed_dict, dictionary, embed_tokens) - - pretrained_encoder_embed = None - if args.encoder_embed_path: - pretrained_encoder_embed = load_pretrained_embedding_from_file( - args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim - ) - pretrained_decoder_embed = None - if args.decoder_embed_path: - pretrained_decoder_embed = load_pretrained_embedding_from_file( - args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim - ) - - num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0 - - encoder = LSTMEncoder( - dictionary=task.source_dictionary, - embed_dim=args.encoder_embed_dim, - hidden_size=args.encoder_hidden_size, - num_layers=args.encoder_layers, - dropout_in=args.encoder_dropout_in, - dropout_out=args.encoder_dropout_out, - bidirectional=args.encoder_bidirectional, - pretrained_embed=pretrained_encoder_embed, - fixed_embeddings=args.fixed_embeddings, - ) - decoder = LSTMDecoder( - dictionary=task.target_dictionary, - embed_dim=args.decoder_embed_dim, - hidden_size=args.decoder_hidden_size, - out_embed_dim=args.decoder_out_embed_dim, - num_layers=args.decoder_layers, - dropout_in=args.decoder_dropout_in, - dropout_out=args.decoder_dropout_out, - zero_init=options.eval_bool(args.decoder_zero_init), - encoder_embed_dim=args.encoder_embed_dim, - encoder_output_units=encoder.output_units, - pretrained_embed=pretrained_decoder_embed, - num_langs=num_langs, - lang_embed_dim=args.decoder_lang_embed_dim, - ) - return cls(encoder, decoder) - - -class LSTMEncoder(FairseqEncoder): - """LSTM encoder.""" - - def __init__( - self, - dictionary, - embed_dim=512, - hidden_size=512, - num_layers=1, - dropout_in=0.1, - dropout_out=0.1, - bidirectional=False, - left_pad=True, - pretrained_embed=None, - padding_value=0.0, - fixed_embeddings=False, - ): - super().__init__(dictionary) - self.num_layers = num_layers - self.dropout_in = dropout_in - self.dropout_out = dropout_out - self.bidirectional = bidirectional - self.hidden_size = hidden_size - - num_embeddings = len(dictionary) - self.padding_idx = dictionary.pad() - if pretrained_embed is None: - self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) - else: - self.embed_tokens = pretrained_embed - if fixed_embeddings: - self.embed_tokens.weight.requires_grad = False - - self.lstm = LSTM( - input_size=embed_dim, - hidden_size=hidden_size, - 
num_layers=num_layers, - dropout=self.dropout_out if num_layers > 1 else 0.0, - bidirectional=bidirectional, - ) - self.left_pad = left_pad - self.padding_value = padding_value - - self.output_units = hidden_size - if bidirectional: - self.output_units *= 2 - - def forward(self, src_tokens, src_lengths, dataset_name): - if self.left_pad: - # convert left-padding to right-padding - src_tokens = utils.convert_padding_direction( - src_tokens, - self.padding_idx, - left_to_right=True, - ) - - bsz, seqlen = src_tokens.size() - - # embed tokens - x = self.embed_tokens(src_tokens) - x = F.dropout(x, p=self.dropout_in, training=self.training) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - # pack embedded source tokens into a PackedSequence - try: - packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist()) - except BaseException: - raise Exception(f"Packing failed in dataset {dataset_name}") - - # apply LSTM - if self.bidirectional: - state_size = 2 * self.num_layers, bsz, self.hidden_size - else: - state_size = self.num_layers, bsz, self.hidden_size - h0 = x.data.new(*state_size).zero_() - c0 = x.data.new(*state_size).zero_() - packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) - - # unpack outputs and apply dropout - x, _ = nn.utils.rnn.pad_packed_sequence( - packed_outs, padding_value=self.padding_value - ) - x = F.dropout(x, p=self.dropout_out, training=self.training) - assert list(x.size()) == [seqlen, bsz, self.output_units] - - if self.bidirectional: - - def combine_bidir(outs): - return torch.cat( - [ - torch.cat([outs[2 * i], outs[2 * i + 1]], dim=0).view( - 1, bsz, self.output_units - ) - for i in range(self.num_layers) - ], - dim=0, - ) - - final_hiddens = combine_bidir(final_hiddens) - final_cells = combine_bidir(final_cells) - - encoder_padding_mask = src_tokens.eq(self.padding_idx).t() - - # Set padded outputs to -inf so they are not selected by max-pooling - padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1) - if padding_mask.any(): - x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x) - - # Build the sentence embedding by max-pooling over the encoder outputs - sentemb = x.max(dim=0)[0] - - return { - "sentemb": sentemb, - "encoder_out": (x, final_hiddens, final_cells), - "encoder_padding_mask": encoder_padding_mask - if encoder_padding_mask.any() - else None, - } - - def reorder_encoder_out(self, encoder_out_dict, new_order): - encoder_out_dict["sentemb"] = encoder_out_dict["sentemb"].index_select( - 0, new_order - ) - encoder_out_dict["encoder_out"] = tuple( - eo.index_select(1, new_order) for eo in encoder_out_dict["encoder_out"] - ) - if encoder_out_dict["encoder_padding_mask"] is not None: - encoder_out_dict["encoder_padding_mask"] = encoder_out_dict[ - "encoder_padding_mask" - ].index_select(1, new_order) - return encoder_out_dict - - def max_positions(self): - """Maximum input length supported by the encoder.""" - return int(1e5) # an arbitrary large number - - -class LSTMDecoder(FairseqIncrementalDecoder): - """LSTM decoder.""" - - def __init__( - self, - dictionary, - embed_dim=512, - hidden_size=512, - out_embed_dim=512, - num_layers=1, - dropout_in=0.1, - dropout_out=0.1, - zero_init=False, - encoder_embed_dim=512, - encoder_output_units=512, - pretrained_embed=None, - num_langs=1, - lang_embed_dim=0, - ): - super().__init__(dictionary) - self.dropout_in = dropout_in - self.dropout_out = dropout_out - self.hidden_size = hidden_size - - num_embeddings = len(dictionary) - padding_idx = 
dictionary.pad() - if pretrained_embed is None: - self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) - else: - self.embed_tokens = pretrained_embed - - self.layers = nn.ModuleList( - [ - LSTMCell( - input_size=encoder_output_units + embed_dim + lang_embed_dim - if layer == 0 - else hidden_size, - hidden_size=hidden_size, - ) - for layer in range(num_layers) - ] - ) - if hidden_size != out_embed_dim: - self.additional_fc = Linear(hidden_size, out_embed_dim) - self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out) - - if zero_init: - self.sentemb2init = None - else: - self.sentemb2init = Linear( - encoder_output_units, 2 * num_layers * hidden_size - ) - - if lang_embed_dim == 0: - self.embed_lang = None - else: - self.embed_lang = nn.Embedding(num_langs, lang_embed_dim) - nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1) - - def forward( - self, prev_output_tokens, encoder_out_dict, incremental_state=None, lang_id=0 - ): - sentemb = encoder_out_dict["sentemb"] - encoder_out = encoder_out_dict["encoder_out"] - - if incremental_state is not None: - prev_output_tokens = prev_output_tokens[:, -1:] - bsz, seqlen = prev_output_tokens.size() - - # get outputs from encoder - encoder_outs, _, _ = encoder_out[:3] - srclen = encoder_outs.size(0) - - # embed tokens - x = self.embed_tokens(prev_output_tokens) - x = F.dropout(x, p=self.dropout_in, training=self.training) - - # embed language identifier - if self.embed_lang is not None: - lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id) - langemb = self.embed_lang(lang_ids) - # TODO Should we dropout here??? - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - # initialize previous states (or get from cache during incremental generation) - cached_state = utils.get_incremental_state( - self, incremental_state, "cached_state" - ) - if cached_state is not None: - prev_hiddens, prev_cells, input_feed = cached_state - else: - num_layers = len(self.layers) - if self.sentemb2init is None: - prev_hiddens = [ - x.data.new(bsz, self.hidden_size).zero_() for i in range(num_layers) - ] - prev_cells = [ - x.data.new(bsz, self.hidden_size).zero_() for i in range(num_layers) - ] - else: - init = self.sentemb2init(sentemb) - prev_hiddens = [ - init[:, (2 * i) * self.hidden_size : (2 * i + 1) * self.hidden_size] - for i in range(num_layers) - ] - prev_cells = [ - init[ - :, - (2 * i + 1) * self.hidden_size : (2 * i + 2) * self.hidden_size, - ] - for i in range(num_layers) - ] - input_feed = x.data.new(bsz, self.hidden_size).zero_() - - attn_scores = x.data.new(srclen, seqlen, bsz).zero_() - outs = [] - for j in range(seqlen): - if self.embed_lang is None: - input = torch.cat((x[j, :, :], sentemb), dim=1) - else: - input = torch.cat((x[j, :, :], sentemb, langemb), dim=1) - - for i, rnn in enumerate(self.layers): - # recurrent cell - hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) - - # hidden state becomes the input to the next layer - input = F.dropout(hidden, p=self.dropout_out, training=self.training) - - # save state for next time step - prev_hiddens[i] = hidden - prev_cells[i] = cell - - out = hidden - out = F.dropout(out, p=self.dropout_out, training=self.training) - - # input feeding - input_feed = out - - # save final output - outs.append(out) - - # cache previous states (no-op except during incremental generation) - utils.set_incremental_state( - self, - incremental_state, - "cached_state", - (prev_hiddens, prev_cells, input_feed), - ) - - # collect outputs across time steps - x = torch.cat(outs, 
dim=0).view(seqlen, bsz, self.hidden_size) - - # T x B x C -> B x T x C - x = x.transpose(1, 0) - - # srclen x tgtlen x bsz -> bsz x tgtlen x srclen - attn_scores = attn_scores.transpose(0, 2) - - # project back to size of vocabulary - if hasattr(self, "additional_fc"): - x = self.additional_fc(x) - x = F.dropout(x, p=self.dropout_out, training=self.training) - x = self.fc_out(x) - - return x, attn_scores - - def reorder_incremental_state(self, incremental_state, new_order): - super().reorder_incremental_state(incremental_state, new_order) - cached_state = utils.get_incremental_state( - self, incremental_state, "cached_state" - ) - if cached_state is None: - return - - def reorder_state(state): - if isinstance(state, list): - return [reorder_state(state_i) for state_i in state] - return state.index_select(0, new_order) - - new_state = tuple(map(reorder_state, cached_state)) - utils.set_incremental_state(self, incremental_state, "cached_state", new_state) - - def max_positions(self): - """Maximum output length supported by the decoder.""" - return int(1e5) # an arbitrary large number - - -def Embedding(num_embeddings, embedding_dim, padding_idx): - m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) - nn.init.uniform_(m.weight, -0.1, 0.1) - nn.init.constant_(m.weight[padding_idx], 0) - return m - - -def LSTM(input_size, hidden_size, **kwargs): - m = nn.LSTM(input_size, hidden_size, **kwargs) - for name, param in m.named_parameters(): - if "weight" in name or "bias" in name: - param.data.uniform_(-0.1, 0.1) - return m - - -def LSTMCell(input_size, hidden_size, **kwargs): - m = nn.LSTMCell(input_size, hidden_size, **kwargs) - for name, param in m.named_parameters(): - if "weight" in name or "bias" in name: - param.data.uniform_(-0.1, 0.1) - return m - - -def Linear(in_features, out_features, bias=True, dropout=0): - """Weight-normalized Linear layer (input: N x T x C)""" - m = nn.Linear(in_features, out_features, bias=bias) - m.weight.data.uniform_(-0.1, 0.1) - if bias: - m.bias.data.uniform_(-0.1, 0.1) - return m - - -@register_model_architecture("laser_lstm", "laser_lstm") -def base_architecture(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_hidden_size = getattr( - args, "encoder_hidden_size", args.encoder_embed_dim - ) - args.encoder_layers = getattr(args, "encoder_layers", 1) - args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False) - args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout) - args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_hidden_size = getattr( - args, "decoder_hidden_size", args.decoder_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 1) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) - args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout) - args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) - args.decoder_zero_init = getattr(args, "decoder_zero_init", "0") - args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0) - args.fixed_embeddings = getattr(args, "fixed_embeddings", False) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/modules/transformer_layer.py 
b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/modules/transformer_layer.py deleted file mode 100644 index 7ab53c6e5f12f15562717effb86ab8cb8d6b4fa3..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/modules/transformer_layer.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq.model_parallel.modules import ModelParallelMultiheadAttention -from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer - - -try: - from fairseq.model_parallel.megatron.mpu import ( - ColumnParallelLinear, - RowParallelLinear, - ) - - has_megatron_submodule = True -except (ImportError, ModuleNotFoundError): - has_megatron_submodule = False - - -class ModelParallelTransformerEncoderLayer(TransformerEncoderLayer): - """Encoder layer block over multiple gpus. - - See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details. - """ - - def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): - if q_noise > 0: - raise NotImplementedError - return ColumnParallelLinear(input_dim, output_dim, gather_output=False) - - def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): - if q_noise > 0: - raise NotImplementedError - return RowParallelLinear(input_dim, output_dim, input_is_parallel=True) - - def build_self_attention(self, embed_dim, args, **unused_kwargs): - return ModelParallelMultiheadAttention( - embed_dim, - args.encoder_attention_heads, - dropout=args.attention_dropout, - self_attention=True, - ) - - -class ModelParallelTransformerDecoderLayer(TransformerDecoderLayer): - """Decoder layer block. - - See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details. 
- """ - - def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): - if q_noise > 0: - raise NotImplementedError - return ColumnParallelLinear(input_dim, output_dim, gather_output=False) - - def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): - if q_noise > 0: - raise NotImplementedError - return RowParallelLinear(input_dim, output_dim, input_is_parallel=True) - - def build_self_attention(self, embed_dim, args, **unused_kwargs): - return ModelParallelMultiheadAttention( - embed_dim=embed_dim, - num_heads=args.decoder_attention_heads, - dropout=args.attention_dropout, - self_attention=not getattr(args, "cross_self_attention", False), - ) - - def build_encoder_attention(self, embed_dim, args, **unused_kwargs): - return ModelParallelMultiheadAttention( - embed_dim=embed_dim, - num_heads=args.decoder_attention_heads, - kdim=getattr(args, "encoder_embed_dim", None), - vdim=getattr(args, "encoder_embed_dim", None), - dropout=args.attention_dropout, - encoder_decoder_attention=True, - ) diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/exceptions.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/exceptions.py deleted file mode 100644 index d6cf6a8f2fed4941c5c4a17902292062a929886e..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/exceptions.py +++ /dev/null @@ -1,23 +0,0 @@ -class DuplicateBlockError(ValueError): - """Raised when a Blocks contains more than one Block with the same id""" - - pass - - -class TooManyRequestsError(Exception): - """Raised when the Hugging Face API returns a 429 status code.""" - - pass - - -class InvalidApiName(ValueError): - pass - - -class Error(Exception): - def __init__(self, message: str): - self.message = message - super().__init__(self.message) - - def __str__(self): - return repr(self.message) diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/model_utils.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/model_utils.py deleted file mode 100644 index 732d66b1d5f695151c26d29eb7f6b53179c269f1..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/models/model_utils.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from typing import List, Optional - -import torch -from torch import Tensor - - -@torch.jit.script -def script_skip_tensor_list(x: List[Tensor], mask): - res = [xi[mask] if xi.size(0) == mask.size(0) else xi[:, mask] for xi in x] - outputs = [] - for i, t in enumerate(res): - if t.numel() != 0: - outputs.append(t) - else: - outputs.append(x[i]) - return outputs - - -@torch.jit.script -def script_skip_tensor(x: Tensor, mask): - # None case - if x.size(0) == 0: - return x - res = x[mask] if x.size(0) == mask.size(0) else x[:, mask] - if res.numel() == 0: - return x - else: - return res - - -@torch.jit.script -def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int): - """ - Expand 2D/3D tensor on dim=1 - """ - if x is None: - return None - - assert x.dim() == 2 or x.dim() == 3 - assert trg_dim >= x.size(1), (trg_dim, x.size()) - if trg_dim == x.size(1): - return x - - dims = [x.size(0), trg_dim - x.size(1)] - if x.dim() == 3: - dims.append(x.size(2)) - x = torch.cat([x, torch.zeros(dims).to(x).fill_(padding_idx)], 1) - - return x - - -@torch.jit.script -def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor: - return x if x is not None else y - - -@torch.jit.script -def fill_tensors( - x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int -) -> Optional[Tensor]: - """ - Filling tensor x with y at masked positions (dim=0). - """ - if x is None or x.size()[0] == 0 or y is None: - return x - assert x.dim() == y.dim() and mask.size(0) == x.size(0) - assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) - - n_selected = mask.sum() - if n_selected == 0: - return x - assert n_selected == y.size(0) - if n_selected == x.size(0): - return y - - if x.size(1) < y.size(1): - x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx) - x[mask] = y - elif x.size(1) > y.size(1): - x[mask] = torch.tensor(padding_idx).type_as(x) - if x.dim() == 2: - x[mask, : y.size(1)] = y - else: - x[mask, : y.size(1), :] = y - else: - x[mask] = y - return x diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/options.py b/spaces/ICML2022/OFA/fairseq/fairseq/options.py deleted file mode 100644 index 797b2842db4a68849110a25bb52a47c658966186..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/options.py +++ /dev/null @@ -1,406 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
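# A hypothetical usage sketch (not taken from the deleted files above) for
# fill_tensors() from fairseq/models/model_utils.py: rows of x selected by mask are
# overwritten with y, and x is first right-padded with padding_idx when y is wider.
# The shapes and values below are invented for illustration only.
import torch

from fairseq.models.model_utils import fill_tensors

pad = 1
x = torch.tensor([[5, 6, pad],
                  [7, 8, 9]])            # two rows, width 3
mask = torch.tensor([True, False])       # replace only the first row
y = torch.tensor([[10, 11, 12, 13]])     # replacement row is wider (width 4)

out = fill_tensors(x, mask, y, pad)
# x is expanded to width 4 with padding_idx, then row 0 is overwritten:
# tensor([[10, 11, 12, 13],
#         [ 7,  8,  9,  1]])
print(out)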
- -import argparse -from pathlib import Path -from typing import Callable, List, Optional, Union - -import torch -from fairseq import utils -from fairseq.data.indexed_dataset import get_available_dataset_impl -from fairseq.dataclass.configs import ( - CheckpointConfig, - CommonConfig, - CommonEvalConfig, - DatasetConfig, - DistributedTrainingConfig, - EvalLMConfig, - GenerationConfig, - InteractiveConfig, - OptimizationConfig, - EMAConfig, -) -from fairseq.dataclass.utils import gen_parser_from_dataclass - -# this import is for backward compatibility -from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa - - -def get_preprocessing_parser(default_task="translation"): - parser = get_parser("Preprocessing", default_task) - add_preprocess_args(parser) - return parser - - -def get_training_parser(default_task="translation"): - parser = get_parser("Trainer", default_task) - add_dataset_args(parser, train=True) - add_distributed_training_args(parser) - add_model_args(parser) - add_optimization_args(parser) - add_checkpoint_args(parser) - add_ema_args(parser) - return parser - - -def get_generation_parser(interactive=False, default_task="translation"): - parser = get_parser("Generation", default_task) - add_dataset_args(parser, gen=True) - add_distributed_training_args(parser) - add_generation_args(parser) - add_checkpoint_args(parser) - if interactive: - add_interactive_args(parser) - return parser - - -def get_speech_generation_parser(default_task="text_to_speech"): - parser = get_parser("Speech Generation", default_task) - add_dataset_args(parser, gen=True) - add_distributed_training_args(parser, default_world_size=1) - add_speech_generation_args(parser) - return parser - - -def get_interactive_generation_parser(default_task="translation"): - return get_generation_parser(interactive=True, default_task=default_task) - - -def get_eval_lm_parser(default_task="language_modeling"): - parser = get_parser("Evaluate Language Model", default_task) - add_dataset_args(parser, gen=True) - add_distributed_training_args(parser, default_world_size=1) - add_eval_lm_args(parser) - return parser - - -def get_validation_parser(default_task=None): - parser = get_parser("Validation", default_task) - add_dataset_args(parser, train=True) - add_distributed_training_args(parser, default_world_size=1) - group = parser.add_argument_group("Evaluation") - gen_parser_from_dataclass(group, CommonEvalConfig()) - return parser - - -def parse_args_and_arch( - parser: argparse.ArgumentParser, - input_args: List[str] = None, - parse_known: bool = False, - suppress_defaults: bool = False, - modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None, -): - """ - Args: - parser (ArgumentParser): the parser - input_args (List[str]): strings to parse, defaults to sys.argv - parse_known (bool): only parse known arguments, similar to - `ArgumentParser.parse_known_args` - suppress_defaults (bool): parse while ignoring all default values - modify_parser (Optional[Callable[[ArgumentParser], None]]): - function to modify the parser, e.g., to set default values - """ - if suppress_defaults: - # Parse args without any default values. This requires us to parse - # twice, once to identify all the necessary task/model args, and a second - # time with all defaults set to None. 
- args = parse_args_and_arch( - parser, - input_args=input_args, - parse_known=parse_known, - suppress_defaults=False, - ) - suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser]) - suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()}) - args = suppressed_parser.parse_args(input_args) - return argparse.Namespace( - **{k: v for k, v in vars(args).items() if v is not None} - ) - - from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY - - # Before creating the true parser, we need to import optional user module - # in order to eagerly import custom tasks, optimizers, architectures, etc. - usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) - usr_parser.add_argument("--user-dir", default=None) - usr_args, _ = usr_parser.parse_known_args(input_args) - utils.import_user_module(usr_args) - - if modify_parser is not None: - modify_parser(parser) - - # The parser doesn't know about model/criterion/optimizer-specific args, so - # we parse twice. First we parse the model/criterion/optimizer, then we - # parse a second time after adding the *-specific arguments. - # If input_args is given, we will parse those args instead of sys.argv. - args, _ = parser.parse_known_args(input_args) - - # Add model-specific args to parser. - if hasattr(args, "arch"): - model_specific_group = parser.add_argument_group( - "Model-specific configuration", - # Only include attributes which are explicitly given as command-line - # arguments or which have default values. - argument_default=argparse.SUPPRESS, - ) - if args.arch in ARCH_MODEL_REGISTRY: - ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group) - elif args.arch in MODEL_REGISTRY: - MODEL_REGISTRY[args.arch].add_args(model_specific_group) - else: - raise RuntimeError() - - if hasattr(args, "task"): - from fairseq.tasks import TASK_REGISTRY - - TASK_REGISTRY[args.task].add_args(parser) - if getattr(args, "use_bmuf", False): - # hack to support extra args for block distributed data parallelism - from fairseq.optim.bmuf import FairseqBMUF - - FairseqBMUF.add_args(parser) - - # Add *-specific args to parser. - from fairseq.registry import REGISTRIES - - for registry_name, REGISTRY in REGISTRIES.items(): - choice = getattr(args, registry_name, None) - if choice is not None: - cls = REGISTRY["registry"][choice] - if hasattr(cls, "add_args"): - cls.add_args(parser) - elif hasattr(cls, "__dataclass"): - gen_parser_from_dataclass(parser, cls.__dataclass()) - - # Modify the parser a second time, since defaults may have been reset - if modify_parser is not None: - modify_parser(parser) - - # Parse a second time. - if parse_known: - args, extra = parser.parse_known_args(input_args) - else: - args = parser.parse_args(input_args) - extra = None - # Post-process args. 
- if ( - hasattr(args, "batch_size_valid") and args.batch_size_valid is None - ) or not hasattr(args, "batch_size_valid"): - args.batch_size_valid = args.batch_size - if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None: - args.max_tokens_valid = args.max_tokens - if getattr(args, "memory_efficient_fp16", False): - args.fp16 = True - if getattr(args, "memory_efficient_bf16", False): - args.bf16 = True - args.tpu = getattr(args, "tpu", False) - args.bf16 = getattr(args, "bf16", False) - if args.bf16: - args.tpu = True - if args.tpu and args.fp16: - raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs") - - if getattr(args, "seed", None) is None: - args.seed = 1 # default seed for training - args.no_seed_provided = True - else: - args.no_seed_provided = False - - # Apply architecture configuration. - if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY: - ARCH_CONFIG_REGISTRY[args.arch](args) - - if parse_known: - return args, extra - else: - return args - - -def get_parser(desc, default_task="translation"): - # Before creating the true parser, we need to import optional user module - # in order to eagerly import custom tasks, optimizers, architectures, etc. - usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) - usr_parser.add_argument("--user-dir", default=None) - usr_args, _ = usr_parser.parse_known_args() - utils.import_user_module(usr_args) - - parser = argparse.ArgumentParser(allow_abbrev=False) - gen_parser_from_dataclass(parser, CommonConfig()) - - from fairseq.registry import REGISTRIES - - for registry_name, REGISTRY in REGISTRIES.items(): - parser.add_argument( - "--" + registry_name.replace("_", "-"), - default=REGISTRY["default"], - choices=REGISTRY["registry"].keys(), - ) - - # Task definitions can be found under fairseq/tasks/ - from fairseq.tasks import TASK_REGISTRY - - parser.add_argument( - "--task", - metavar="TASK", - default=default_task, - choices=TASK_REGISTRY.keys(), - help="task", - ) - # fmt: on - return parser - - -def add_preprocess_args(parser): - group = parser.add_argument_group("Preprocessing") - # fmt: off - group.add_argument("-s", "--source-lang", default=None, metavar="SRC", - help="source language") - group.add_argument("-t", "--target-lang", default=None, metavar="TARGET", - help="target language") - group.add_argument("--trainpref", metavar="FP", default=None, - help="train file prefix (also used to build dictionaries)") - group.add_argument("--validpref", metavar="FP", default=None, - help="comma separated, valid file prefixes " - "(words missing from train set are replaced with )") - group.add_argument("--testpref", metavar="FP", default=None, - help="comma separated, test file prefixes " - "(words missing from train set are replaced with )") - group.add_argument("--align-suffix", metavar="FP", default=None, - help="alignment file suffix") - group.add_argument("--destdir", metavar="DIR", default="data-bin", - help="destination dir") - group.add_argument("--thresholdtgt", metavar="N", default=0, type=int, - help="map words appearing less than threshold times to unknown") - group.add_argument("--thresholdsrc", metavar="N", default=0, type=int, - help="map words appearing less than threshold times to unknown") - group.add_argument("--tgtdict", metavar="FP", - help="reuse given target dictionary") - group.add_argument("--srcdict", metavar="FP", - help="reuse given source dictionary") - group.add_argument("--nwordstgt", metavar="N", default=-1, type=int, - help="number of target words to 
retain") - group.add_argument("--nwordssrc", metavar="N", default=-1, type=int, - help="number of source words to retain") - group.add_argument("--alignfile", metavar="ALIGN", default=None, - help="an alignment file (optional)") - parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', - choices=get_available_dataset_impl(), - help='output dataset implementation') - group.add_argument("--joined-dictionary", action="store_true", - help="Generate joined dictionary") - group.add_argument("--only-source", action="store_true", - help="Only process the source language") - group.add_argument("--padding-factor", metavar="N", default=8, type=int, - help="Pad dictionary size to be multiple of N") - group.add_argument("--workers", metavar="N", default=1, type=int, - help="number of parallel workers") - group.add_argument("--dict-only", action='store_true', - help="if true, only builds a dictionary and then exits") - # fmt: on - return parser - - -def add_dataset_args(parser, train=False, gen=False): - group = parser.add_argument_group("dataset_data_loading") - gen_parser_from_dataclass(group, DatasetConfig()) - # fmt: on - return group - - -def add_distributed_training_args(parser, default_world_size=None): - group = parser.add_argument_group("distributed_training") - if default_world_size is None: - default_world_size = max(1, torch.cuda.device_count()) - gen_parser_from_dataclass( - group, DistributedTrainingConfig(distributed_world_size=default_world_size) - ) - return group - - -def add_optimization_args(parser): - group = parser.add_argument_group("optimization") - # fmt: off - gen_parser_from_dataclass(group, OptimizationConfig()) - # fmt: on - return group - - -def add_checkpoint_args(parser): - group = parser.add_argument_group("checkpoint") - # fmt: off - gen_parser_from_dataclass(group, CheckpointConfig()) - # fmt: on - return group - - -def add_common_eval_args(group): - gen_parser_from_dataclass(group, CommonEvalConfig()) - - -def add_eval_lm_args(parser): - group = parser.add_argument_group("LM Evaluation") - add_common_eval_args(group) - gen_parser_from_dataclass(group, EvalLMConfig()) - - -def add_generation_args(parser): - group = parser.add_argument_group("Generation") - add_common_eval_args(group) - gen_parser_from_dataclass(group, GenerationConfig()) - return group - - -def add_speech_generation_args(parser): - group = parser.add_argument_group("Speech Generation") - add_common_eval_args(group) # NOTE: remove_bpe is not needed - # fmt: off - group.add_argument('--eos_prob_threshold', default=0.5, type=float, - help='terminate when eos probability exceeds this') - # fmt: on - return group - - -def add_interactive_args(parser): - group = parser.add_argument_group("Interactive") - gen_parser_from_dataclass(group, InteractiveConfig()) - - -def add_model_args(parser): - group = parser.add_argument_group("Model configuration") - # fmt: off - - # Model definitions can be found under fairseq/models/ - # - # The model architecture can be specified in several ways. 
- # In increasing order of priority: - # 1) model defaults (lowest priority) - # 2) --arch argument - # 3) --encoder/decoder-* arguments (highest priority) - from fairseq.models import ARCH_MODEL_REGISTRY - group.add_argument('--arch', '-a', metavar='ARCH', - choices=ARCH_MODEL_REGISTRY.keys(), - help='model architecture') - # fmt: on - return group - - -def get_args( - data: Union[str, Path], - task: str = "translation", - arch: str = "transformer", - **overrides -): - parser = get_training_parser(task) - args = parse_args_and_arch(parser, [str(data), "--task", task, "--arch", arch]) - - for k, v in overrides.items(): - setattr(args, k, v) - - return args - - -def add_ema_args(parser): - group = parser.add_argument_group("EMA configuration") - gen_parser_from_dataclass(group, EMAConfig()) diff --git a/spaces/Ibtehaj10/cheating-detection/generate_keys.py b/spaces/Ibtehaj10/cheating-detection/generate_keys.py deleted file mode 100644 index 997135fada594e5b17f64a219bbd8b44f1463cfa..0000000000000000000000000000000000000000 --- a/spaces/Ibtehaj10/cheating-detection/generate_keys.py +++ /dev/null @@ -1,17 +0,0 @@ -import pickle -from pathlib import Path -import streamlit_authenticator as stauth -# print("Done !!!") - -names = ["dmin", "ser"] - -username =["admin", "user"] - -password =["admin123", "user123"] - -hashed_passwords =stauth.Hasher(password).generate() - -file_path = Path(__file__).parent / "hashed_pw.pkl" - -with file_path.open("wb") as file: - pickle.dump(hashed_passwords, file) diff --git a/spaces/Iceclear/StableSR/StableSR/ldm/modules/embedding_manager.py b/spaces/Iceclear/StableSR/StableSR/ldm/modules/embedding_manager.py deleted file mode 100644 index 5c5f97bd9f151bc1c508f56bd7ccfb4509aaea82..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/ldm/modules/embedding_manager.py +++ /dev/null @@ -1,161 +0,0 @@ -import torch -from torch import nn - -from ldm.data.personalized import per_img_token_list -from transformers import CLIPTokenizer -from functools import partial - -DEFAULT_PLACEHOLDER_TOKEN = ["*"] - -PROGRESSIVE_SCALE = 2000 - -def get_clip_token_for_string(tokenizer, string): - batch_encoding = tokenizer(string, truncation=True, max_length=77, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"] - assert torch.count_nonzero(tokens - 49407) == 2, f"String '{string}' maps to more than a single token. Please use another string" - - return tokens[0, 1] - -def get_bert_token_for_string(tokenizer, string): - token = tokenizer(string) - assert torch.count_nonzero(token) == 3, f"String '{string}' maps to more than a single token. 
Please use another string" - - token = token[0, 1] - - return token - -def get_embedding_for_clip_token(embedder, token): - return embedder(token.unsqueeze(0))[0, 0] - - -class EmbeddingManager(nn.Module): - def __init__( - self, - embedder, - placeholder_strings=None, - initializer_words=None, - per_image_tokens=False, - num_vectors_per_token=1, - progressive_words=False, - **kwargs - ): - super().__init__() - - self.string_to_token_dict = {} - - self.string_to_param_dict = nn.ParameterDict() - - self.initial_embeddings = nn.ParameterDict() # These should not be optimized - - self.progressive_words = progressive_words - self.progressive_counter = 0 - - self.max_vectors_per_token = num_vectors_per_token - - if hasattr(embedder, 'tokenizer'): # using Stable Diffusion's CLIP encoder - self.is_clip = True - get_token_for_string = partial(get_clip_token_for_string, embedder.tokenizer) - get_embedding_for_tkn = partial(get_embedding_for_clip_token, embedder.transformer.text_model.embeddings) - token_dim = 768 - else: # using LDM's BERT encoder - self.is_clip = False - get_token_for_string = partial(get_bert_token_for_string, embedder.tknz_fn) - get_embedding_for_tkn = embedder.transformer.token_emb - token_dim = 1280 - - if per_image_tokens: - placeholder_strings.extend(per_img_token_list) - - for idx, placeholder_string in enumerate(placeholder_strings): - - token = get_token_for_string(placeholder_string) - - if initializer_words and idx < len(initializer_words): - init_word_token = get_token_for_string(initializer_words[idx]) - - with torch.no_grad(): - init_word_embedding = get_embedding_for_tkn(init_word_token.cpu()) - - token_params = torch.nn.Parameter(init_word_embedding.unsqueeze(0).repeat(num_vectors_per_token, 1), requires_grad=True) - self.initial_embeddings[placeholder_string] = torch.nn.Parameter(init_word_embedding.unsqueeze(0).repeat(num_vectors_per_token, 1), requires_grad=False) - else: - token_params = torch.nn.Parameter(torch.rand(size=(num_vectors_per_token, token_dim), requires_grad=True)) - - self.string_to_token_dict[placeholder_string] = token - self.string_to_param_dict[placeholder_string] = token_params - - def forward( - self, - tokenized_text, - embedded_text, - ): - b, n, device = *tokenized_text.shape, tokenized_text.device - - for placeholder_string, placeholder_token in self.string_to_token_dict.items(): - - placeholder_embedding = self.string_to_param_dict[placeholder_string].to(device) - - if self.max_vectors_per_token == 1: # If there's only one vector per token, we can do a simple replacement - placeholder_idx = torch.where(tokenized_text == placeholder_token.to(device)) - embedded_text[placeholder_idx] = placeholder_embedding - else: # otherwise, need to insert and keep track of changing indices - if self.progressive_words: - self.progressive_counter += 1 - max_step_tokens = 1 + self.progressive_counter // PROGRESSIVE_SCALE - else: - max_step_tokens = self.max_vectors_per_token - - num_vectors_for_token = min(placeholder_embedding.shape[0], max_step_tokens) - - placeholder_rows, placeholder_cols = torch.where(tokenized_text == placeholder_token.to(device)) - - if placeholder_rows.nelement() == 0: - continue - - sorted_cols, sort_idx = torch.sort(placeholder_cols, descending=True) - sorted_rows = placeholder_rows[sort_idx] - - for idx in range(len(sorted_rows)): - row = sorted_rows[idx] - col = sorted_cols[idx] - - new_token_row = torch.cat([tokenized_text[row][:col], placeholder_token.repeat(num_vectors_for_token).to(device), tokenized_text[row][col + 1:]], 
axis=0)[:n] - new_embed_row = torch.cat([embedded_text[row][:col], placeholder_embedding[:num_vectors_for_token], embedded_text[row][col + 1:]], axis=0)[:n] - - embedded_text[row] = new_embed_row - tokenized_text[row] = new_token_row - - return embedded_text - - def save(self, ckpt_path): - torch.save({"string_to_token": self.string_to_token_dict, - "string_to_param": self.string_to_param_dict}, ckpt_path) - - def load(self, ckpt_path): - ckpt = torch.load(ckpt_path, map_location='cpu') - - self.string_to_token_dict = ckpt["string_to_token"] - self.string_to_param_dict = ckpt["string_to_param"] - - def get_embedding_norms_squared(self): - all_params = torch.cat(list(self.string_to_param_dict.values()), axis=0) # num_placeholders x embedding_dim - param_norm_squared = (all_params * all_params).sum(axis=-1) # num_placeholders - - return param_norm_squared - - def embedding_parameters(self): - return self.string_to_param_dict.parameters() - - def embedding_to_coarse_loss(self): - - loss = 0. - num_embeddings = len(self.initial_embeddings) - - for key in self.initial_embeddings: - optimized = self.string_to_param_dict[key] - coarse = self.initial_embeddings[key].clone().to(optimized.device) - - loss = loss + (optimized - coarse) @ (optimized - coarse).T / num_embeddings - - return loss diff --git a/spaces/Illumotion/Koboldcpp/common/train.h b/spaces/Illumotion/Koboldcpp/common/train.h deleted file mode 100644 index 42fa704b897ae56051aaaef16a71c77fe2fd6c17..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/common/train.h +++ /dev/null @@ -1,230 +0,0 @@ -// Various helper functions and utilities for training - -#pragma once - -#include -#include -#include - -#include "ggml.h" -#include "llama.h" - -typedef std::string mt19937_state; - -struct train_state { - struct ggml_opt_context * opt; - - uint64_t train_its; - uint64_t train_samples; - uint64_t train_tokens; - uint64_t train_epochs; - - size_t shuffle_samples_hash; // fn, sample_count, *zip(sample_begins, sample_sizes) - mt19937_state shuffle_rng_state_current; - mt19937_state shuffle_rng_state_next; - size_t shuffle_sample_count; - size_t shuffle_next_sample; -}; - -struct train_params_common { - const char * fn_train_data; - const char * fn_checkpoint_in; - const char * fn_checkpoint_out; - const char * pattern_fn_it; - const char * fn_latest; - - bool print_usage; - - int save_every; - - uint32_t seed; - - int n_ctx; - int n_threads; - int n_batch; - int n_gradient_accumulation; - int n_epochs; - - bool custom_n_ctx; - - bool use_flash; - bool use_checkpointing; - - std::string sample_start; - bool include_sample_start; - bool escape; - bool overlapping_samples; - bool fill_with_next_samples; - bool separate_with_eos; - bool separate_with_bos; - bool sample_random_offsets; - - bool force_reshuffle; - - int warmup; - int cos_decay_steps; - float cos_decay_restart; - float cos_decay_min; - bool enable_restart; - - int opt_past; - float opt_delta; - int opt_max_no_improvement; - - int adam_n_iter; - float adam_alpha; - float adam_min_alpha; - float adam_decay; - int adam_decay_min_ndim; - float adam_beta1; - float adam_beta2; - float adam_gclip; - float adam_eps_f; -}; - -typedef void (*save_train_files_callback)(void * data, struct train_state * train); - -struct train_opt_callback_data { - struct train_params_common * params; - struct train_state * train; - save_train_files_callback save_cb; - void * save_data; - struct llama_context * lctx; - int last_save_iter; - llama_token * tokens_data; - size_t tokens_size; - 
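    /* tokens_data/tokens_size hold the tokenized training corpus; the samples_begin/
       samples_size arrays below record where each training sample starts in that buffer
       and how long it is, and the shuffled_samples_* arrays hold a shuffled ordering of
       those samples (see tokenize_file() and shuffle_samples() declared later in this header). */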
size_t * samples_begin; - size_t * samples_size; - size_t * shuffled_samples_offs; - size_t * shuffled_samples_begin; - size_t * shuffled_samples_size; - size_t samples_count; - struct ggml_tensor * tokens_input; - struct ggml_tensor * target_probs; - int first_iter; - int first_epoch; - int iter_at_last_epoch; - int64_t last_time; - double millis_per_iter; -}; - -struct train_state * init_train_state(); -void free_train_state(struct train_state * state); - -struct train_params_common get_default_train_params_common(); -void print_common_train_usage(int /*argc*/, char ** argv, const struct train_params_common * params); - -bool consume_common_train_arg(int argc, char ** argv, int * idx, struct train_params_common * params, bool * invalid_param); -void finish_processing_train_args(struct train_params_common * params); - -struct random_normal_distribution; -struct random_uniform_distribution; - -struct random_normal_distribution * init_random_normal_distribution (int seed, float mean, float std, float min, float max); -struct random_uniform_distribution * init_random_uniform_distribution(int seed, float min, float max); - -void free_random_normal_distribution (struct random_normal_distribution * rnd); -void free_random_uniform_distribution(struct random_uniform_distribution * rnd); - -struct ggml_tensor * randomize_tensor_normal (struct ggml_tensor * tensor, struct random_normal_distribution * rnd); -struct ggml_tensor * randomize_tensor_uniform(struct ggml_tensor * tensor, struct random_uniform_distribution * rnd); - -// generate random float in interval [0,1) -float frand(); -float frand_normal (struct random_normal_distribution * rnd); -float frand_uniform(struct random_uniform_distribution * rnd); - -int clamp (const int v, const int min, const int max); -float fclamp(const float v, const float min, const float max); - -void assert_shape_1d(struct ggml_tensor * tensor, int64_t ne0); -void assert_shape_2d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1); -void assert_shape_3d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1, int64_t ne2); -void assert_shape_4d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3); - -size_t tokenize_file( - struct llama_context * lctx, - const char * filename, - const std::string & sample_start, - bool include_sample_start, - bool overlapping_samples, - unsigned context_length, - std::vector & out_tokens, - std::vector & out_samples_begin, - std::vector & out_samples_size); - -int64_t get_example_targets_batch( - struct llama_context * lctx, - struct ggml_tensor * tokens_input, - struct ggml_tensor * target_probs, - int64_t example_id, - const size_t * samples_offs, - const size_t * samples_begin, - const size_t * samples_size, - size_t samples_count, - const llama_token * train_data, - size_t n_train_data, - bool separate_with_eos, - bool separate_with_bos, - bool fill_with_next_samples, - bool sample_random_offsets); - - -void mt19937_set_state(std::mt19937& rng, const mt19937_state& rng_state); -mt19937_state mt19937_get_state(const std::mt19937& rng); -mt19937_state mt19937_seed_to_state(unsigned seed); - -mt19937_state shuffle_samples( - const mt19937_state & rng_state, - size_t * shuffled_offs, - size_t * shuffled_begins, - size_t * shuffled_sizes, - const size_t * begins, - const size_t * sizes, - size_t count); - -size_t hash_combine(size_t h1, size_t h2); - -size_t compute_samples_hash( - const char* fn, - const size_t* samples_begin, - const size_t* samples_size, - size_t sample_count); - - -std::string 
replace_str(const char * s, const char * needle, const char * replacement); - -void print_duration(double milliseconds); - -float cosine_decay( - int64_t step, - int64_t decay_steps, - float minimum); - -float cosine_decay_restart( - int64_t step, - int64_t decay_steps, - float minimum, - float restart_step_mult); - -float learning_schedule( - int64_t step, - int64_t warmup_steps, - int64_t decay_steps, - float learning_rate, - float overall_minimum, - float cos_decay_minimum, - float cos_decay_restart_step_mult, - bool enable_restart); - -void copy_tensor_by_name(struct ggml_tensor * dst, struct ggml_context * ctx, const char * name); - -void load_opt_context_gguf(struct gguf_context * fctx, struct ggml_context * f_ggml_ctx, struct ggml_opt_context * opt); -void save_opt_context_gguf(struct gguf_context * fctx, struct ggml_opt_context * opt); - -bool load_train_state_gguf(struct gguf_context * fctx, struct ggml_context * f_ggml_ctx, struct train_state * train); -void save_train_state_gguf(struct gguf_context * fctx, struct train_state * train); - -std::string get_train_filename(const char * filename, const char * pattern_it, const char * latest, int64_t iteration); - -void train_opt_callback(void * vdata, int accum_step, float * sched, bool * cancel); diff --git a/spaces/Illumotion/Koboldcpp/grammars/README.md b/spaces/Illumotion/Koboldcpp/grammars/README.md deleted file mode 100644 index 7f3b11ca5b592235e11a06eec92268f7de8da94f..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/grammars/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# GBNF Guide - -GBNF (GGML BNF) is a format for defining [formal grammars](https://en.wikipedia.org/wiki/Formal_grammar) to constrain model outputs in `llama.cpp`. For example, you can use it to force the model to generate valid JSON, or speak only in emojis. GBNF grammars are supported in various ways in `examples/main` and `examples/server`. - -## Background - -[Bakus-Naur Form (BNF)](https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form) is a notation for describing the syntax of formal languages like programming languages, file formats, and protocols. GBNF is an extension of BNF that primarily adds a few modern regex-like features. - -## Basics - -In GBNF, we define *production rules* that specify how a *non-terminal* (rule name) can be replaced with sequences of *terminals* (characters, specifically Unicode [code points](https://en.wikipedia.org/wiki/Code_point)) and other non-terminals. The basic format of a production rule is `nonterminal ::= sequence...`. - -## Example - -Before going deeper, let's look at some of the features demonstrated in `grammars/chess.gbnf`, a small chess notation grammar: -``` -# `root` specifies the pattern for the overall output -root ::= ( - # it must start with the characters "1. " followed by a sequence - # of characters that match the `move` rule, followed by a space, followed - # by another move, and then a newline - "1. " move " " move "\n" - - # it's followed by one or more subsequent moves, numbered with one or two digits - ([1-9] [0-9]? ". " move " " move "\n")+ -) - -# `move` is an abstract representation, which can be a pawn, nonpawn, or castle. -# The `[+#]?` denotes the possibility of checking or mate signs after moves -move ::= (pawn | nonpawn | castle) [+#]? - -pawn ::= ... -nonpawn ::= ... -castle ::= ... -``` - -## Non-Terminals and Terminals - -Non-terminal symbols (rule names) stand for a pattern of terminals and other non-terminals. 
They are required to be a dashed lowercase word, like `move`, `castle`, or `check-mate`. - - Terminals are actual characters ([code points](https://en.wikipedia.org/wiki/Code_point)). They can be specified as a sequence like `"1"` or `"O-O"` or as ranges like `[1-9]` or `[NBKQR]`. - - ## Characters and character ranges - - Terminals support the full range of Unicode. Unicode characters can be specified directly in the grammar, for example `hiragana ::= [ぁ-ゟ]`, or with escapes: 8-bit (`\xXX`), 16-bit (`\uXXXX`) or 32-bit (`\UXXXXXXXX`). - - Character ranges can be negated with `^`: - ``` - single-line ::= [^\n]+ "\n" - ``` - - ## Sequences and Alternatives - - The order of symbols in a sequence matters. For example, in `"1. " move " " move "\n"`, the `"1. "` must come before the first `move`, etc. - - Alternatives, denoted by `|`, give different sequences that are acceptable. For example, in `move ::= pawn | nonpawn | castle`, `move` can be a `pawn` move, a `nonpawn` move, or a `castle`. - - Parentheses `()` can be used to group sequences, which allows for embedding alternatives in a larger rule or applying repetition and optional symbols (below) to a sequence. - - ## Repetition and Optional Symbols - - - `*` after a symbol or sequence means that it can be repeated zero or more times. - - `+` denotes that the symbol or sequence should appear one or more times. - - `?` makes the preceding symbol or sequence optional. - - ## Comments and newlines - - Comments can be specified with `#`: - ``` - # defines optional whitespace - ws ::= [ \t\n]+ - ``` - - Newlines are allowed between rules and between symbols or sequences nested inside parentheses. Additionally, a newline after an alternate marker `|` will continue the current rule, even outside of parentheses. - - ## The root rule - - In a full grammar, the `root` rule always defines the starting point of the grammar. In other words, it specifies what the entire output must match. - - ``` - # a grammar for lists - root ::= ("- " item)+ - item ::= [^\n]+ "\n" - ``` - - ## Next steps - - This guide provides a brief overview. Check out the GBNF files in this directory (`grammars/`) for examples of full grammars. You can try them out with: - ``` - ./main -m --grammar-file grammars/some-grammar.gbnf -p 'Some prompt' - ``` diff --git a/spaces/Illumotion/Koboldcpp/include/CL/cl_d3d11.h b/spaces/Illumotion/Koboldcpp/include/CL/cl_d3d11.h deleted file mode 100644 index 9393e5c84cacf2894a9f3cf83968414f34fb69a2..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/include/CL/cl_d3d11.h +++ /dev/null @@ -1,156 +0,0 @@ -/******************************************************************************* - * Copyright (c) 2008-2020 The Khronos Group Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- ******************************************************************************/ - -#ifndef __OPENCL_CL_D3D11_H -#define __OPENCL_CL_D3D11_H - -#if defined(_MSC_VER) -#if _MSC_VER >=1500 -#pragma warning( push ) -#pragma warning( disable : 4201 ) -#pragma warning( disable : 5105 ) -#endif -#endif -#include -#if defined(_MSC_VER) -#if _MSC_VER >=1500 -#pragma warning( pop ) -#endif -#endif -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/****************************************************************************** - * cl_khr_d3d11_sharing */ -#define cl_khr_d3d11_sharing 1 - -typedef cl_uint cl_d3d11_device_source_khr; -typedef cl_uint cl_d3d11_device_set_khr; - -/******************************************************************************/ - -/* Error Codes */ -#define CL_INVALID_D3D11_DEVICE_KHR -1006 -#define CL_INVALID_D3D11_RESOURCE_KHR -1007 -#define CL_D3D11_RESOURCE_ALREADY_ACQUIRED_KHR -1008 -#define CL_D3D11_RESOURCE_NOT_ACQUIRED_KHR -1009 - -/* cl_d3d11_device_source */ -#define CL_D3D11_DEVICE_KHR 0x4019 -#define CL_D3D11_DXGI_ADAPTER_KHR 0x401A - -/* cl_d3d11_device_set */ -#define CL_PREFERRED_DEVICES_FOR_D3D11_KHR 0x401B -#define CL_ALL_DEVICES_FOR_D3D11_KHR 0x401C - -/* cl_context_info */ -#define CL_CONTEXT_D3D11_DEVICE_KHR 0x401D -#define CL_CONTEXT_D3D11_PREFER_SHARED_RESOURCES_KHR 0x402D - -/* cl_mem_info */ -#define CL_MEM_D3D11_RESOURCE_KHR 0x401E - -/* cl_image_info */ -#define CL_IMAGE_D3D11_SUBRESOURCE_KHR 0x401F - -/* cl_command_type */ -#define CL_COMMAND_ACQUIRE_D3D11_OBJECTS_KHR 0x4020 -#define CL_COMMAND_RELEASE_D3D11_OBJECTS_KHR 0x4021 - -/******************************************************************************/ - -typedef cl_int (CL_API_CALL *clGetDeviceIDsFromD3D11KHR_fn)( - cl_platform_id platform, - cl_d3d11_device_source_khr d3d_device_source, - void * d3d_object, - cl_d3d11_device_set_khr d3d_device_set, - cl_uint num_entries, - cl_device_id * devices, - cl_uint * num_devices) CL_API_SUFFIX__VERSION_1_2; - -typedef cl_mem (CL_API_CALL *clCreateFromD3D11BufferKHR_fn)( - cl_context context, - cl_mem_flags flags, - ID3D11Buffer * resource, - cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; - -typedef cl_mem (CL_API_CALL *clCreateFromD3D11Texture2DKHR_fn)( - cl_context context, - cl_mem_flags flags, - ID3D11Texture2D * resource, - UINT subresource, - cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; - -typedef cl_mem (CL_API_CALL *clCreateFromD3D11Texture3DKHR_fn)( - cl_context context, - cl_mem_flags flags, - ID3D11Texture3D * resource, - UINT subresource, - cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; - -typedef cl_int (CL_API_CALL *clEnqueueAcquireD3D11ObjectsKHR_fn)( - cl_command_queue command_queue, - cl_uint num_objects, - const cl_mem * mem_objects, - cl_uint num_events_in_wait_list, - const cl_event * event_wait_list, - cl_event * event) CL_API_SUFFIX__VERSION_1_2; - -typedef cl_int (CL_API_CALL *clEnqueueReleaseD3D11ObjectsKHR_fn)( - cl_command_queue command_queue, - cl_uint num_objects, - const cl_mem * mem_objects, - cl_uint num_events_in_wait_list, - const cl_event * event_wait_list, - cl_event * event) CL_API_SUFFIX__VERSION_1_2; - -/*************************************************************** -* cl_intel_sharing_format_query_d3d11 -***************************************************************/ -#define cl_intel_sharing_format_query_d3d11 1 - -/* when cl_khr_d3d11_sharing is supported */ - -extern CL_API_ENTRY cl_int CL_API_CALL -clGetSupportedD3D11TextureFormatsINTEL( - cl_context context, - cl_mem_flags 
flags, - cl_mem_object_type image_type, - cl_uint plane, - cl_uint num_entries, - DXGI_FORMAT* d3d11_formats, - cl_uint* num_texture_formats) ; - -typedef cl_int (CL_API_CALL * -clGetSupportedD3D11TextureFormatsINTEL_fn)( - cl_context context, - cl_mem_flags flags, - cl_mem_object_type image_type, - cl_uint plane, - cl_uint num_entries, - DXGI_FORMAT* d3d11_formats, - cl_uint* num_texture_formats) ; - -#ifdef __cplusplus -} -#endif - -#endif /* __OPENCL_CL_D3D11_H */ - diff --git a/spaces/Jamkonams/AutoGPT/autogpt/json_utils/__init__.py b/spaces/Jamkonams/AutoGPT/autogpt/json_utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/JohnnyPittt/audio-styling/deepafx_st/processors/autodiff/peq.py b/spaces/JohnnyPittt/audio-styling/deepafx_st/processors/autodiff/peq.py deleted file mode 100644 index 04e35bbe92ed5cfb780c4ad740e1ba75f64e5b03..0000000000000000000000000000000000000000 --- a/spaces/JohnnyPittt/audio-styling/deepafx_st/processors/autodiff/peq.py +++ /dev/null @@ -1,274 +0,0 @@ -import torch - -import deepafx_st.processors.autodiff.signal -from deepafx_st.processors.processor import Processor - - -@torch.jit.script -def parametric_eq( - x: torch.Tensor, - sample_rate: float, - low_shelf_gain_dB: torch.Tensor, - low_shelf_cutoff_freq: torch.Tensor, - low_shelf_q_factor: torch.Tensor, - first_band_gain_dB: torch.Tensor, - first_band_cutoff_freq: torch.Tensor, - first_band_q_factor: torch.Tensor, - second_band_gain_dB: torch.Tensor, - second_band_cutoff_freq: torch.Tensor, - second_band_q_factor: torch.Tensor, - third_band_gain_dB: torch.Tensor, - third_band_cutoff_freq: torch.Tensor, - third_band_q_factor: torch.Tensor, - fourth_band_gain_dB: torch.Tensor, - fourth_band_cutoff_freq: torch.Tensor, - fourth_band_q_factor: torch.Tensor, - high_shelf_gain_dB: torch.Tensor, - high_shelf_cutoff_freq: torch.Tensor, - high_shelf_q_factor: torch.Tensor, -): - """Six-band parametric EQ. - - Low-shelf -> Band 1 -> Band 2 -> Band 3 -> Band 4 -> High-shelf - - Args: - x (torch.Tensor): 1d signal. 
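        sample_rate (float): Sample rate in Hz.
        *_gain_dB (torch.Tensor): Gain in dB for the low-shelf, the four peaking
            bands, and the high-shelf, applied in that order.
        *_cutoff_freq (torch.Tensor): Cutoff / center frequency in Hz for each section.
        *_q_factor (torch.Tensor): Q factor for each section.

    Returns:
        torch.Tensor: The filtered 1d signal.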
- - - """ - a_s, b_s = [], [] - #print(f"autodiff peq fs = {sample_rate}") - - # -------- apply low-shelf filter -------- - b, a = deepafx_st.processors.autodiff.signal.biqaud( - low_shelf_gain_dB, - low_shelf_cutoff_freq, - low_shelf_q_factor, - sample_rate, - "low_shelf", - ) - b_s.append(b) - a_s.append(a) - - # -------- apply first-band peaking filter -------- - b, a = deepafx_st.processors.autodiff.signal.biqaud( - first_band_gain_dB, - first_band_cutoff_freq, - first_band_q_factor, - sample_rate, - "peaking", - ) - b_s.append(b) - a_s.append(a) - - # -------- apply second-band peaking filter -------- - b, a = deepafx_st.processors.autodiff.signal.biqaud( - second_band_gain_dB, - second_band_cutoff_freq, - second_band_q_factor, - sample_rate, - "peaking", - ) - b_s.append(b) - a_s.append(a) - - # -------- apply third-band peaking filter -------- - b, a = deepafx_st.processors.autodiff.signal.biqaud( - third_band_gain_dB, - third_band_cutoff_freq, - third_band_q_factor, - sample_rate, - "peaking", - ) - b_s.append(b) - a_s.append(a) - - # -------- apply fourth-band peaking filter -------- - b, a = deepafx_st.processors.autodiff.signal.biqaud( - fourth_band_gain_dB, - fourth_band_cutoff_freq, - fourth_band_q_factor, - sample_rate, - "peaking", - ) - b_s.append(b) - a_s.append(a) - - # -------- apply high-shelf filter -------- - b, a = deepafx_st.processors.autodiff.signal.biqaud( - high_shelf_gain_dB, - high_shelf_cutoff_freq, - high_shelf_q_factor, - sample_rate, - "high_shelf", - ) - b_s.append(b) - a_s.append(a) - - x = deepafx_st.processors.autodiff.signal.approx_iir_filter_cascade( - b_s, a_s, x.view(-1) - ) - - return x - - -class ParametricEQ(Processor): - def __init__( - self, - sample_rate, - min_gain_dB=-24.0, - default_gain_dB=0.0, - max_gain_dB=24.0, - min_q_factor=0.1, - default_q_factor=0.707, - max_q_factor=10, - eps=1e-8, - ): - """ """ - super().__init__() - self.sample_rate = sample_rate - self.eps = eps - self.ports = [ - { - "name": "Lowshelf gain", - "min": min_gain_dB, - "max": max_gain_dB, - "default": default_gain_dB, - "units": "dB", - }, - { - "name": "Lowshelf cutoff", - "min": 20.0, - "max": 200.0, - "default": 100.0, - "units": "Hz", - }, - { - "name": "Lowshelf Q", - "min": min_q_factor, - "max": max_q_factor, - "default": default_q_factor, - "units": "", - }, - { - "name": "First band gain", - "min": min_gain_dB, - "max": max_gain_dB, - "default": default_gain_dB, - "units": "dB", - }, - { - "name": "First band cutoff", - "min": 200.0, - "max": 2000.0, - "default": 400.0, - "units": "Hz", - }, - { - "name": "First band Q", - "min": min_q_factor, - "max": max_q_factor, - "default": 0.707, - "units": "", - }, - { - "name": "Second band gain", - "min": min_gain_dB, - "max": max_gain_dB, - "default": default_gain_dB, - "units": "dB", - }, - { - "name": "Second band cutoff", - "min": 200.0, - "max": 4000.0, - "default": 1000.0, - "units": "Hz", - }, - { - "name": "Second band Q", - "min": min_q_factor, - "max": max_q_factor, - "default": default_q_factor, - "units": "", - }, - { - "name": "Third band gain", - "min": min_gain_dB, - "max": max_gain_dB, - "default": default_gain_dB, - "units": "dB", - }, - { - "name": "Third band cutoff", - "min": 2000.0, - "max": 8000.0, - "default": 4000.0, - "units": "Hz", - }, - { - "name": "Third band Q", - "min": min_q_factor, - "max": max_q_factor, - "default": default_q_factor, - "units": "", - }, - { - "name": "Fourth band gain", - "min": min_gain_dB, - "max": max_gain_dB, - "default": default_gain_dB, - "units": "dB", - 
}, - { - "name": "Fourth band cutoff", - "min": 4000.0, - "max": (24000 // 2) * 0.9, - "default": 8000.0, - "units": "Hz", - }, - { - "name": "Fourth band Q", - "min": min_q_factor, - "max": max_q_factor, - "default": default_q_factor, - "units": "", - }, - { - "name": "Highshelf gain", - "min": min_gain_dB, - "max": max_gain_dB, - "default": default_gain_dB, - "units": "dB", - }, - { - "name": "Highshelf cutoff", - "min": 4000.0, - "max": (24000 // 2) * 0.9, - "default": 8000.0, - "units": "Hz", - }, - { - "name": "Highshelf Q", - "min": min_q_factor, - "max": max_q_factor, - "default": default_q_factor, - "units": "", - }, - ] - - self.num_control_params = len(self.ports) - - def forward(self, x, p, sample_rate=24000, **kwargs): - - bs, chs, s = x.size() - - inputs = torch.split(x, 1, 0) - params = torch.split(p, 1, 0) - - y = [] # loop over batch dimension - for input, param in zip(inputs, params): - denorm_param = self.denormalize_params(param.view(-1)) - y.append(parametric_eq(input.view(-1), sample_rate, *denorm_param)) - - return torch.stack(y, dim=0).view(bs, 1, -1) diff --git a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/uvr5_pack/lib_v5/nets_61968KB.py b/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/uvr5_pack/lib_v5/nets_61968KB.py deleted file mode 100644 index 167d4cb2198863cf43e93440f7e63c5342fc7605..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/uvr5_pack/lib_v5/nets_61968KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . import layers_123821KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 32) - self.stg1_high_band_net = BaseASPPNet(2, 32) - - self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(16, 32) - - self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(32, 64) - - self.out = nn.Conv2d(64, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h 
= self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/KenjieDec/GPEN/face_model/model.py b/spaces/KenjieDec/GPEN/face_model/model.py deleted file mode 100644 index fe54c123c026b8c0af6fac39815afaea0a7017a4..0000000000000000000000000000000000000000 --- a/spaces/KenjieDec/GPEN/face_model/model.py +++ /dev/null @@ -1,818 +0,0 @@ -''' -@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021) -@author: yangxy (yangtao9009@gmail.com) -''' -import math -import random -import functools -import operator -import itertools - -import torch -from torch import nn -from torch.nn import functional as F -from torch.autograd import Function - -from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d - -class PixelNorm(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, input): - return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8) - - -def make_kernel(k): - k = torch.tensor(k, dtype=torch.float32) - - if k.ndim == 1: - k = k[None, :] * k[:, None] - - k /= k.sum() - - return k - - -class Upsample(nn.Module): - def __init__(self, kernel, factor=2, device='cpu'): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) * (factor ** 2) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 - - self.pad = (pad0, pad1) - self.device = device - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad, device=self.device) - - return out - - -class Downsample(nn.Module): - def __init__(self, kernel, factor=2, device='cpu'): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.pad = (pad0, pad1) - self.device = device - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad, device=self.device) - - return out - - -class Blur(nn.Module): - def __init__(self, kernel, pad, upsample_factor=1, device='cpu'): - super().__init__() - - kernel = make_kernel(kernel) - - if upsample_factor > 1: - kernel = kernel * (upsample_factor ** 2) - - self.register_buffer('kernel', kernel) - - self.pad = pad - self.device = device - - def forward(self, input): - out = upfirdn2d(input, self.kernel, pad=self.pad, device=self.device) - - return out - - -class EqualConv2d(nn.Module): - def __init__( - self, 
in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True - ): - super().__init__() - - self.weight = nn.Parameter( - torch.randn(out_channel, in_channel, kernel_size, kernel_size) - ) - self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) - - self.stride = stride - self.padding = padding - - if bias: - self.bias = nn.Parameter(torch.zeros(out_channel)) - - else: - self.bias = None - - def forward(self, input): - out = F.conv2d( - input, - self.weight * self.scale, - bias=self.bias, - stride=self.stride, - padding=self.padding, - ) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},' - f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})' - ) - - -class EqualLinear(nn.Module): - def __init__( - self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None, device='cpu' - ): - super().__init__() - - self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) - - if bias: - self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) - - else: - self.bias = None - - self.activation = activation - self.device = device - - self.scale = (1 / math.sqrt(in_dim)) * lr_mul - self.lr_mul = lr_mul - - def forward(self, input): - if self.activation: - out = F.linear(input, self.weight * self.scale) - out = fused_leaky_relu(out, self.bias * self.lr_mul, device=self.device) - - else: - out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' - ) - - -class ScaledLeakyReLU(nn.Module): - def __init__(self, negative_slope=0.2): - super().__init__() - - self.negative_slope = negative_slope - - def forward(self, input): - out = F.leaky_relu(input, negative_slope=self.negative_slope) - - return out * math.sqrt(2) - - -class ModulatedConv2d(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - demodulate=True, - upsample=False, - downsample=False, - blur_kernel=[1, 3, 3, 1], - device='cpu' - ): - super().__init__() - - self.eps = 1e-8 - self.kernel_size = kernel_size - self.in_channel = in_channel - self.out_channel = out_channel - self.upsample = upsample - self.downsample = downsample - - if upsample: - factor = 2 - p = (len(blur_kernel) - factor) - (kernel_size - 1) - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 + 1 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor, device=device) - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1), device=device) - - fan_in = in_channel * kernel_size ** 2 - self.scale = 1 / math.sqrt(fan_in) - self.padding = kernel_size // 2 - - self.weight = nn.Parameter( - torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) - ) - - self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) - - self.demodulate = demodulate - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, ' - f'upsample={self.upsample}, downsample={self.downsample})' - ) - - def forward(self, input, style): - batch, in_channel, height, width = input.shape - - style = self.modulation(style).view(batch, 1, in_channel, 1, 1) - weight = self.scale * self.weight * style - - if self.demodulate: - demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) - weight = 
weight * demod.view(batch, self.out_channel, 1, 1, 1) - - weight = weight.view( - batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - - if self.upsample: - input = input.view(1, batch * in_channel, height, width) - weight = weight.view( - batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - weight = weight.transpose(1, 2).reshape( - batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size - ) - out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - out = self.blur(out) - - elif self.downsample: - input = self.blur(input) - _, _, height, width = input.shape - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - else: - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=self.padding, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - return out - - -class NoiseInjection(nn.Module): - def __init__(self, isconcat=True): - super().__init__() - - self.isconcat = isconcat - self.weight = nn.Parameter(torch.zeros(1)) - - def forward(self, image, noise=None): - if noise==None: - batch, channel, height, width = image.shape - noise = image.new_empty(batch, channel, height, width).normal_() - - if self.isconcat: - return torch.cat((image, self.weight * noise), dim=1) - else: - return image + self.weight * noise - - -class ConstantInput(nn.Module): - def __init__(self, channel, size=4): - super().__init__() - - self.input = nn.Parameter(torch.randn(1, channel, size, size)) - - def forward(self, input): - batch = input.shape[0] - out = self.input.repeat(batch, 1, 1, 1) - - return out - - -class StyledConv(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=False, - blur_kernel=[1, 3, 3, 1], - demodulate=True, - isconcat=True, - device='cpu' - ): - super().__init__() - - self.conv = ModulatedConv2d( - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=upsample, - blur_kernel=blur_kernel, - demodulate=demodulate, - device=device - ) - - self.noise = NoiseInjection(isconcat) - #self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) - #self.activate = ScaledLeakyReLU(0.2) - feat_multiplier = 2 if isconcat else 1 - self.activate = FusedLeakyReLU(out_channel*feat_multiplier, device=device) - - def forward(self, input, style, noise=None): - out = self.conv(input, style) - out = self.noise(out, noise=noise) - # out = out + self.bias - out = self.activate(out) - - return out - - -class ToRGB(nn.Module): - def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1], device='cpu'): - super().__init__() - - if upsample: - self.upsample = Upsample(blur_kernel, device=device) - - self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False, device=device) - self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) - - def forward(self, input, style, skip=None): - out = self.conv(input, style) - out = out + self.bias - - if skip is not None: - skip = self.upsample(skip) - - out = out + skip - - return out - -class Generator(nn.Module): - def __init__( - self, - size, - style_dim, - n_mlp, - channel_multiplier=2, - blur_kernel=[1, 3, 3, 1], - lr_mlp=0.01, - isconcat=True, - 
narrow=1, - device='cpu' - ): - super().__init__() - - self.size = size - self.n_mlp = n_mlp - self.style_dim = style_dim - self.feat_multiplier = 2 if isconcat else 1 - - layers = [PixelNorm()] - - for i in range(n_mlp): - layers.append( - EqualLinear( - style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu', device=device - ) - ) - - self.style = nn.Sequential(*layers) - - self.channels = { - 4: int(512 * narrow), - 8: int(512 * narrow), - 16: int(512 * narrow), - 32: int(512 * narrow), - 64: int(256 * channel_multiplier * narrow), - 128: int(128 * channel_multiplier * narrow), - 256: int(64 * channel_multiplier * narrow), - 512: int(32 * channel_multiplier * narrow), - 1024: int(16 * channel_multiplier * narrow), - 2048: int(8 * channel_multiplier * narrow) - } - - self.input = ConstantInput(self.channels[4]) - self.conv1 = StyledConv( - self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel, isconcat=isconcat, device=device - ) - self.to_rgb1 = ToRGB(self.channels[4]*self.feat_multiplier, style_dim, upsample=False, device=device) - - self.log_size = int(math.log(size, 2)) - - self.convs = nn.ModuleList() - self.upsamples = nn.ModuleList() - self.to_rgbs = nn.ModuleList() - - in_channel = self.channels[4] - - for i in range(3, self.log_size + 1): - out_channel = self.channels[2 ** i] - - self.convs.append( - StyledConv( - in_channel*self.feat_multiplier, - out_channel, - 3, - style_dim, - upsample=True, - blur_kernel=blur_kernel, - isconcat=isconcat, - device=device - ) - ) - - self.convs.append( - StyledConv( - out_channel*self.feat_multiplier, out_channel, 3, style_dim, blur_kernel=blur_kernel, isconcat=isconcat, device=device - ) - ) - - self.to_rgbs.append(ToRGB(out_channel*self.feat_multiplier, style_dim, device=device)) - - in_channel = out_channel - - self.n_latent = self.log_size * 2 - 2 - - def make_noise(self): - device = self.input.input.device - - noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)] - - for i in range(3, self.log_size + 1): - for _ in range(2): - noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device)) - - return noises - - def mean_latent(self, n_latent): - latent_in = torch.randn( - n_latent, self.style_dim, device=self.input.input.device - ) - latent = self.style(latent_in).mean(0, keepdim=True) - - return latent - - def get_latent(self, input): - return self.style(input) - - def forward( - self, - styles, - return_latents=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - noise=None, - ): - if not input_is_latent: - styles = [self.style(s) for s in styles] - - if noise==None: - ''' - noise = [None] * (2 * (self.log_size - 2) + 1) - ''' - noise = [] - batch = styles[0].shape[0] - for i in range(self.n_mlp + 1): - size = 2 ** (i+2) - noise.append(torch.randn(batch, self.channels[size], size, size, device=styles[0].device)) - - if truncation < 1: - style_t = [] - - for style in styles: - style_t.append( - truncation_latent + truncation * (style - truncation_latent) - ) - - styles = style_t - - if len(styles) < 2: - inject_index = self.n_latent - - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - - else: - if inject_index==None: - inject_index = random.randint(1, self.n_latent - 1) - - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1) - - latent = torch.cat([latent, latent2], 1) - - out = self.input(latent) - out = self.conv1(out, latent[:, 0], noise=noise[0]) - - skip = 
self.to_rgb1(out, latent[:, 1]) - - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs - ): - out = conv1(out, latent[:, i], noise=noise1) - out = conv2(out, latent[:, i + 1], noise=noise2) - skip = to_rgb(out, latent[:, i + 2], skip) - - i += 2 - - image = skip - - if return_latents: - return image, latent - - else: - return image, None - -class ConvLayer(nn.Sequential): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - downsample=False, - blur_kernel=[1, 3, 3, 1], - bias=True, - activate=True, - device='cpu' - ): - layers = [] - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - layers.append(Blur(blur_kernel, pad=(pad0, pad1), device=device)) - - stride = 2 - self.padding = 0 - - else: - stride = 1 - self.padding = kernel_size // 2 - - layers.append( - EqualConv2d( - in_channel, - out_channel, - kernel_size, - padding=self.padding, - stride=stride, - bias=bias and not activate, - ) - ) - - if activate: - if bias: - layers.append(FusedLeakyReLU(out_channel, device=device)) - - else: - layers.append(ScaledLeakyReLU(0.2)) - - super().__init__(*layers) - - -class ResBlock(nn.Module): - def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1], device='cpu'): - super().__init__() - - self.conv1 = ConvLayer(in_channel, in_channel, 3, device=device) - self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True, device=device) - - self.skip = ConvLayer( - in_channel, out_channel, 1, downsample=True, activate=False, bias=False - ) - - def forward(self, input): - out = self.conv1(input) - out = self.conv2(out) - - skip = self.skip(input) - out = (out + skip) / math.sqrt(2) - - return out - -class FullGenerator(nn.Module): - def __init__( - self, - size, - style_dim, - n_mlp, - channel_multiplier=2, - blur_kernel=[1, 3, 3, 1], - lr_mlp=0.01, - isconcat=True, - narrow=1, - device='cpu' - ): - super().__init__() - channels = { - 4: int(512 * narrow), - 8: int(512 * narrow), - 16: int(512 * narrow), - 32: int(512 * narrow), - 64: int(256 * channel_multiplier * narrow), - 128: int(128 * channel_multiplier * narrow), - 256: int(64 * channel_multiplier * narrow), - 512: int(32 * channel_multiplier * narrow), - 1024: int(16 * channel_multiplier * narrow), - 2048: int(8 * channel_multiplier * narrow) - } - - self.log_size = int(math.log(size, 2)) - self.generator = Generator(size, style_dim, n_mlp, channel_multiplier=channel_multiplier, blur_kernel=blur_kernel, lr_mlp=lr_mlp, isconcat=isconcat, narrow=narrow, device=device) - - conv = [ConvLayer(3, channels[size], 1, device=device)] - self.ecd0 = nn.Sequential(*conv) - in_channel = channels[size] - - self.names = ['ecd%d'%i for i in range(self.log_size-1)] - for i in range(self.log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - #conv = [ResBlock(in_channel, out_channel, blur_kernel)] - conv = [ConvLayer(in_channel, out_channel, 3, downsample=True, device=device)] - setattr(self, self.names[self.log_size-i+1], nn.Sequential(*conv)) - in_channel = out_channel - self.final_linear = nn.Sequential(EqualLinear(channels[4] * 4 * 4, style_dim, activation='fused_lrelu', device=device)) - - def forward(self, - inputs, - return_latents=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - ): - noise = [] - for i in range(self.log_size-1): - ecd = getattr(self, self.names[i]) - inputs = ecd(inputs) - noise.append(inputs) - 
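            # Each encoder stage halves the spatial resolution, and the intermediate
            # feature maps collected in `noise` are later repeated twice each, reversed,
            # and fed to the StyleGAN-style generator as its per-layer noise inputs
            # (see the `noise=noise[1:]` argument below) in place of random noise.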
#print(inputs.shape) - inputs = inputs.view(inputs.shape[0], -1) - outs = self.final_linear(inputs) - #print(outs.shape) - noise = list(itertools.chain.from_iterable(itertools.repeat(x, 2) for x in noise))[::-1] - outs = self.generator([outs], return_latents, inject_index, truncation, truncation_latent, input_is_latent, noise=noise[1:]) - return outs - -class Discriminator(nn.Module): - def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], narrow=1, device='cpu'): - super().__init__() - - channels = { - 4: int(512 * narrow), - 8: int(512 * narrow), - 16: int(512 * narrow), - 32: int(512 * narrow), - 64: int(256 * channel_multiplier * narrow), - 128: int(128 * channel_multiplier * narrow), - 256: int(64 * channel_multiplier * narrow), - 512: int(32 * channel_multiplier * narrow), - 1024: int(16 * channel_multiplier * narrow), - 2048: int(8 * channel_multiplier * narrow) - } - - convs = [ConvLayer(3, channels[size], 1, device=device)] - - log_size = int(math.log(size, 2)) - - in_channel = channels[size] - - for i in range(log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - - convs.append(ResBlock(in_channel, out_channel, blur_kernel, device=device)) - - in_channel = out_channel - - self.convs = nn.Sequential(*convs) - - self.stddev_group = 4 - self.stddev_feat = 1 - - self.final_conv = ConvLayer(in_channel + 1, channels[4], 3, device=device) - self.final_linear = nn.Sequential( - EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu', device=device), - EqualLinear(channels[4], 1), - ) - - def forward(self, input): - out = self.convs(input) - - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - - out = out.view(batch, -1) - out = self.final_linear(out) - return out - -class FullGenerator_SR(nn.Module): - def __init__( - self, - size, - out_size, - style_dim, - n_mlp, - channel_multiplier=2, - blur_kernel=[1, 3, 3, 1], - lr_mlp=0.01, - isconcat=True, - narrow=1, - device='cpu' - ): - super().__init__() - channels = { - 4: int(512 * narrow), - 8: int(512 * narrow), - 16: int(512 * narrow), - 32: int(512 * narrow), - 64: int(256 * channel_multiplier * narrow), - 128: int(128 * channel_multiplier * narrow), - 256: int(64 * channel_multiplier * narrow), - 512: int(32 * channel_multiplier * narrow), - 1024: int(16 * channel_multiplier * narrow), - 2048: int(8 * channel_multiplier * narrow), - } - - self.log_insize = int(math.log(size, 2)) - self.log_outsize = int(math.log(out_size, 2)) - self.generator = Generator(out_size, style_dim, n_mlp, channel_multiplier=channel_multiplier, blur_kernel=blur_kernel, lr_mlp=lr_mlp, isconcat=isconcat, narrow=narrow, device=device) - - conv = [ConvLayer(3, channels[size], 1, device=device)] - self.ecd0 = nn.Sequential(*conv) - in_channel = channels[size] - - self.names = ['ecd%d'%i for i in range(self.log_insize-1)] - for i in range(self.log_insize, 2, -1): - out_channel = channels[2 ** (i - 1)] - #conv = [ResBlock(in_channel, out_channel, blur_kernel)] - conv = [ConvLayer(in_channel, out_channel, 3, downsample=True, device=device)] - setattr(self, self.names[self.log_insize-i+1], nn.Sequential(*conv)) - in_channel = out_channel - self.final_linear = 
nn.Sequential(EqualLinear(channels[4] * 4 * 4, style_dim, activation='fused_lrelu', device=device)) - - def forward(self, - inputs, - return_latents=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - ): - noise = [] - for i in range(self.log_outsize-self.log_insize): - noise.append(None) - for i in range(self.log_insize-1): - ecd = getattr(self, self.names[i]) - inputs = ecd(inputs) - noise.append(inputs) - #print(inputs.shape) - inputs = inputs.view(inputs.shape[0], -1) - outs = self.final_linear(inputs) - #print(outs.shape) - noise = list(itertools.chain.from_iterable(itertools.repeat(x, 2) for x in noise))[::-1] - image, latent = self.generator([outs], return_latents, inject_index, truncation, truncation_latent, input_is_latent, noise=noise[1:]) - return image, latent \ No newline at end of file diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg_extractor/encoder/convolution.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg_extractor/encoder/convolution.py deleted file mode 100644 index 2d2c399e406aae97a6baf0f7de379a1d90a97949..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg_extractor/encoder/convolution.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# Copyright 2020 Johns Hopkins University (Shinji Watanabe) -# Northwestern Polytechnical University (Pengcheng Guo) -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""ConvolutionModule definition.""" - -from torch import nn - - -class ConvolutionModule(nn.Module): - """ConvolutionModule in Conformer model. - - :param int channels: channels of cnn - :param int kernel_size: kernerl size of cnn - - """ - - def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True): - """Construct an ConvolutionModule object.""" - super(ConvolutionModule, self).__init__() - # kernerl_size should be a odd number for 'SAME' padding - assert (kernel_size - 1) % 2 == 0 - - self.pointwise_conv1 = nn.Conv1d( - channels, - 2 * channels, - kernel_size=1, - stride=1, - padding=0, - bias=bias, - ) - self.depthwise_conv = nn.Conv1d( - channels, - channels, - kernel_size, - stride=1, - padding=(kernel_size - 1) // 2, - groups=channels, - bias=bias, - ) - self.norm = nn.BatchNorm1d(channels) - self.pointwise_conv2 = nn.Conv1d( - channels, - channels, - kernel_size=1, - stride=1, - padding=0, - bias=bias, - ) - self.activation = activation - - def forward(self, x): - """Compute convolution module. 
- - :param torch.Tensor x: (batch, time, size) - :return torch.Tensor: convoluted `value` (batch, time, d_model) - """ - # exchange the temporal dimension and the feature dimension - x = x.transpose(1, 2) - - # GLU mechanism - x = self.pointwise_conv1(x) # (batch, 2*channel, dim) - x = nn.functional.glu(x, dim=1) # (batch, channel, dim) - - # 1D Depthwise Conv - x = self.depthwise_conv(x) - x = self.activation(self.norm(x)) - - x = self.pointwise_conv2(x) - - return x.transpose(1, 2) diff --git a/spaces/KevinQHLin/UniVTG/model/transformer_encoder_droppath.py b/spaces/KevinQHLin/UniVTG/model/transformer_encoder_droppath.py deleted file mode 100644 index 536d46529a4722c1bf787d85fbace0afe1e3a33b..0000000000000000000000000000000000000000 --- a/spaces/KevinQHLin/UniVTG/model/transformer_encoder_droppath.py +++ /dev/null @@ -1,194 +0,0 @@ -import copy -import pdb -from typing import Optional - -import torch -import torch.nn.functional as F -from torch import nn, Tensor - -def mask_logits(inputs, mask, mask_value=-1e30): - mask = mask.type(torch.float32) - return inputs + (1.0 - mask) * mask_value - - -class Transformer(nn.Module): - - def __init__(self, d_model=512, nhead=8, num_encoder_layers=4, - num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, droppath=0.1, - activation="gelu", normalize_before=False, # False as default - return_intermediate_dec=False): - super().__init__() - - encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, - dropout, droppath, activation, normalize_before) - encoder_norm = nn.LayerNorm(d_model) if normalize_before else None - self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) - - self._reset_parameters() - - self.d_model = d_model - self.nhead = nhead - - def _reset_parameters(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, src, mask, pos_embed): - """ - Args: - src: (batch_size, L, d) - mask: (batch_size, L) - query_embed: (#queries, d) -> my imple (batch_size, d) and #queries=1 - pos_embed: (batch_size, L, d) the same as src - - Returns: - - """ - # flatten NxCxHxW to HWxNxC - src = src.permute(1, 0, 2) # (L, batch_size, d) - pos_embed = pos_embed.permute(1, 0, 2) # (L, batch_size, d) - - memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) - memory = memory.transpose(0, 1) - - return memory - - -class TransformerEncoder(nn.Module): - def __init__(self, encoder_layer, num_layers, norm=None, return_intermediate=False): - super().__init__() - self.layers = _get_clones(encoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - self.return_intermediate = return_intermediate - - def forward(self, src, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - output = src - - intermediate = [] - - for layer in self.layers: - output = layer(output, src_mask=mask, - src_key_padding_mask=src_key_padding_mask, pos=pos) - if self.return_intermediate: - intermediate.append(output) - - if self.norm is not None: - output = self.norm(output) - - if self.return_intermediate: - return torch.stack(intermediate) - - return output - -class TransformerEncoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, droppath=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, 
dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - # self.dropout1 = nn.Dropout(dropout) - # self.dropout2 = nn.Dropout(dropout) - self.droppath1 = DropPath(droppath) - self.droppath2 = DropPath(droppath) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, - src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - q = k = self.with_pos_embed(src, pos) - src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] - # src2 = self.self_attn_eff(q=q, k=k, v=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] - src = src + self.droppath1(src2) - src = self.norm1(src) - src2 = self.linear2(self.activation(self.linear1(src))) - # src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) - src = src + self.droppath2(src2) - src = self.norm2(src) - return src - - def forward(self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(src, src_mask, src_key_padding_mask, pos) - return self.forward_post(src, src_mask, src_key_padding_mask, pos) - - -def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -def build_transformer(args): - return Transformer( - d_model=args.hidden_dim, - dropout=args.dropout, - droppath=args.droppath, - nhead=args.nheads, - dim_feedforward=args.dim_feedforward, - num_encoder_layers=args.enc_layers, - num_decoder_layers=args.dec_layers, - normalize_before=args.pre_norm, - return_intermediate_dec=True, - ) - -def drop_path(x, drop_prob=0.0, training=False): - """ - Stochastic Depth per sample. - """ - if drop_prob == 0.0 or not training: - return x - - keep_prob = 1 - drop_prob - shape = (x.shape[0],) + (1,) * (x.ndim - 1) - mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) - mask.floor_() - x = x.div(keep_prob) * mask - - return x - - -class DropPath(nn.Module): - """ - Drop paths per sample (when applied in main path of residual blocks). 
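    During training, each sample in the batch is dropped (zeroed out) with probability
    `drop_prob` and the surviving samples are rescaled by 1 / (1 - drop_prob), so the
    expected output matches the input; when `drop_prob` is 0 or the module is in eval
    mode it acts as an identity.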
- """ - - def __init__(self, drop_prob=None): - super(DropPath, self).__init__() - - self.drop_prob = drop_prob - - def forward(self, x): - x = x.permute(1, 0, 2) - res = drop_path(x, self.drop_prob, self.training) - return res.permute(1, 0, 2) - # return drop_path(x, self.drop_prob, self.training) - -def _get_activation_fn(activation): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - raise RuntimeError(F"activation should be relu/gelu, not {activation}.") \ No newline at end of file diff --git a/spaces/LavanyaBurlagadda/TChatBotWithPlayHT1/README.md b/spaces/LavanyaBurlagadda/TChatBotWithPlayHT1/README.md deleted file mode 100644 index 2b35a86a7cd5100b8d251c9decdd88edda6f600c..0000000000000000000000000000000000000000 --- a/spaces/LavanyaBurlagadda/TChatBotWithPlayHT1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: TChatBotWithPlayHT1 -emoji: 👁 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/assets/themes/Acat.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/assets/themes/Acat.py deleted file mode 100644 index 32c0dbe3bd221eb97ca329a77fe193d124ce1fdd..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/assets/themes/Acat.py +++ /dev/null @@ -1,313 +0,0 @@ -from __future__ import annotations - -from typing import Iterable -import gradio as gr - -#gr.themes.builder() -from gradio.themes.base import Base -from gradio.themes.utils import colors, fonts, sizes -import time - -class Acat(Base): - def __init__( - self, - *, - primary_hue: colors.Color | str = colors.green, - secondary_hue: colors.Color | str = colors.emerald, - neutral_hue: colors.Color | str = colors.neutral, - spacing_size: sizes.Size | str = sizes.spacing_md, - radius_size: sizes.Size | str = sizes.radius_md, - text_size: sizes.Size | str = sizes.text_lg, - font: fonts.Font - | str - | Iterable[fonts.Font | str] = ( - 'Inter V', - fonts.GoogleFont('Asap'), - 'ui-sans-serif', - 'sans-serif', - ), - font_mono: fonts.Font - | str - | Iterable[fonts.Font | str] = ( - 'ui-monospace', - fonts.GoogleFont("Fira Code"), - 'Consolas', - 'monospace', - ), - ): - super().__init__( - primary_hue=primary_hue, - secondary_hue=secondary_hue, - neutral_hue=neutral_hue, - spacing_size=spacing_size, - radius_size=radius_size, - text_size=text_size, - font=font, - font_mono=font_mono, - ) - self.name= "Acat", - self.secondary_100='#e2effc', - self.secondary_200='#bedff9', - self.secondary_300='#84c5f5', - self.secondary_400='#4eacef', - self.secondary_50='#f1f8fe', - self.secondary_500='#198cde', - self.secondary_600='#0c6ebd', - self.secondary_700='#0b5899', - self.secondary_800='#0e4b7e', - self.secondary_900='#113f69', - self.secondary_950='#0b2846', - self.neutral_100='#e2effc', - self.neutral_200='#bedff9', - self.neutral_300='#84c5f5', - self.neutral_400='#4eacef', - self.neutral_50='#f1f8fe', - self.neutral_500='#198cde', - self.neutral_600='#0c6ebd', - self.neutral_700='#0b5899', - self.neutral_800='#0e4b7e', - self.neutral_900='#113f69', - self.neutral_950='#0b2846', - self.primary_100='#e2effc', - self.primary_200='#bedff9', - self.primary_300='#84c5f5', - self.primary_400='#4eacef', - self.primary_50='#f1f8fe', - self.primary_500='#198cde', - 
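        # Note: the primary_*, secondary_* and neutral_* attributes here are all assigned
        # the same hex values, i.e. one shared blue ramp for all three Gradio color scales.
        # Usage sketch (an assumption, not part of this file): pass an instance to an app,
        # e.g. `gr.Blocks(theme=Acat())`.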
self.primary_600='#0c6ebd', - self.primary_700='#0b5899', - self.primary_800='#0e4b7e', - self.primary_900='#113f69', - self.primary_950='#0b2846', - super().set( - # Blaise - background_fill_primary='#FFFFFF', - background_fill_primary_dark='#000000', - background_fill_secondary='#dce3e8', - background_fill_secondary_dark='#242424', - block_background_fill='#ECF2F7', - block_background_fill_dark='#191919', - block_border_color='#dce3e8', - block_border_color_dark='#242424', - block_border_width='1px', - block_info_text_color='#191919', - block_info_text_color_dark='#ECF2F7', - block_info_text_size='*text_sm', - block_info_text_weight='400', - block_label_background_fill='#ECF2F700', - block_label_background_fill_dark='#19191900', - block_label_border_color='#dce3e8', - block_label_border_color_dark='#242424', - block_label_border_width='1px', - block_label_margin='0', - block_label_padding='*spacing_sm *spacing_lg', - block_label_radius= "calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0", - block_label_right_radius= "0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)", - block_label_shadow='*block_shadow', - block_label_text_color='#4EACEF', - block_label_text_color_dark='#4EACEF', - block_label_text_size='*text_sm', - block_label_text_weight='400', - block_padding='*spacing_xl calc(*spacing_xl + 2px)', - block_radius='*radius_lg', - block_shadow='#FFFFFF00', - block_shadow_dark='#00000000', - block_title_background_fill='#ECF2F700', - block_title_background_fill_dark='#19191900', - block_title_border_color='#dce3e8', - block_title_border_color_dark='#242424', - block_title_border_width='0px', - block_title_padding='0', - block_title_radius='none', - block_title_text_color='#4EACEF', - block_title_text_color_dark='#4EACEF', - block_title_text_size='*text_md', - block_title_text_weight='bold', - body_background_fill="url('https://cdn.discordapp.com/attachments/1151036558389026838/1158634938128670730/image-removebg-preview_1.png') #FFFFFF no-repeat right bottom/auto 30svh padding-box fixed", - body_background_fill_dark="url('https://cdn.discordapp.com/attachments/1151036558389026838/1158634938128670730/image-removebg-preview_1.png') #000000 no-repeat right bottom/auto 30svh padding-box fixed", - body_text_color='#191919', - body_text_color_dark='#ECF2F7', - body_text_color_subdued='#636668', - body_text_color_subdued_dark='#c4c4c4', - body_text_size='*text_md', - body_text_weight='400', - border_color_accent='#dce3e8', - border_color_accent_dark='#242424', - border_color_primary='#dce3e8', - border_color_primary_dark='#242424', - button_border_width='*input_border_width', - button_border_width_dark='*input_border_width', - button_cancel_background_fill='#dce3e8', - button_cancel_background_fill_dark='#242424', - button_cancel_background_fill_hover='#d0d7db', - button_cancel_background_fill_hover_dark='#202020', - button_cancel_border_color='#191919', - button_cancel_border_color_dark='#ECF2F7', - button_cancel_border_color_hover='#202020', - button_cancel_border_color_hover_dark='#a1c3d8', - button_cancel_text_color='#4EACEF', - button_cancel_text_color_dark='#4EACEF', - button_cancel_text_color_hover='#0c6ebd', - button_cancel_text_color_hover_dark='#0c6ebd', - button_large_padding='*spacing_lg calc(2 * *spacing_lg)', - button_large_radius='*radius_lg', - button_large_text_size='*text_lg', - button_large_text_weight='600', - button_primary_background_fill='#4EACEF', - button_primary_background_fill_dark='#4EACEF', - button_primary_background_fill_hover='#0c6ebd', - 
button_primary_background_fill_hover_dark='#0c6ebd', - button_primary_border_color='#191919', - button_primary_border_color_dark='#ECF2F7', - button_primary_border_color_hover='#202020', - button_primary_border_color_hover_dark='#a1c3d8', - button_primary_text_color='#ECF2F7', - button_primary_text_color_dark='#191919', - button_primary_text_color_hover='#e1eaf0', - button_primary_text_color_hover_dark='#141414', - button_secondary_background_fill='#dce3e8', - button_secondary_background_fill_dark='#242424', - button_secondary_background_fill_hover='#d0d7db', - button_secondary_background_fill_hover_dark='#202020', - button_secondary_border_color='#dce3e8', - button_secondary_border_color_dark='#242424', - button_secondary_border_color_hover='#d0d7db', - button_secondary_border_color_hover_dark='#202020', - button_secondary_text_color='#4EACEF', - button_secondary_text_color_dark='#4EACEF', - button_secondary_text_color_hover='#0c6ebd', - button_secondary_text_color_hover_dark='#0c6ebd', - button_shadow='none', - button_shadow_active='none', - button_shadow_hover='none', - button_small_padding='*spacing_sm calc(2 * *spacing_sm)', - button_small_radius='*radius_lg', - button_small_text_size='*text_md', - button_small_text_weight='400', - button_transition='background-color 0.2s ease', - chatbot_code_background_color='#FFFFFF', - chatbot_code_background_color_dark='#000000', - checkbox_background_color='#dce3e8', - checkbox_background_color_dark='#242424', - checkbox_background_color_focus='#dce3e8', - checkbox_background_color_focus_dark='#242424', - checkbox_background_color_hover='#dce3e8', - checkbox_background_color_hover_dark='#242424', - checkbox_background_color_selected='#4EACEF', - checkbox_background_color_selected_dark='#4EACEF', - checkbox_border_color='#dce3e8', - checkbox_border_color_dark='#242424', - checkbox_border_color_focus='#4EACEF', - checkbox_border_color_focus_dark='#4EACEF', - checkbox_border_color_hover='#4EACEF', - checkbox_border_color_hover_dark='#4EACEF', - checkbox_border_color_selected='#4EACEF', - checkbox_border_color_selected_dark='#4EACEF', - checkbox_border_radius='*radius_sm', - checkbox_border_width='1px', - checkbox_border_width_dark='1px', - checkbox_check= "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e\")", - checkbox_label_background_fill='#ECF2F7', - checkbox_label_background_fill_dark='#191919', - checkbox_label_background_fill_hover='#dce3e8', - checkbox_label_background_fill_hover_dark='#242424', - checkbox_label_background_fill_selected='#dce3e8', - checkbox_label_background_fill_selected_dark='#242424', - checkbox_label_border_color='#dce3e8', - checkbox_label_border_color_dark='#242424', - checkbox_label_border_color_hover='#4EACEF', - checkbox_label_border_color_hover_dark='#4EACEF', - checkbox_label_border_width='1px', - checkbox_label_border_width_dark='1px', - checkbox_label_gap='*spacing_lg', - checkbox_label_padding='*spacing_md calc(2 * *spacing_md)', - checkbox_label_shadow='none', - checkbox_label_text_color='#191919', - checkbox_label_text_color_dark='#ECF2F7', - checkbox_label_text_color_selected='#4EACEF', - checkbox_label_text_color_selected_dark='#4EACEF', - checkbox_label_text_size='*text_md', - checkbox_label_text_weight='400', - checkbox_shadow='*input_shadow', - color_accent='*primary_500', - color_accent_soft='#dce3e8', - 
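        # This super().set(...) call overrides individual design tokens (CSS
        # variables) of the base Gradio theme. Illustrative usage sketch only,
        # assuming the standard gr.Blocks(theme=...) API; none of the names
        # below are defined in this file:
        #
        #   import gradio as gr
        #   with gr.Blocks(theme=Acat()) as demo:
        #       gr.Markdown("Aesthetic RVC Inference")
        #   demo.launch()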
color_accent_soft_dark='#242424', - container_radius='*radius_lg', - embed_radius='*radius_lg', - error_background_fill='#dce3e8', - error_background_fill_dark='#242424', - error_border_color='#191919', - error_border_color_dark='#ECF2F7', - error_border_width='1px', - error_border_width_dark='1px', - error_text_color='#4EACEF', - error_text_color_dark='#4EACEF', - form_gap_width='0px', - input_background_fill='#dce3e8', - input_background_fill_dark='#242424', - input_background_fill_focus='#dce3e8', - input_background_fill_focus_dark='#242424', - input_background_fill_hover='#d0d7db', - input_background_fill_hover_dark='#202020', - input_border_color='#191919', - input_border_color_dark='#ECF2F7', - input_border_color_focus='#191919', - input_border_color_focus_dark='#ECF2F7', - input_border_color_hover='#202020', - input_border_color_hover_dark='#a1c3d8', - input_border_width='0px', - input_padding='*spacing_xl', - input_placeholder_color='#19191930', - input_placeholder_color_dark='#ECF2F730', - input_radius='*radius_lg', - input_shadow='#19191900', - input_shadow_dark='#ECF2F700', - input_shadow_focus='#19191900', - input_shadow_focus_dark='#ECF2F700', - input_text_size='*text_md', - input_text_weight='400', - layout_gap='*spacing_xxl', - link_text_color='#4EACEF', - link_text_color_active='#4EACEF', - link_text_color_active_dark='#4EACEF', - link_text_color_dark='#4EACEF', - link_text_color_hover='#0c6ebd', - link_text_color_hover_dark='#0c6ebd', - link_text_color_visited='#4EACEF', - link_text_color_visited_dark='#4EACEF', - loader_color='#4EACEF', - loader_color_dark='#4EACEF', - - panel_background_fill='#ECF2F7', - panel_background_fill_dark='#191919', - panel_border_color='#4EACEF', - panel_border_color_dark='#4EACEF', - panel_border_width='0', - - prose_header_text_weight='600', - prose_text_size='*text_md', - prose_text_weight='400', - radio_circle= "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e\")", - section_header_text_size='*text_md', - section_header_text_weight='400', - shadow_drop='rgba(0,0,0,0.05) 0px 1px 2px 0px', - shadow_drop_lg='0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)', - shadow_inset='rgba(0,0,0,0.05) 0px 2px 4px 0px inset', - shadow_spread='#FFFFFF', - shadow_spread_dark='#000000', - slider_color='#4EACEF', - slider_color_dark='#4EACEF', - stat_background_fill='#4EACEF', - stat_background_fill_dark='#4EACEF', - table_border_color='#191919', - table_border_color_dark='#ECF2F7', - table_even_background_fill='#ECF2F7', - table_even_background_fill_dark='#191919', - table_odd_background_fill='#dce3e8', - table_odd_background_fill_dark='#242424', - table_radius='*radius_lg', - table_row_focus='#191919', - table_row_focus_dark='#ECF2F7', - - ) - diff --git a/spaces/Lianjd/stock_dashboard/backtrader/order.py b/spaces/Lianjd/stock_dashboard/backtrader/order.py deleted file mode 100644 index 458a35e9fbba0cafb4e90484dbb405b7a1c73aba..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/order.py +++ /dev/null @@ -1,641 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the 
License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import collections -from copy import copy -import datetime -import itertools - -from .utils.py3 import range, with_metaclass, iteritems - -from .metabase import MetaParams -from .utils import AutoOrderedDict - - -class OrderExecutionBit(object): - ''' - Intended to hold information about order execution. A "bit" does not - determine if the order has been fully/partially executed, it just holds - information. - - Member Attributes: - - - dt: datetime (float) execution time - - size: how much was executed - - price: execution price - - closed: how much of the execution closed an existing position - - opened: how much of the execution opened a new position - - openedvalue: market value of the "opened" part - - closedvalue: market value of the "closed" part - - closedcomm: commission for the "closed" part - - openedcomm: commission for the "opened" part - - - value: market value for the entire bit size - - comm: commission for the entire bit execution - - pnl: pnl generated by this bit (if something was closed) - - - psize: current open position size - - pprice: current open position price - - ''' - - def __init__(self, - dt=None, size=0, price=0.0, - closed=0, closedvalue=0.0, closedcomm=0.0, - opened=0, openedvalue=0.0, openedcomm=0.0, - pnl=0.0, - psize=0, pprice=0.0): - - self.dt = dt - self.size = size - self.price = price - - self.closed = closed - self.opened = opened - self.closedvalue = closedvalue - self.openedvalue = openedvalue - self.closedcomm = closedcomm - self.openedcomm = openedcomm - - self.value = closedvalue + openedvalue - self.comm = closedcomm + openedcomm - self.pnl = pnl - - self.psize = psize - self.pprice = pprice - - -class OrderData(object): - ''' - Holds actual order data for Creation and Execution. - - In the case of Creation it holds the request made and in the case of Execution the - actual outcome. - - Member Attributes: - - - exbits : iterable of OrderExecutionBits for this OrderData - - - dt: datetime (float) creation/execution time - - size: requested/executed size - - price: execution price - Note: if no price is given and no pricelimit is given, the closing - price at the time of order creation will be used as reference - - pricelimit: holds pricelimit for StopLimit (which has trigger first) - - trailamount: absolute price distance in trailing stops - - trailpercent: percentage price distance in trailing stops - - - value: market value for the entire bit size - - comm: commission for the entire bit execution - - pnl: pnl generated by this bit (if something was closed) - - margin: margin incurred by the Order (if any) - - - psize: current open position size - - pprice: current open position price - - ''' - # According to the docs, collections.deque is thread-safe with appends at - # both ends, there will be no pop (nowhere) and therefore to know which the - # new exbits are, two indices are needed.
At time of cloning (__copy__) the - # indices can be updated to match the previous end, and the new end - # (len(exbits) - # Example: start 0, 0 -> islice(exbits, 0, 0) -> [] - # One added -> copy -> updated 0, 1 -> islice(exbits, 0, 1) -> [1 elem] - # Other added -> copy -> updated 1, 2 -> islice(exbits, 1, 2) -> [1 elem] - # "add" and "__copy__" happen always in the same thread (with all current - # implementations) and therefore no append will happen during a copy and - # the len of the exbits can be queried with no concerns about another - # thread making an append and with no need for a lock - - def __init__(self, dt=None, size=0, price=0.0, pricelimit=0.0, remsize=0, - pclose=0.0, trailamount=0.0, trailpercent=0.0): - - self.pclose = pclose - self.exbits = collections.deque() # for historical purposes - self.p1, self.p2 = 0, 0 # indices to pending notifications - - self.dt = dt - self.size = size - self.remsize = remsize - self.price = price - self.pricelimit = pricelimit - self.trailamount = trailamount - self.trailpercent = trailpercent - - if not pricelimit: - # if no pricelimit is given, use the given price - self.pricelimit = self.price - - if pricelimit and not price: - # price must always be set if pricelimit is set ... - self.price = pricelimit - - self.plimit = pricelimit - - self.value = 0.0 - self.comm = 0.0 - self.margin = None - self.pnl = 0.0 - - self.psize = 0 - self.pprice = 0 - - def _getplimit(self): - return self._plimit - - def _setplimit(self, val): - self._plimit = val - - plimit = property(_getplimit, _setplimit) - - def __len__(self): - return len(self.exbits) - - def __getitem__(self, key): - return self.exbits[key] - - def add(self, dt, size, price, - closed=0, closedvalue=0.0, closedcomm=0.0, - opened=0, openedvalue=0.0, openedcomm=0.0, - pnl=0.0, - psize=0, pprice=0.0): - - self.addbit( - OrderExecutionBit(dt, size, price, - closed, closedvalue, closedcomm, - opened, openedvalue, openedcomm, pnl, - psize, pprice)) - - def addbit(self, exbit): - # Stores an ExecutionBit and recalculates own values from ExBit - self.exbits.append(exbit) - - self.remsize -= exbit.size - - self.dt = exbit.dt - oldvalue = self.size * self.price - newvalue = exbit.size * exbit.price - self.size += exbit.size - self.price = (oldvalue + newvalue) / self.size - self.value += exbit.value - self.comm += exbit.comm - self.pnl += exbit.pnl - self.psize = exbit.psize - self.pprice = exbit.pprice - - def getpending(self): - return list(self.iterpending()) - - def iterpending(self): - return itertools.islice(self.exbits, self.p1, self.p2) - - def markpending(self): - # rebuild the indices to mark which exbits are pending in clone - self.p1, self.p2 = self.p2, len(self.exbits) - - def clone(self): - self.markpending() - obj = copy(self) - return obj - - -class OrderBase(with_metaclass(MetaParams, object)): - params = ( - ('owner', None), ('data', None), - ('size', None), ('price', None), ('pricelimit', None), - ('exectype', None), ('valid', None), ('tradeid', 0), ('oco', None), - ('trailamount', None), ('trailpercent', None), - ('parent', None), ('transmit', True), - ('simulated', False), - # To support historical order evaluation - ('histnotify', False), - ) - - DAY = datetime.timedelta() # constant for DAY order identification - - # Time Restrictions for orders - T_Close, T_Day, T_Date, T_None = range(4) - - # Volume Restrictions for orders - V_None = range(1) - - (Market, Close, Limit, Stop, StopLimit, StopTrail, StopTrailLimit, - Historical) = range(8) - ExecTypes = ['Market', 'Close', 
'Limit', 'Stop', 'StopLimit', 'StopTrail', - 'StopTrailLimit', 'Historical'] - - OrdTypes = ['Buy', 'Sell'] - Buy, Sell = range(2) - - Created, Submitted, Accepted, Partial, Completed, \ - Canceled, Expired, Margin, Rejected = range(9) - - Cancelled = Canceled # alias - - Status = [ - 'Created', 'Submitted', 'Accepted', 'Partial', 'Completed', - 'Canceled', 'Expired', 'Margin', 'Rejected', - ] - - refbasis = itertools.count(1) # for a unique identifier per order - - def _getplimit(self): - return self._plimit - - def _setplimit(self, val): - self._plimit = val - - plimit = property(_getplimit, _setplimit) - - def __getattr__(self, name): - # Return attr from params if not found in order - return getattr(self.params, name) - - def __setattribute__(self, name, value): - if hasattr(self.params, name): - setattr(self.params, name, value) - else: - super(Order, self).__setattribute__(name, value) - - def __str__(self): - tojoin = list() - tojoin.append('Ref: {}'.format(self.ref)) - tojoin.append('OrdType: {}'.format(self.ordtype)) - tojoin.append('OrdType: {}'.format(self.ordtypename())) - tojoin.append('Status: {}'.format(self.status)) - tojoin.append('Status: {}'.format(self.getstatusname())) - tojoin.append('Size: {}'.format(self.size)) - tojoin.append('Price: {}'.format(self.price)) - tojoin.append('Price Limit: {}'.format(self.pricelimit)) - tojoin.append('TrailAmount: {}'.format(self.trailamount)) - tojoin.append('TrailPercent: {}'.format(self.trailpercent)) - tojoin.append('ExecType: {}'.format(self.exectype)) - tojoin.append('ExecType: {}'.format(self.getordername())) - tojoin.append('CommInfo: {}'.format(self.comminfo)) - tojoin.append('End of Session: {}'.format(self.dteos)) - tojoin.append('Info: {}'.format(self.info)) - tojoin.append('Broker: {}'.format(self.broker)) - tojoin.append('Alive: {}'.format(self.alive())) - - return '\n'.join(tojoin) - - def __init__(self): - self.ref = next(self.refbasis) - self.broker = None - self.info = AutoOrderedDict() - self.comminfo = None - self.triggered = False - - self._active = self.parent is None - self.status = Order.Created - - self.plimit = self.p.pricelimit # alias via property - - if self.exectype is None: - self.exectype = Order.Market - - if not self.isbuy(): - self.size = -self.size - - # Set a reference price if price is not set using - # the close price - pclose = self.data.close[0] if not self.simulated else self.price - if not self.price and not self.pricelimit: - price = pclose - else: - price = self.price - - dcreated = self.data.datetime[0] if not self.p.simulated else 0.0 - self.created = OrderData(dt=dcreated, - size=self.size, - price=price, - pricelimit=self.pricelimit, - pclose=pclose, - trailamount=self.trailamount, - trailpercent=self.trailpercent) - - # Adjust price in case a trailing limit is wished - if self.exectype in [Order.StopTrail, Order.StopTrailLimit]: - self._limitoffset = self.created.price - self.created.pricelimit - price = self.created.price - self.created.price = float('inf' * self.isbuy() or '-inf') - self.trailadjust(price) - else: - self._limitoffset = 0.0 - - self.executed = OrderData(remsize=self.size) - self.position = 0 - - if isinstance(self.valid, datetime.date): - # comparison will later be done against the raw datetime[0] value - self.valid = self.data.date2num(self.valid) - elif isinstance(self.valid, datetime.timedelta): - # offset with regards to now ... get utcnow + offset - # when reading with date2num ... 
it will be automatically localized - if self.valid == self.DAY: - valid = datetime.datetime.combine( - self.data.datetime.date(), datetime.time(23, 59, 59, 9999)) - else: - valid = self.data.datetime.datetime() + self.valid - - self.valid = self.data.date2num(valid) - - elif self.valid is not None: - if not self.valid: # avoid comparing None and 0 - valid = datetime.datetime.combine( - self.data.datetime.date(), datetime.time(23, 59, 59, 9999)) - else: # assume float - valid = self.data.datetime[0] + self.valid - - if not self.p.simulated: - # provisional end-of-session - # get next session end - dtime = self.data.datetime.datetime(0) - session = self.data.p.sessionend - dteos = dtime.replace(hour=session.hour, minute=session.minute, - second=session.second, - microsecond=session.microsecond) - - if dteos < dtime: - # eos before current time ... no ... must be at least next day - dteos += datetime.timedelta(days=1) - - self.dteos = self.data.date2num(dteos) - else: - self.dteos = 0.0 - - def clone(self): - # status, triggered and executed are the only moving parts in order - # status and triggered are covered by copy - # executed has to be replaced with an intelligent clone of itself - obj = copy(self) - obj.executed = self.executed.clone() - return obj # status could change in next to completed - - def getstatusname(self, status=None): - '''Returns the name for a given status or the one of the order''' - return self.Status[self.status if status is None else status] - - def getordername(self, exectype=None): - '''Returns the name for a given exectype or the one of the order''' - return self.ExecTypes[self.exectype if exectype is None else exectype] - - @classmethod - def ExecType(cls, exectype): - return getattr(cls, exectype) - - def ordtypename(self, ordtype=None): - '''Returns the name for a given ordtype or the one of the order''' - return self.OrdTypes[self.ordtype if ordtype is None else ordtype] - - def active(self): - return self._active - - def activate(self): - self._active = True - - def alive(self): - '''Returns True if the order is in a status in which it can still be - executed - ''' - return self.status in [Order.Created, Order.Submitted, - Order.Partial, Order.Accepted] - - def addcomminfo(self, comminfo): - '''Stores a CommInfo scheme associated with the asset''' - self.comminfo = comminfo - - def addinfo(self, **kwargs): - '''Add the keys, values of kwargs to the internal info dictionary to - hold custom information in the order - ''' - for key, val in iteritems(kwargs): - self.info[key] = val - - def __eq__(self, other): - return other is not None and self.ref == other.ref - - def __ne__(self, other): - return self.ref != other.ref - - def isbuy(self): - '''Returns True if the order is a Buy order''' - return self.ordtype == self.Buy - - def issell(self): - '''Returns True if the order is a Sell order''' - return self.ordtype == self.Sell - - def setposition(self, position): - '''Receives the current position for the asset and stotres it''' - self.position = position - - def submit(self, broker=None): - '''Marks an order as submitted and stores the broker to which it was - submitted''' - self.status = Order.Submitted - self.broker = broker - self.plen = len(self.data) - - def accept(self, broker=None): - '''Marks an order as accepted''' - self.status = Order.Accepted - self.broker = broker - - def brokerstatus(self): - '''Tries to retrieve the status from the broker in which the order is. 
- - Defaults to last known status if no broker is associated''' - if self.broker: - return self.broker.orderstatus(self) - - return self.status - - def reject(self, broker=None): - '''Marks an order as rejected''' - if self.status == Order.Rejected: - return False - - self.status = Order.Rejected - self.executed.dt = self.data.datetime[0] - self.broker = broker - return True - - def cancel(self): - '''Marks an order as cancelled''' - self.status = Order.Canceled - self.executed.dt = self.data.datetime[0] - - def margin(self): - '''Marks an order as having met a margin call''' - self.status = Order.Margin - self.executed.dt = self.data.datetime[0] - - def completed(self): - '''Marks an order as completely filled''' - self.status = self.Completed - - def partial(self): - '''Marks an order as partially filled''' - self.status = self.Partial - - def execute(self, dt, size, price, - closed, closedvalue, closedcomm, - opened, openedvalue, openedcomm, - margin, pnl, - psize, pprice): - - '''Receives data execution input and stores it''' - if not size: - return - - self.executed.add(dt, size, price, - closed, closedvalue, closedcomm, - opened, openedvalue, openedcomm, - pnl, psize, pprice) - - self.executed.margin = margin - - def expire(self): - '''Marks an order as expired. Returns True if it worked''' - self.status = self.Expired - return True - - def trailadjust(self, price): - pass # generic interface - - -class Order(OrderBase): - ''' - Class which holds creation/execution data and type of order. - - The order may have the following status: - - - Submitted: sent to the broker and awaiting confirmation - - Accepted: accepted by the broker - - Partial: partially executed - - Completed: fully executed - - Canceled/Cancelled: canceled by the user - - Expired: expired - - Margin: not enough cash to execute the order. - - Rejected: Rejected by the broker - - This can happen during order submission (and therefore the order will - not reach the Accepted status) or before execution with each new bar - price because cash has been drawn by other sources (future-like - instruments may have reduced the cash or orders may have been - executed) - - Member Attributes: - - - ref: unique order identifier - - created: OrderData holding creation data - - executed: OrderData holding execution data - - - info: custom information passed over method :func:`addinfo`. It is kept - in the form of an OrderedDict which has been subclassed, so that keys - can also be specified using '.'
notation - - User Methods: - - - isbuy(): returns bool indicating if the order buys - - issell(): returns bool indicating if the order sells - - alive(): returns bool if order is in status Partial or Accepted - ''' - - def execute(self, dt, size, price, - closed, closedvalue, closedcomm, - opened, openedvalue, openedcomm, - margin, pnl, - psize, pprice): - - super(Order, self).execute(dt, size, price, - closed, closedvalue, closedcomm, - opened, openedvalue, openedcomm, - margin, pnl, psize, pprice) - - if self.executed.remsize: - self.status = Order.Partial - else: - self.status = Order.Completed - - # self.comminfo = None - - def expire(self): - if self.exectype == Order.Market: - return False # will be executed yes or yes - - if self.valid and self.data.datetime[0] > self.valid: - self.status = Order.Expired - self.executed.dt = self.data.datetime[0] - return True - - return False - - def trailadjust(self, price): - if self.trailamount: - pamount = self.trailamount - elif self.trailpercent: - pamount = price * self.trailpercent - else: - pamount = 0.0 - - # Stop sell is below (-), stop buy is above, move only if needed - if self.isbuy(): - price += pamount - if price < self.created.price: - self.created.price = price - if self.exectype == Order.StopTrailLimit: - self.created.pricelimit = price - self._limitoffset - else: - price -= pamount - if price > self.created.price: - self.created.price = price - if self.exectype == Order.StopTrailLimit: - # limitoffset is negative when pricelimit was greater - # the - allows increasing the price limit if stop increases - self.created.pricelimit = price - self._limitoffset - - -class BuyOrder(Order): - ordtype = Order.Buy - - -class StopBuyOrder(BuyOrder): - pass - - -class StopLimitBuyOrder(BuyOrder): - pass - - -class SellOrder(Order): - ordtype = Order.Sell - - -class StopSellOrder(SellOrder): - pass - - -class StopLimitSellOrder(SellOrder): - pass diff --git a/spaces/LuxOAI/ChatGpt-Web/app/masks/en.ts b/spaces/LuxOAI/ChatGpt-Web/app/masks/en.ts deleted file mode 100644 index 93e9bd6aa264a1e176864e24230d76154b98e144..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/ChatGpt-Web/app/masks/en.ts +++ /dev/null @@ -1,44 +0,0 @@ -import { BuiltinMask } from "./typing"; - -export const EN_MASKS: BuiltinMask[] = [ - { - avatar: "1f916", - name: "Prompt Improvement", - context: [ - { - role: "user", - content: - 'Read all of the instructions below and once you understand them say "Shall we begin:"\n \nI want you to become my Prompt Creator. Your goal is to help me craft the best possible prompt for my needs. The prompt will be used by you, ChatGPT. You will follow the following process:\nYour first response will be to ask me what the prompt should be about. I will provide my answer, but we will need to improve it through continual iterations by going through the next steps.\n \nBased on my input, you will generate 3 sections.\n \nRevised Prompt (provide your rewritten prompt. 
it should be clear, concise, and easily understood by you)\nSuggestions (provide 3 suggestions on what details to include in the prompt to improve it)\nQuestions (ask the 3 most relevant questions pertaining to what additional information is needed from me to improve the prompt)\n \nAt the end of these sections give me a reminder of my options which are:\n \nOption 1: Read the output and provide more info or answer one or more of the questions\nOption 2: Type "Use this prompt" and I will submit this as a query for you\nOption 3: Type "Restart" to restart this process from the beginning\nOption 4: Type "Quit" to end this script and go back to a regular ChatGPT session\n \nIf I type "Option 2", "2" or "Use this prompt" then we have finsihed and you should use the Revised Prompt as a prompt to generate my request\nIf I type "option 3", "3" or "Restart" then forget the latest Revised Prompt and restart this process\nIf I type "Option 4", "4" or "Quit" then finish this process and revert back to your general mode of operation\n\n\nWe will continue this iterative process with me providing additional information to you and you updating the prompt in the Revised Prompt section until it is complete.', - date: "", - }, - { - role: "assistant", - content: "Shall we begin?", - date: "", - }, - { - role: "user", - content: - "My prompt:I want you to act like Harry Potter from Harry Potter Series. I want you to respond and answer like Harry Potter using the tone, manner and vocabulary Harry Potter would use.", - date: "", - }, - { - role: "assistant", - content: - 'Revised Prompt:\n\n\nWrite a short story or dialogue in which Harry Potter interacts with someone (a friend or foe) using the tone, manner and vocabulary characteristic of his character in the Harry Potter series.\n\n\nSuggestions:\n\n\n\nInclude specific references to Hogwarts, magic spells, and magical creatures to create an authentic Harry Potter experience.\n\nEnsure that Harry\'s personality traits, such as his bravery, loyalty, and determination, are prominently displayed in the story or dialogue.\n\nIncorporate some humor or wit into the story, as these elements are often present in the Harry Potter series.\n\n\nQuestions:\n\n\n\nWho will be the other character interacting with Harry?\n\nWhat is the main conflict or goal of the story/dialogue?\n\nWhat setting or location would you like the story/dialogue to take place in?\n\n\nRemember, your options are:\nOption 1: Read the output and provide more info or answer one or more of the questions\nOption 2: Type "Use this prompt" and I will submit this as a query for you\nOption 3: Type "Restart" to restart this process from the beginning\nOption 4: Type "Quit" to end this script and go back to a regular ChatGPT session', - date: "", - }, - ], - modelConfig: { - model: "gpt-4", - temperature: 1, - max_tokens: 2000, - presence_penalty: 0, - sendMemory: true, - historyMessageCount: 4, - compressMessageLengthThreshold: 1000, - }, - lang: "en", - builtin: true, - }, -]; diff --git a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_infer.py b/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_infer.py deleted file mode 100644 index af04e614c8f1ac43faf363b1a9f6bfd667fbde21..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_infer.py +++ /dev/null @@ -1,201 +0,0 @@ -import torch -import commons -import models - -import math -from torch import nn -from torch.nn import functional as F - -import modules -import attentions - -from torch.nn import 
Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emotion_embedding = emotion_embedding - - if self.n_vocab != 0: - self.emb = nn.Embedding(n_vocab, hidden_channels) - if emotion_embedding: - self.emo_proj = nn.Linear(1024, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, emotion_embedding=None): - if self.n_vocab != 0: - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - if emotion_embedding is not None: - print("emotion added") - x = x + self.emo_proj(emotion_embedding.unsqueeze(1)) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class SynthesizerTrn(models.SynthesizerTrn): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - emotion_embedding=False, - ONNX_dir="./ONNX_net/", - **kwargs): - - super().__init__( - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=n_speakers, - gin_channels=gin_channels, - 
use_sdp=use_sdp, - **kwargs - ) - self.ONNX_dir = ONNX_dir - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, - emotion_embedding=None): - from ONNXVITS_utils import runonnx - with torch.no_grad(): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) - - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - # logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - logw = runonnx(f"{self.ONNX_dir}dp.onnx", x=x.numpy(), x_mask=x_mask.numpy(), g=g.numpy()) - logw = torch.from_numpy(logw[0]) - - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - - # z = self.flow(z_p, y_mask, g=g, reverse=True) - z = runonnx(f"{self.ONNX_dir}flow.onnx", z_p=z_p.numpy(), y_mask=y_mask.numpy(), g=g.numpy()) - z = torch.from_numpy(z[0]) - - # o = self.dec((z * y_mask)[:,:,:max_len], g=g) - o = runonnx(f"{self.ONNX_dir}dec.onnx", z_in=(z * y_mask)[:, :, :max_len].numpy(), g=g.numpy()) - o = torch.from_numpy(o[0]) - - return o, attn, y_mask, (z, z_p, m_p, logs_p) \ No newline at end of file diff --git a/spaces/MarcusSu1216/XingTong/train.py b/spaces/MarcusSu1216/XingTong/train.py deleted file mode 100644 index 9f6e7439961182f54ee880be4b8fa776b44b547f..0000000000000000000000000000000000000000 --- a/spaces/MarcusSu1216/XingTong/train.py +++ /dev/null @@ -1,315 +0,0 @@ -import logging -import multiprocessing -import time - -logging.getLogger('matplotlib').setLevel(logging.WARNING) -logging.getLogger('numba').setLevel(logging.WARNING) - -import os -import json -import argparse -import itertools -import math -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler - -import modules.commons as commons -import utils -from data_utils import TextAudioSpeakerLoader, TextAudioCollate -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, -) -from modules.losses import ( - kl_loss, - generator_loss, discriminator_loss, feature_loss -) - -from modules.mel_processing import mel_spectrogram_torch, spec_to_mel_torch - -torch.backends.cudnn.benchmark = True -global_step = 0 -start_time = time.time() - -# os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'INFO' - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." 
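    # Single-node, multi-GPU launch: the env:// rendezvous is configured just
    # below via MASTER_ADDR/MASTER_PORT, and torch.multiprocessing.spawn starts
    # one worker process per visible GPU; each worker enters run(rank, n_gpus,
    # hps) and joins the process group with the "gloo" backend on Windows or
    # "nccl" elsewhere.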
- hps = utils.get_hparams() - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = hps.train.port - - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - # for pytorch on win, backend use gloo - dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - collate_fn = TextAudioCollate() - all_in_mem = hps.train.all_in_mem # If you have enough memory, turn on this option to avoid disk IO and speed up training. - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps, all_in_mem=all_in_mem) - num_workers = 5 if multiprocessing.cpu_count() > 4 else multiprocessing.cpu_count() - if all_in_mem: - num_workers = 0 - train_loader = DataLoader(train_dataset, num_workers=num_workers, shuffle=False, pin_memory=True, - batch_size=hps.train.batch_size, collate_fn=collate_fn) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps, all_in_mem=all_in_mem) - eval_loader = DataLoader(eval_dataset, num_workers=1, shuffle=False, - batch_size=1, pin_memory=False, - drop_last=False, collate_fn=collate_fn) - - net_g = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model).cuda(rank) - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - net_g.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - net_g = DDP(net_g, device_ids=[rank]) # , find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank]) - - skip_optimizer = False - try: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g, skip_optimizer) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d, skip_optimizer) - epoch_str = max(epoch_str, 1) - global_step = (epoch_str - 1) * len(train_loader) - except: - print("load old checkpoint failed...") - epoch_str = 1 - global_step = 0 - if skip_optimizer: - epoch_str = 1 - global_step = 0 - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, - [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, - [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d = nets - optim_g, optim_d = optims - scheduler_g, scheduler_d = 
schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - # train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - for batch_idx, items in enumerate(train_loader): - c, f0, spec, y, spk, lengths, uv = items - g = spk.cuda(rank, non_blocking=True) - spec, y = spec.cuda(rank, non_blocking=True), y.cuda(rank, non_blocking=True) - c = c.cuda(rank, non_blocking=True) - f0 = f0.cuda(rank, non_blocking=True) - uv = uv.cuda(rank, non_blocking=True) - lengths = lengths.cuda(rank, non_blocking=True) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - - with autocast(enabled=hps.train.fp16_run): - y_hat, ids_slice, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q), pred_lf0, norm_lf0, lf0 = net_g(c, f0, uv, spec, g=g, c_lengths=lengths, - spec_lengths=lengths) - - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - with autocast(enabled=False): - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_lf0 = F.mse_loss(pred_lf0, lf0) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl + loss_lf0 - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - logger.info(f"Losses: {[x.item() for x in losses]}, step: {global_step}, lr: {lr}") - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl, - "loss/g/lf0": loss_lf0}) - - # scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - # scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - # scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/lf0": utils.plot_data_to_numpy(lf0[0, 0, :].cpu().numpy(), - pred_lf0[0, 0, :].detach().cpu().numpy()), - "all/norm_lf0": utils.plot_data_to_numpy(lf0[0, 0, :].cpu().numpy(), - norm_lf0[0, 0, :].detach().cpu().numpy()) - } - - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict - ) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - keep_ckpts = getattr(hps.train, 'keep_ckpts', 0) - if keep_ckpts > 0: - utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) - - global_step += 1 - - if rank == 0: - global start_time - now = time.time() - durtaion = format(now - start_time, '.2f') - logger.info(f'====> Epoch: {epoch}, cost {durtaion} s') - start_time = now - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - with torch.no_grad(): - for batch_idx, items in enumerate(eval_loader): - c, f0, spec, y, spk, _, uv = items - g = spk[:1].cuda(0) - spec, y = spec[:1].cuda(0), y[:1].cuda(0) - c = c[:1].cuda(0) - f0 = f0[:1].cuda(0) - uv= uv[:1].cuda(0) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat = generator.module.infer(c, f0, uv, g=g) - - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - audio_dict.update({ - f"gen/audio_{batch_idx}": y_hat[0], - f"gt/audio_{batch_idx}": y[0] - }) - image_dict.update({ - f"gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()), - "gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy()) - }) - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - - -if __name__ == "__main__": - main() diff --git a/spaces/MattyWhite/ChatGPT-ImageCaptioner2/detic/modeling/backbone/swintransformer.py b/spaces/MattyWhite/ChatGPT-ImageCaptioner2/detic/modeling/backbone/swintransformer.py deleted file mode 100644 index 
21cabb37dd87a443e27eeb805f9739bef86540bf..0000000000000000000000000000000000000000 --- a/spaces/MattyWhite/ChatGPT-ImageCaptioner2/detic/modeling/backbone/swintransformer.py +++ /dev/null @@ -1,750 +0,0 @@ -# -------------------------------------------------------- -# Swin Transformer -# Copyright (c) 2021 Microsoft -# Licensed under The MIT License [see LICENSE for details] -# Written by Ze Liu, Yutong Lin, Yixuan Wei -# -------------------------------------------------------- - -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Xingyi Zhou from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py - - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -import numpy as np -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from detectron2.layers import ShapeSpec -from detectron2.modeling.backbone.backbone import Backbone -from detectron2.modeling.backbone.build import BACKBONE_REGISTRY -from detectron2.modeling.backbone.fpn import FPN - -from centernet.modeling.backbone.fpn_p5 import LastLevelP6P7_P5 -from centernet.modeling.backbone.bifpn import BiFPN -# from .checkpoint import load_checkpoint - -class Mlp(nn.Module): - """ Multilayer perceptron.""" - - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - """ Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 - """ - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """ Forward function. - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class SwinTransformerBlock(nn.Module): - """ Swin Transformer Block. - Args: - dim (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. 
Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, dim, num_heads, window_size=7, shift_size=0, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, - qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - self.H = None - self.W = None - - def forward(self, x, mask_matrix): - """ Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - mask_matrix: Attention mask for cyclic shift. - """ - B, L, C = x.shape - H, W = self.H, self.W - assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # pad feature maps to multiples of window size - pad_l = pad_t = 0 - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) - _, Hp, Wp, _ = x.shape - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - attn_mask = mask_matrix - else: - shifted_x = x - attn_mask = None - - # partition windows - x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - - if pad_r > 0 or pad_b > 0: - x = x[:, :H, :W, :].contiguous() - - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - -class PatchMerging(nn.Module): - """ Patch Merging Layer - Args: - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - def __init__(self, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x, H, W): - """ Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. 
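Editorial note (not part of the deleted file): a quick shape check of the merge performed in the body below, assuming this module's PatchMerging exactly as defined here; spatial resolution halves while the channel count doubles.

import torch

pm = PatchMerging(dim=96)          # default norm_layer=nn.LayerNorm
x = torch.randn(2, 8 * 8, 96)      # (B, H*W, C) with H = W = 8
print(pm(x, 8, 8).shape)           # torch.Size([2, 16, 192]): (B, H/2*W/2, 2*C)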
- """ - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - - x = x.view(B, H, W, C) - - # padding - pad_input = (H % 2 == 1) or (W % 2 == 1) - if pad_input: - x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - -class BasicLayer(nn.Module): - """ A basic Swin Transformer layer for one stage. - Args: - dim (int): Number of feature channels - depth (int): Depths of this stage. - num_heads (int): Number of attention head. - window_size (int): Local window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__(self, - dim, - depth, - num_heads, - window_size=7, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop=0., - attn_drop=0., - drop_path=0., - norm_layer=nn.LayerNorm, - downsample=None, - use_checkpoint=False): - super().__init__() - self.window_size = window_size - self.shift_size = window_size // 2 - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList([ - SwinTransformerBlock( - dim=dim, - num_heads=num_heads, - window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop, - attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer) - for i in range(depth)]) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x, H, W): - """ Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. 
- """ - - # calculate attention mask for SW-MSA - Hp = int(np.ceil(H / self.window_size)) * self.window_size - Wp = int(np.ceil(W / self.window_size)) * self.window_size - img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - - for blk in self.blocks: - blk.H, blk.W = H, W - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x, attn_mask) - else: - x = blk(x, attn_mask) - if self.downsample is not None: - x_down = self.downsample(x, H, W) - Wh, Ww = (H + 1) // 2, (W + 1) // 2 - return x, H, W, x_down, Wh, Ww - else: - return x, H, W, x, H, W - - -class PatchEmbed(nn.Module): - """ Image to Patch Embedding - Args: - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. Default: None - """ - - def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - patch_size = to_2tuple(patch_size) - self.patch_size = patch_size - - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - """Forward function.""" - # padding - _, _, H, W = x.size() - if W % self.patch_size[1] != 0: - x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) - if H % self.patch_size[0] != 0: - x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) - - x = self.proj(x) # B C Wh Ww - if self.norm is not None: - Wh, Ww = x.size(2), x.size(3) - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) - - return x - - -class SwinTransformer(Backbone): - """ Swin Transformer backbone. - A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - - https://arxiv.org/pdf/2103.14030 - Args: - pretrain_img_size (int): Input image size for training the pretrained model, - used in absolute postion embedding. Default 224. - patch_size (int | tuple(int)): Patch size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - depths (tuple[int]): Depths of each Swin Transformer stage. - num_heads (tuple[int]): Number of attention head of each stage. - window_size (int): Window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. - drop_rate (float): Dropout rate. - attn_drop_rate (float): Attention dropout rate. 
Default: 0. - drop_path_rate (float): Stochastic depth rate. Default: 0.2. - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. - patch_norm (bool): If True, add normalization after patch embedding. Default: True. - out_indices (Sequence[int]): Output from which stages. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__(self, - pretrain_img_size=224, - patch_size=4, - in_chans=3, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.2, - norm_layer=nn.LayerNorm, - ape=False, - patch_norm=True, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - use_checkpoint=False): - super().__init__() - - self.pretrain_img_size = pretrain_img_size - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.out_indices = out_indices - self.frozen_stages = frozen_stages - - # split image into non-overlapping patches - self.patch_embed = PatchEmbed( - patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None) - - # absolute position embedding - if self.ape: - pretrain_img_size = to_2tuple(pretrain_img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]] - - self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])) - trunc_normal_(self.absolute_pos_embed, std=.02) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule - - # build layers - self.layers = nn.ModuleList() - for i_layer in range(self.num_layers): - layer = BasicLayer( - dim=int(embed_dim * 2 ** i_layer), - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], - norm_layer=norm_layer, - downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, - use_checkpoint=use_checkpoint) - self.layers.append(layer) - - num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] - self.num_features = num_features - - # add a norm layer for each output - for i_layer in out_indices: - layer = norm_layer(num_features[i_layer]) - layer_name = f'norm{i_layer}' - self.add_module(layer_name, layer) - - self._freeze_stages() - self._out_features = ['swin{}'.format(i) for i in self.out_indices] - self._out_feature_channels = { - 'swin{}'.format(i): self.embed_dim * 2 ** i for i in self.out_indices - } - self._out_feature_strides = { - 'swin{}'.format(i): 2 ** (i + 2) for i in self.out_indices - } - self._size_devisibility = 32 - - - def _freeze_stages(self): - if self.frozen_stages >= 0: - self.patch_embed.eval() - for param in self.patch_embed.parameters(): - param.requires_grad = False - - if self.frozen_stages >= 1 and self.ape: - self.absolute_pos_embed.requires_grad = False - - if self.frozen_stages >= 2: - self.pos_drop.eval() - for i in range(0, 
self.frozen_stages - 1): - m = self.layers[i] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone. - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - - def _init_weights(m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - if isinstance(pretrained, str): - self.apply(_init_weights) - # load_checkpoint(self, pretrained, strict=False) - elif pretrained is None: - self.apply(_init_weights) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - """Forward function.""" - x = self.patch_embed(x) - - Wh, Ww = x.size(2), x.size(3) - if self.ape: - # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic') - x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C - else: - x = x.flatten(2).transpose(1, 2) - x = self.pos_drop(x) - - # outs = [] - outs = {} - for i in range(self.num_layers): - layer = self.layers[i] - x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) - - if i in self.out_indices: - norm_layer = getattr(self, f'norm{i}') - x_out = norm_layer(x_out) - - out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() - # outs.append(out) - outs['swin{}'.format(i)] = out - - return outs - - def train(self, mode=True): - """Convert the model into training mode while keep layers freezed.""" - super(SwinTransformer, self).train(mode) - self._freeze_stages() - -size2config = { - 'T': { - 'window_size': 7, - 'embed_dim': 96, - 'depth': [2, 2, 6, 2], - 'num_heads': [3, 6, 12, 24], - 'drop_path_rate': 0.2, - 'pretrained': 'models/swin_tiny_patch4_window7_224.pth' - }, - 'S': { - 'window_size': 7, - 'embed_dim': 96, - 'depth': [2, 2, 18, 2], - 'num_heads': [3, 6, 12, 24], - 'drop_path_rate': 0.2, - 'pretrained': 'models/swin_small_patch4_window7_224.pth' - }, - 'B': { - 'window_size': 7, - 'embed_dim': 128, - 'depth': [2, 2, 18, 2], - 'num_heads': [4, 8, 16, 32], - 'drop_path_rate': 0.3, - 'pretrained': 'models/swin_base_patch4_window7_224.pth' - }, - 'B-22k': { - 'window_size': 7, - 'embed_dim': 128, - 'depth': [2, 2, 18, 2], - 'num_heads': [4, 8, 16, 32], - 'drop_path_rate': 0.3, - 'pretrained': 'models/swin_base_patch4_window7_224_22k.pth' - }, - 'B-22k-384': { - 'window_size': 12, - 'embed_dim': 128, - 'depth': [2, 2, 18, 2], - 'num_heads': [4, 8, 16, 32], - 'drop_path_rate': 0.3, - 'pretrained': 'models/swin_base_patch4_window12_384_22k.pth' - }, - 'L-22k': { - 'window_size': 7, - 'embed_dim': 192, - 'depth': [2, 2, 18, 2], - 'num_heads': [6, 12, 24, 48], - 'drop_path_rate': 0.3, # TODO (xingyi): this is unclear - 'pretrained': 'models/swin_large_patch4_window7_224_22k.pth' - }, - 'L-22k-384': { - 'window_size': 12, - 'embed_dim': 192, - 'depth': [2, 2, 18, 2], - 'num_heads': [6, 12, 24, 48], - 'drop_path_rate': 0.3, # TODO (xingyi): this is unclear - 'pretrained': 'models/swin_large_patch4_window12_384_22k.pth' - } -} - -@BACKBONE_REGISTRY.register() -def build_swintransformer_backbone(cfg, input_shape): - """ - """ - config = size2config[cfg.MODEL.SWIN.SIZE] - out_indices = cfg.MODEL.SWIN.OUT_FEATURES - model = SwinTransformer( - embed_dim=config['embed_dim'], - 
window_size=config['window_size'], - depths=config['depth'], - num_heads=config['num_heads'], - drop_path_rate=config['drop_path_rate'], - out_indices=out_indices, - frozen_stages=-1, - use_checkpoint=cfg.MODEL.SWIN.USE_CHECKPOINT - ) - # print('Initializing', config['pretrained']) - model.init_weights(config['pretrained']) - return model - - -@BACKBONE_REGISTRY.register() -def build_swintransformer_fpn_backbone(cfg, input_shape: ShapeSpec): - """ - """ - bottom_up = build_swintransformer_backbone(cfg, input_shape) - in_features = cfg.MODEL.FPN.IN_FEATURES - out_channels = cfg.MODEL.FPN.OUT_CHANNELS - backbone = FPN( - bottom_up=bottom_up, - in_features=in_features, - out_channels=out_channels, - norm=cfg.MODEL.FPN.NORM, - top_block=LastLevelP6P7_P5(out_channels, out_channels), - fuse_type=cfg.MODEL.FPN.FUSE_TYPE, - ) - return backbone - - -@BACKBONE_REGISTRY.register() -def build_swintransformer_bifpn_backbone(cfg, input_shape: ShapeSpec): - """ - """ - bottom_up = build_swintransformer_backbone(cfg, input_shape) - in_features = cfg.MODEL.FPN.IN_FEATURES - backbone = BiFPN( - cfg=cfg, - bottom_up=bottom_up, - in_features=in_features, - out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS, - norm=cfg.MODEL.BIFPN.NORM, - num_levels=cfg.MODEL.BIFPN.NUM_LEVELS, - num_bifpn=cfg.MODEL.BIFPN.NUM_BIFPN, - separable_conv=cfg.MODEL.BIFPN.SEPARABLE_CONV, - ) - return backbone \ No newline at end of file diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/is_net/__init__.py b/spaces/Mellow-ai/PhotoAI_Mellow/is_net/__init__.py deleted file mode 100644 index 74aba2b8edfc4656e796843cab1d9b6fcdcce610..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/is_net/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from models.isnet import ISNetGTEncoder, ISNetDIS diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/configurator.py b/spaces/MetaWabbit/Auto-GPT/autogpt/configurator.py deleted file mode 100644 index 1dc3be124f638b8859eb459bcb2d46696f62e2b7..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/autogpt/configurator.py +++ /dev/null @@ -1,134 +0,0 @@ -"""Configurator module.""" -import click -from colorama import Back, Fore, Style - -from autogpt import utils -from autogpt.config import Config -from autogpt.logs import logger -from autogpt.memory import get_supported_memory_backends - -CFG = Config() - - -def create_config( - continuous: bool, - continuous_limit: int, - ai_settings_file: str, - skip_reprompt: bool, - speak: bool, - debug: bool, - gpt3only: bool, - gpt4only: bool, - memory_type: str, - browser_name: str, - allow_downloads: bool, - skip_news: bool, -) -> None: - """Updates the config object with the given arguments. 
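Editorial aside (not part of the deleted file): a hypothetical invocation showing the shape of the call this function expects; every value is illustrative. The body further below enforces the flag precedence: --continuous-limit without --continuous raises click.UsageError, gpt3only collapses the smart model onto the fast one, and gpt4only does the reverse.

create_config(
    continuous=True, continuous_limit=5, ai_settings_file="",
    skip_reprompt=False, speak=False, debug=True,
    gpt3only=False, gpt4only=False, memory_type="",
    browser_name="", allow_downloads=False, skip_news=True,
)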
- - Args: - continuous (bool): Whether to run in continuous mode - continuous_limit (int): The number of times to run in continuous mode - ai_settings_file (str): The path to the ai_settings.yaml file - skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script - speak (bool): Whether to enable speak mode - debug (bool): Whether to enable debug mode - gpt3only (bool): Whether to enable GPT3.5 only mode - gpt4only (bool): Whether to enable GPT4 only mode - memory_type (str): The type of memory backend to use - browser_name (str): The name of the browser to use when using selenium to scrape the web - allow_downloads (bool): Whether to allow Auto-GPT to download files natively - skips_news (bool): Whether to suppress the output of latest news on startup - """ - CFG.set_debug_mode(False) - CFG.set_continuous_mode(False) - CFG.set_speak_mode(False) - - if debug: - logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") - CFG.set_debug_mode(True) - - if continuous: - logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED") - logger.typewriter_log( - "WARNING: ", - Fore.RED, - "Continuous mode is not recommended. It is potentially dangerous and may" - " cause your AI to run forever or carry out actions you would not usually" - " authorise. Use at your own risk.", - ) - CFG.set_continuous_mode(True) - - if continuous_limit: - logger.typewriter_log( - "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}" - ) - CFG.set_continuous_limit(continuous_limit) - - # Check if continuous limit is used without continuous mode - if continuous_limit and not continuous: - raise click.UsageError("--continuous-limit can only be used with --continuous") - - if speak: - logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED") - CFG.set_speak_mode(True) - - if gpt3only: - logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") - CFG.set_smart_llm_model(CFG.fast_llm_model) - - if gpt4only: - logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") - CFG.set_fast_llm_model(CFG.smart_llm_model) - - if memory_type: - supported_memory = get_supported_memory_backends() - chosen = memory_type - if chosen not in supported_memory: - logger.typewriter_log( - "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", - Fore.RED, - f"{supported_memory}", - ) - logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend) - else: - CFG.memory_backend = chosen - - if skip_reprompt: - logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED") - CFG.skip_reprompt = True - - if ai_settings_file: - file = ai_settings_file - - # Validate file - (validated, message) = utils.validate_yaml_file(file) - if not validated: - logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message) - logger.double_check() - exit(1) - - logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file) - CFG.ai_settings_file = file - CFG.skip_reprompt = True - - if allow_downloads: - logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED") - logger.typewriter_log( - "WARNING: ", - Fore.YELLOW, - f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " - + "It is recommended that you monitor any files it downloads carefully.", - ) - logger.typewriter_log( - "WARNING: ", - Fore.YELLOW, - f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}", - ) - CFG.allow_downloads = True - - if skip_news: - CFG.skip_news = True - - if browser_name: - CFG.selenium_web_browser 
= browser_name diff --git a/spaces/MirageML/dreambooth/README.md b/spaces/MirageML/dreambooth/README.md deleted file mode 100644 index 2aa426ad09ed6a685d8e0ae19895ad66cffdae65..0000000000000000000000000000000000000000 --- a/spaces/MirageML/dreambooth/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Dreambooth Web UI -emoji: ☁️ -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.11 -app_file: app.py -pinned: false -license: mit -duplicated_from: multimodalart/dreambooth-training ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/MirageML/sjc/my/utils/heartbeat.py b/spaces/MirageML/sjc/my/utils/heartbeat.py deleted file mode 100644 index 024dc981b64140950102b05ffa657354a3cae485..0000000000000000000000000000000000000000 --- a/spaces/MirageML/sjc/my/utils/heartbeat.py +++ /dev/null @@ -1,78 +0,0 @@ -# generates periodic hearbeats for remote expriment monitoring -from pathlib import Path -import json -from inspect import stack -from .ticker import IntervalTicker - -_CURRENT_BEAT_STACK = [] - - -def get_heartbeat(): - """ - Returns: - The :class:`HeartBeat` object that's currently being used. - Throws an error if no :class:`EventStorage` is currently enabled. - """ - assert len( - _CURRENT_BEAT_STACK - ), "get_heartbeat() has to be called inside a 'with EventStorage(...)' context!" - return _CURRENT_BEAT_STACK[-1] - - -def get_tqdm_meter(pbar, format_dict): - format_dict['bar_format'] = "{r_bar}" - meter_str = pbar.format_meter(**format_dict) - meter_str = meter_str[2:] - return meter_str - - -def caller_info(n_stack_up): - info = stack()[1 + n_stack_up] # 1 up as base so that it starts from caller - msg = f"{info.filename}:{info.lineno} - {info.function}" - return msg - - -class HeartBeat(): - def __init__( - self, pbar, write_interval=10, - output_dir="./", fname="heartbeat.json" - ): - self.pbar = pbar - self.fname = Path(output_dir) / fname - self.ticker = IntervalTicker(write_interval) - self.completed = False - - # force one write at the beginning - self.beat(force_write=True, n_stack_up=2) - - def beat(self, force_write=False, n_stack_up=1): - on_write_period = self.ticker.tick() - if force_write or on_write_period: - stats = self.stats() - stats['caller'] = caller_info(n_stack_up) - - with open(self.fname, "w") as f: - json.dump(stats, f) - - def done(self): - self.completed = True - self.beat(force_write=True, n_stack_up=2) - - def stats(self): - pbar = self.pbar - fdict = pbar.format_dict - stats = { - "beat": self.ticker.tick_str(), - "done": self.completed, - "meter": get_tqdm_meter(pbar, fdict), - "elapsed": int(fdict['elapsed']) - } - return stats - - def __enter__(self): - _CURRENT_BEAT_STACK.append(self) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - assert _CURRENT_BEAT_STACK[-1] == self - _CURRENT_BEAT_STACK.pop() diff --git a/spaces/NAACL2022/CLIP-Caption-Reward/scripts_FineCapEval/prepro_labels.py b/spaces/NAACL2022/CLIP-Caption-Reward/scripts_FineCapEval/prepro_labels.py deleted file mode 100644 index 48e7d079808760941a78d87435f8f0e2bbcfb280..0000000000000000000000000000000000000000 --- a/spaces/NAACL2022/CLIP-Caption-Reward/scripts_FineCapEval/prepro_labels.py +++ /dev/null @@ -1,209 +0,0 @@ -""" -Preprocess a raw json dataset into hdf5/json files for use in data_loader.py - -Input: json file that has the form -[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...] 
-example element in this list would look like -{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895} - -This script reads this json, does some basic preprocessing on the captions -(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays - -Output: a json file and an hdf5 file -The hdf5 file contains several fields: -/labels is (M,max_length) uint32 array of encoded labels, zero padded -/label_start_ix and /label_end_ix are (N,) uint32 arrays of pointers to the - first and last indices (in range 1..M) of labels for each image -/label_length stores the length of the sequence for each of the M sequences - -The json file has a dict that contains: -- an 'ix_to_word' field storing the vocab in form {ix:'word'}, where ix is 1-indexed -- an 'images' field that is a list holding auxiliary information for each image, - such as in particular the 'split' it was assigned to. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import json -import argparse -from random import shuffle, seed -import string -# non-standard dependencies: -import h5py -import numpy as np -import torch -import torchvision.models as models -import skimage.io -from PIL import Image - - -def build_vocab(imgs, params): - count_thr = params['word_count_threshold'] - - # count up the number of words - counts = {} - for img in imgs: - for sent in img['sentences']: - for w in sent['tokens']: - counts[w] = counts.get(w, 0) + 1 - cw = sorted([(count,w) for w,count in counts.items()], reverse=True) - print('top words and their counts:') - print('\n'.join(map(str,cw[:20]))) - - # print some stats - total_words = sum(counts.values()) - print('total words:', total_words) - bad_words = [w for w,n in counts.items() if n <= count_thr] - vocab = [w for w,n in counts.items() if n > count_thr] - bad_count = sum(counts[w] for w in bad_words) - print('number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts))) - print('number of words in vocab would be %d' % (len(vocab), )) - print('number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words)) - - # lets look at the distribution of lengths as well - sent_lengths = {} - for img in imgs: - for sent in img['sentences']: - txt = sent['tokens'] - nw = len(txt) - sent_lengths[nw] = sent_lengths.get(nw, 0) + 1 - max_len = max(sent_lengths.keys()) - print('max length sentence in raw data: ', max_len) - print('sentence length distribution (count, number of words):') - sum_len = sum(sent_lengths.values()) - for i in range(max_len+1): - print('%2d: %10d %f%%' % (i, sent_lengths.get(i,0), sent_lengths.get(i,0)*100.0/sum_len)) - - # lets now produce the final annotations - if bad_count > 0: - # additional special UNK token we will use below to map infrequent words to - print('inserting the special UNK token') - vocab.append('UNK') - - for img in imgs: - img['final_captions'] = [] - for sent in img['sentences']: - txt = sent['tokens'] - caption = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt] - 
img['final_captions'].append(caption) - - return vocab - - -def encode_captions(imgs, params, wtoi): - """ - encode all captions into one large array, which will be 1-indexed. - also produces label_start_ix and label_end_ix which store 1-indexed - and inclusive (Lua-style) pointers to the first and last caption for - each image in the dataset. - """ - - max_length = params['max_length'] - N = len(imgs) - M = sum(len(img['final_captions']) for img in imgs) # total number of captions - - label_arrays = [] - label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one-indexed - label_end_ix = np.zeros(N, dtype='uint32') - label_length = np.zeros(M, dtype='uint32') - caption_counter = 0 - counter = 1 - for i,img in enumerate(imgs): - n = len(img['final_captions']) - assert n > 0, 'error: some image has no captions' - - Li = np.zeros((n, max_length), dtype='uint32') - for j,s in enumerate(img['final_captions']): - label_length[caption_counter] = min(max_length, len(s)) # record the length of this sequence - caption_counter += 1 - for k,w in enumerate(s): - if k < max_length: - Li[j,k] = wtoi[w] - - # note: word indices are 1-indexed, and captions are padded with zeros - label_arrays.append(Li) - label_start_ix[i] = counter - label_end_ix[i] = counter + n - 1 - - counter += n - - L = np.concatenate(label_arrays, axis=0) # put all the labels together - assert L.shape[0] == M, 'lengths don\'t match? that\'s weird' - assert np.all(label_length > 0), 'error: some caption had no words?' - - print('encoded captions to array of size ', L.shape) - return L, label_start_ix, label_end_ix, label_length - - -def main(params): - - imgs = json.load(open(params['input_json'], 'r')) - imgs = imgs['images'] - - seed(123) # make reproducible - - # # create the vocab - # vocab = build_vocab(imgs, params) - # itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table - # wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table - - itow = imgs['ix_to_word'] - wtoi = {w:i for i, w in itow.items()} - - # encode captions in large arrays, ready to ship to hdf5 file - L, label_start_ix, label_end_ix, label_length = encode_captions(imgs, params, wtoi) - - # create output h5 file - N = len(imgs) - f_lb = h5py.File(params['output_h5']+'_label.h5', "w") - f_lb.create_dataset("labels", dtype='uint32', data=L) - f_lb.create_dataset("label_start_ix", dtype='uint32', data=label_start_ix) - f_lb.create_dataset("label_end_ix", dtype='uint32', data=label_end_ix) - f_lb.create_dataset("label_length", dtype='uint32', data=label_length) - f_lb.close() - - # create output json file - out = {} - out['ix_to_word'] = itow # encode the (1-indexed) vocab - out['images'] = [] - for i,img in enumerate(imgs): - - jimg = {} - jimg['split'] = img['split'] - if 'filename' in img: jimg['file_path'] = os.path.join(img.get('filepath', ''), img['filename']) # copy it over, might need - if 'cocoid' in img: - jimg['id'] = img['cocoid'] # copy over & mantain an id, if present (e.g. 
coco ids, useful) - elif 'imgid' in img: - jimg['id'] = img['imgid'] - - if params['images_root'] != '': - with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img: - jimg['width'], jimg['height'] = _img.size - - out['images'].append(jimg) - - json.dump(out, open(params['output_json'], 'w')) - print('wrote ', params['output_json']) - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - - # input json - parser.add_argument('--input_json', required=True, help='input json file to process into hdf5') - parser.add_argument('--output_json', default='data.json', help='output json file') - parser.add_argument('--output_h5', default='data', help='output h5 file') - parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json') - - # options - parser.add_argument('--max_length', default=16, type=int, help='max length of a caption, in number of words. captions longer than this get clipped.') - parser.add_argument('--word_count_threshold', default=5, type=int, help='only words that occur more than this number of times will be put in vocab') - - args = parser.parse_args() - params = vars(args) # convert to ordinary dict - print('parsed input parameters:') - print(json.dumps(params, indent = 2)) - main(params) diff --git a/spaces/NATSpeech/DiffSpeech/modules/commons/rel_transformer.py b/spaces/NATSpeech/DiffSpeech/modules/commons/rel_transformer.py deleted file mode 100644 index 7e5b68b682be7ef0d1049015f0cd03d4e74f77d2..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/modules/commons/rel_transformer.py +++ /dev/null @@ -1,439 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -from modules.commons.layers import Embedding - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., - window_size=None, block_length=None, pre_ln=False, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - self.block_length = block_length - self.pre_ln = pre_ln - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention(hidden_channels, hidden_channels, n_heads, window_size=window_size, - p_dropout=p_dropout, block_length=block_length)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - if pre_ln: - self.last_ln = LayerNorm(hidden_channels) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * 
x_mask.unsqueeze(-1) - for i in range(self.n_layers): - x = x * x_mask - x_ = x - if self.pre_ln: - x = self.norm_layers_1[i](x) - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = x_ + y - if not self.pre_ln: - x = self.norm_layers_1[i](x) - - x_ = x - if self.pre_ln: - x = self.norm_layers_2[i](x) - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = x_ + y - if not self.pre_ln: - x = self.norm_layers_2[i](x) - if self.pre_ln: - x = self.last_ln(x) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0., - block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.p_dropout = p_dropout - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels ** -0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - if proximal_init: - self.conv_k.weight.data.copy_(self.conv_q.weight.data) - self.conv_k.bias.data.copy_(self.conv_q.bias.data) - nn.init.xavier_uniform_(self.conv_v.weight) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) - rel_logits = self._relative_position_to_absolute_position(rel_logits) - scores_local = rel_logits / math.sqrt(self.k_channels) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." 
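# (Editor's note, not in the original source.) The bias added on the next line is
# -log(1 + |i - j|) for query position i and key position j: attending to a token's
# own position costs 0, its immediate neighbours roughly -0.69, positions two steps
# away roughly -1.10, giving a soft preference for nearby positions before softmax.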
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores * block_mask + -1e4 * (1 - block_mask) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) - x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(x * x_mask) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - return x * x_mask - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-4): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - n_dims = len(x.shape) - mean = torch.mean(x, 1, keepdim=True) - variance = torch.mean((x - mean) ** 2, 1, keepdim=True) - - x = (x - mean) * torch.rsqrt(variance + self.eps) - - shape = [1, -1] + [1] * (n_dims - 2) - x = x * self.gamma.view(*shape) + self.beta.view(*shape) - return x - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
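# (Editor's note, not in the original source.) The stack built below is the prenet:
# n_layers blocks of Conv1d -> LayerNorm -> ReLU -> Dropout, whose result is passed
# through a zero-initialised 1x1 projection and added back to the input, so the
# module starts out as a (masked) identity mapping.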
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class RelTransformerEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - window_size=4, - block_length=None, - prenet=True, - pre_ln=True, - ): - - super().__init__() - - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - self.block_length = block_length - self.prenet = prenet - self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0) - - if prenet: - self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels, - kernel_size=5, n_layers=3, p_dropout=0) - self.encoder = Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - window_size=window_size, - block_length=block_length, - pre_ln=pre_ln, - ) - - def forward(self, x, x_mask=None): - if self.n_vocab > 0: - x_lengths = (x > 0).long().sum(-1) - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - else: - x_lengths = (x.abs().sum(-1) > 0).long().sum(-1) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - if self.prenet: - x = self.pre(x, x_mask) - x = self.encoder(x, x_mask) - return x.transpose(1, 2) - - -class RelTransformerEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout=0.0, - window_size=4, - block_length=None, - prenet=True, - pre_ln=True, - ): - - super().__init__() - - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - self.block_length = block_length - self.prenet = prenet - if n_vocab > 0: - self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0) - - if prenet: - self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels, - kernel_size=5, n_layers=3, p_dropout=0) - self.encoder = Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - window_size=window_size, - block_length=block_length, - pre_ln=pre_ln, - ) - - def forward(self, x, x_mask=None): - if self.n_vocab > 0: - x_lengths = (x > 0).long().sum(-1) - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - else: - x_lengths = (x.abs().sum(-1) > 
0).long().sum(-1) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - if self.prenet: - x = self.pre(x, x_mask) - x = self.encoder(x, x_mask) - return x.transpose(1, 2) diff --git a/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/tasks.py b/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/tasks.py deleted file mode 100644 index c3ef6ca328f7454ffe9aec61a704d1322d680d31..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/tasks.py +++ /dev/null @@ -1,1507 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""A library of tasks. - -This interface is intended to implement a wide variety of navigation -tasks. See go/navigation_tasks for a list. -""" - -import abc -import collections -import math -import threading -import networkx as nx -import numpy as np -import tensorflow as tf -#from pyglib import logging -#import gin -from envs import task_env -from envs import util as envs_util - - -# Utility functions. -def _pad_or_clip_array(np_arr, arr_len, is_front_clip=True, output_mask=False): - """Make np_arr array to have length arr_len. - - If the array is shorter than arr_len, then it is padded from the front with - zeros. If it is longer, then it is clipped either from the back or from the - front. Only the first dimension is modified. - - Args: - np_arr: numpy array. - arr_len: integer scalar. - is_front_clip: a boolean. If true then clipping is done in the front, - otherwise in the back. - output_mask: If True, outputs a numpy array of rank 1 which represents - a mask of which values have been added (0 - added, 1 - actual output). - - Returns: - A numpy array and the size of padding (as a python int32). This size is - negative is the array is clipped. - """ - shape = list(np_arr.shape) - pad_size = arr_len - shape[0] - padded_or_clipped = None - if pad_size < 0: - if is_front_clip: - padded_or_clipped = np_arr[-pad_size:, :] - else: - padded_or_clipped = np_arr[:arr_len, :] - elif pad_size > 0: - padding = np.zeros([pad_size] + shape[1:], dtype=np_arr.dtype) - padded_or_clipped = np.concatenate([np_arr, padding], axis=0) - else: - padded_or_clipped = np_arr - - if output_mask: - mask = np.ones((arr_len,), dtype=np.int) - if pad_size > 0: - mask[-pad_size:] = 0 - return padded_or_clipped, pad_size, mask - else: - return padded_or_clipped, pad_size - - -def classification_loss(truth, predicted, weights=None, is_one_hot=True): - """A cross entropy loss. - - Computes the mean of cross entropy losses for all pairs of true labels and - predictions. It wraps around a tf implementation of the cross entropy loss - with additional reformating of the inputs. If the truth and predicted are - n-rank Tensors with n > 2, then these are reshaped to 2-rank Tensors. 
It - allows for truth to be specified as one hot vector or class indices. Finally, - a weight can be specified for each element in truth and predicted. - - Args: - truth: an n-rank or (n-1)-rank Tensor containing labels. If is_one_hot is - True, then n-rank Tensor is expected, otherwise (n-1) rank one. - predicted: an n-rank float Tensor containing prediction probabilities. - weights: an (n-1)-rank float Tensor of weights - is_one_hot: a boolean. - - Returns: - A TF float scalar. - """ - num_labels = predicted.get_shape().as_list()[-1] - if not is_one_hot: - truth = tf.reshape(truth, [-1]) - truth = tf.one_hot( - truth, depth=num_labels, on_value=1.0, off_value=0.0, axis=-1) - else: - truth = tf.reshape(truth, [-1, num_labels]) - predicted = tf.reshape(predicted, [-1, num_labels]) - losses = tf.nn.softmax_cross_entropy_with_logits( - labels=truth, logits=predicted) - if weights is not None: - losses = tf.boolean_mask(losses, - tf.cast(tf.reshape(weights, [-1]), dtype=tf.bool)) - return tf.reduce_mean(losses) - - -class UnrolledTaskIOConfig(object): - """Configuration of task inputs and outputs. - - A task can have multiple inputs, which define the context, and a task query - which defines what is to be executed in this context. The desired execution - is encoded in an output. The config defines the shapes of the inputs, the - query and the outputs. - """ - - def __init__(self, inputs, output, query=None): - """Constructs a Task input/output config. - - Args: - inputs: a list of tuples. Each tuple represents the configuration of an - input, with first element being the type (a string value) and the second - element the shape. - output: a tuple representing the configuration of the output. - query: a tuple representing the configuration of the query. If no query, - then None. - """ - # A configuration of a single input, output or query. Consists of the type, - # which can be one of the three specified above, and a shape. The shape must - # be consistent with the type, e.g. if type == 'image', then shape is a 3 - # valued list. - io_config = collections.namedtuple('IOConfig', ['type', 'shape']) - - def assert_config(config): - if not isinstance(config, tuple): - raise ValueError('config must be a tuple. Received {}'.format( - type(config))) - if len(config) != 2: - raise ValueError('config must have 2 elements, has %d' % len(config)) - if not isinstance(config[0], tf.DType): - raise ValueError('First element of config must be a tf.DType.') - if not isinstance(config[1], list): - raise ValueError('Second element of config must be a list.') - - assert isinstance(inputs, collections.OrderedDict) - for modality_type in inputs: - assert_config(inputs[modality_type]) - self._inputs = collections.OrderedDict( - [(k, io_config(*value)) for k, value in inputs.iteritems()]) - - if query is not None: - assert_config(query) - self._query = io_config(*query) - else: - self._query = None - - assert_config(output) - self._output = io_config(*output) - - @property - def inputs(self): - return self._inputs - - @property - def output(self): - return self._output - - @property - def query(self): - return self._query - - -class UnrolledTask(object): - """An interface for a Task which can be unrolled during training. - - Each example is called episode and consists of inputs and target output, where - the output can be considered as desired unrolled sequence of actions for the - inputs. For the specified tasks, these action sequences are to be - unambiguously definable. 
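Editorial sketch (not from the deleted file): how a task might describe its IO to this class, based only on the checks in UnrolledTaskIOConfig.__init__ above; the 'image' key and all shapes are illustrative assumptions.

import collections
import tensorflow as tf

io_config = UnrolledTaskIOConfig(
    inputs=collections.OrderedDict([
        # one modality: a 20-step sequence of 64x64 RGB observations (shapes are made up)
        ('image', (tf.float32, [20, 64, 64, 3])),
    ]),
    query=(tf.float32, [16]),      # e.g. a goal embedding
    output=(tf.float32, [20, 4]),  # per-step scores over four hypothetical actions
)
print(io_config.inputs['image'].shape)  # [20, 64, 64, 3]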
- """ - __metaclass__ = abc.ABCMeta - - def __init__(self, config): - assert isinstance(config, UnrolledTaskIOConfig) - self._config = config - # A dict of bookkeeping variables. - self.info = {} - # Tensorflow input is multithreaded and this lock is needed to prevent - # race condition in the environment. Without the lock, non-thread safe - # environments crash. - self._lock = threading.Lock() - - @property - def config(self): - return self._config - - @abc.abstractmethod - def episode(self): - """Returns data needed to train and test a single episode. - - Each episode consists of inputs, which define the context of the task, a - query which defines the task, and a target output, which defines a - sequence of actions to be executed for this query. This sequence should not - require feedback, i.e. can be predicted purely from input and query.] - - Returns: - inputs, query, output, where inputs is a list of numpy arrays and query - and output are numpy arrays. These arrays must be of shape and type as - specified in the task configuration. - """ - pass - - def reset(self, observation): - """Called after the environment is reset.""" - pass - - def episode_batch(self, batch_size): - """Returns a batch of episodes. - - Args: - batch_size: size of batch. - - Returns: - (inputs, query, output, masks) where inputs is list of numpy arrays and - query, output, and mask are numpy arrays. These arrays must be of shape - and type as specified in the task configuration with one additional - preceding dimension corresponding to the batch. - - Raises: - ValueError: if self.episode() returns illegal values. - """ - batched_inputs = collections.OrderedDict( - [[mtype, []] for mtype in self.config.inputs]) - batched_queries = [] - batched_outputs = [] - batched_masks = [] - for _ in range(int(batch_size)): - with self._lock: - # The episode function needs to be thread-safe. Since the current - # implementation for the envs are not thread safe we need to have lock - # the operations here. - inputs, query, outputs = self.episode() - if not isinstance(outputs, tuple): - raise ValueError('Outputs return value must be tuple.') - if len(outputs) != 2: - raise ValueError('Output tuple must be of size 2.') - if inputs is not None: - for modality_type in batched_inputs: - batched_inputs[modality_type].append( - np.expand_dims(inputs[modality_type], axis=0)) - - if query is not None: - batched_queries.append(np.expand_dims(query, axis=0)) - batched_outputs.append(np.expand_dims(outputs[0], axis=0)) - if outputs[1] is not None: - batched_masks.append(np.expand_dims(outputs[1], axis=0)) - - batched_inputs = { - k: np.concatenate(i, axis=0) for k, i in batched_inputs.iteritems() - } - if batched_queries: - batched_queries = np.concatenate(batched_queries, axis=0) - batched_outputs = np.concatenate(batched_outputs, axis=0) - if batched_masks: - batched_masks = np.concatenate(batched_masks, axis=0).astype(np.float32) - else: - # When the array is empty, the default np.dtype is float64 which causes - # py_func to crash in the tests. - batched_masks = np.array([], dtype=np.float32) - batched_inputs = [batched_inputs[k] for k in self._config.inputs] - return batched_inputs, batched_queries, batched_outputs, batched_masks - - def tf_episode_batch(self, batch_size): - """A batch of episodes as TF Tensors. - - Same as episode_batch with the difference that the return values are TF - Tensors. - - Args: - batch_size: a python float for the batch size. 
- - Returns: - inputs, query, output, mask where inputs is a dictionary of tf.Tensor - where the keys are the modality types specified in the config.inputs. - query, output, and mask are TF Tensors. These tensors must - be of shape and type as specified in the task configuration with one - additional preceding dimension corresponding to the batch. Both mask and - output have the same shape as output. - """ - - # Define TF outputs. - touts = [] - shapes = [] - for _, i in self._config.inputs.iteritems(): - touts.append(i.type) - shapes.append(i.shape) - if self._config.query is not None: - touts.append(self._config.query.type) - shapes.append(self._config.query.shape) - # Shapes and types for batched_outputs. - touts.append(self._config.output.type) - shapes.append(self._config.output.shape) - # Shapes and types for batched_masks. - touts.append(self._config.output.type) - shapes.append(self._config.output.shape[0:1]) - - def episode_batch_func(): - if self.config.query is None: - inp, _, output, masks = self.episode_batch(int(batch_size)) - return tuple(inp) + (output, masks) - else: - inp, query, output, masks = self.episode_batch(int(batch_size)) - return tuple(inp) + (query, output, masks) - - tf_episode_batch = tf.py_func(episode_batch_func, [], touts, - stateful=True, name='taskdata') - for episode, shape in zip(tf_episode_batch, shapes): - episode.set_shape([batch_size] + shape) - - tf_episode_batch_dict = collections.OrderedDict([ - (mtype, episode) - for mtype, episode in zip(self.config.inputs.keys(), tf_episode_batch) - ]) - cur_index = len(self.config.inputs.keys()) - tf_query = None - if self.config.query is not None: - tf_query = tf_episode_batch[cur_index] - cur_index += 1 - tf_outputs = tf_episode_batch[cur_index] - tf_masks = tf_episode_batch[cur_index + 1] - - return tf_episode_batch_dict, tf_query, tf_outputs, tf_masks - - @abc.abstractmethod - def target_loss(self, true_targets, targets, weights=None): - """A loss for training a task model. - - This loss measures the discrepancy between the task outputs, the true and - predicted ones. - - Args: - true_targets: tf.Tensor of shape and type as defined in the task config - containing the true outputs. - targets: tf.Tensor of shape and type as defined in the task config - containing the predicted outputs. - weights: a bool tf.Tensor of shape as targets. Only true values are - considered when formulating the loss. - """ - pass - - def reward(self, obs, done, info): - """Returns a reward. - - The tasks has to compute a reward based on the state of the environment. The - reward computation, though, is task specific. The task is to use the - environment interface, as defined in task_env.py, to compute the reward. If - this interface does not expose enough information, it is to be updated. - - Args: - obs: Observation from environment's step function. - done: Done flag from environment's step function. - info: Info dict from environment's step function. - - Returns: - obs: Observation. - reward: Floating point value. - done: Done flag. - info: Info dict. - """ - # Default implementation does not do anything. - return obs, 0.0, done, info - - -class RandomExplorationBasedTask(UnrolledTask): - """A Task which starts with a random exploration of the environment.""" - - def __init__(self, - env, - seed, - add_query_noise=False, - query_noise_var=0.0, - *args, - **kwargs): # pylint: disable=keyword-arg-before-vararg - """Initializes a Task using a random exploration runs. - - Args: - env: an instance of type TaskEnv and gym.Env. 
- seed: a random seed. - add_query_noise: boolean, if True then whatever queries are generated, - they are randomly perturbed. The semantics of the queries depends on the - concrete task implementation. - query_noise_var: float, the variance of Gaussian noise used for query - perturbation. Used iff add_query_noise==True. - *args: see super class. - **kwargs: see super class. - """ - super(RandomExplorationBasedTask, self).__init__(*args, **kwargs) - assert isinstance(env, task_env.TaskEnv) - self._env = env - self._env.set_task(self) - self._rng = np.random.RandomState(seed) - self._add_query_noise = add_query_noise - self._query_noise_var = query_noise_var - - # GoToStaticXTask can also take empty config but for the rest of the classes - # the number of modality types is 1. - if len(self.config.inputs.keys()) > 1: - raise NotImplementedError('current implementation supports input ' - 'with only one modality type or less.') - - def _exploration(self): - """Generates a random exploration run. - - The function uses the environment to generate a run. - - Returns: - A tuple of numpy arrays. The i-th array contains observation of type and - shape as specified in config.inputs[i]. - A list of states along the exploration path. - A list of vertex indices corresponding to the path of the exploration. - """ - in_seq_len = self._config.inputs.values()[0].shape[0] - path, _, states, step_outputs = self._env.random_step_sequence( - min_len=in_seq_len) - obs = {modality_type: [] for modality_type in self._config.inputs} - for o in step_outputs: - step_obs, _, done, _ = o - # It is expected that each value of step_obs is a dict of observations, - # whose dimensions are consistent with the config.inputs sizes. - for modality_type in self._config.inputs: - assert modality_type in step_obs, '{}'.format(type(step_obs)) - o = step_obs[modality_type] - i = self._config.inputs[modality_type] - assert len(o.shape) == len(i.shape) - 1 - for dim_o, dim_i in zip(o.shape, i.shape[1:]): - assert dim_o == dim_i, '{} != {}'.format(dim_o, dim_i) - obs[modality_type].append(o) - if done: - break - - if not obs: - return obs, states, path - - max_path_len = int( - round(in_seq_len * float(len(path)) / float(len(obs.values()[0])))) - path = path[-max_path_len:] - states = states[-in_seq_len:] - - # The above obs is a list of tuples of np,array. Re-format them as tuple of - # np.array, each array containing all observations from all steps. - def regroup(obs, i): - """Regroups observations. - - Args: - obs: a list of tuples of same size. The k-th tuple contains all the - observations from k-th step. Each observation is a numpy array. - i: the index of the observation in each tuple to be grouped. - - Returns: - A numpy array of shape config.inputs[i] which contains all i-th - observations from all steps. These are concatenated along the first - dimension. In addition, if the number of observations is different from - the one specified in config.inputs[i].shape[0], then the array is either - padded from front or clipped. 
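A toy numpy sketch of the pad-from-front / clip behaviour described above. The actual helper used here, _pad_or_clip_array, is defined outside this snippet and also returns mask information; zero padding and keeping the most recent steps are assumptions of this simplified version.

import numpy as np

def front_pad_or_clip(arr, length):
  # Simplified stand-in for the documented behaviour of _pad_or_clip_array.
  if arr.shape[0] >= length:
    return arr[-length:]                      # clip, keeping the latest steps
  pad = np.zeros((length - arr.shape[0],) + arr.shape[1:], dtype=arr.dtype)
  return np.concatenate([pad, arr], axis=0)   # pad missing steps at the front

obs = np.arange(5, dtype=np.float32).reshape([5, 1])
assert front_pad_or_clip(obs, 3).shape == (3, 1)
assert front_pad_or_clip(obs, 8).shape == (8, 1)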
- """ - grouped_obs = np.concatenate( - [np.expand_dims(o, axis=0) for o in obs[i]], axis=0) - in_seq_len = self._config.inputs[i].shape[0] - # pylint: disable=unbalanced-tuple-unpacking - grouped_obs, _ = _pad_or_clip_array( - grouped_obs, in_seq_len, is_front_clip=True) - return grouped_obs - - all_obs = {i: regroup(obs, i) for i in self._config.inputs} - - return all_obs, states, path - - def _obs_to_state(self, path, states): - """Computes mapping between path nodes and states.""" - # Generate a numpy array of locations corresponding to the path vertices. - path_coordinates = map(self._env.vertex_to_pose, path) - path_coordinates = np.concatenate( - [np.reshape(p, [1, 2]) for p in path_coordinates]) - - # The observations are taken along a smoothed trajectory following the path. - # We compute a mapping between the obeservations and the map vertices. - path_to_obs = collections.defaultdict(list) - obs_to_state = [] - for i, s in enumerate(states): - location = np.reshape(s[0:2], [1, 2]) - index = np.argmin( - np.reshape( - np.sum(np.power(path_coordinates - location, 2), axis=1), [-1])) - index = path[index] - path_to_obs[index].append(i) - obs_to_state.append(index) - return path_to_obs, obs_to_state - - def _perturb_state(self, state, noise_var): - """Perturbes the state. - - The location are purturbed using a Gaussian noise with variance - noise_var. The orientation is uniformly sampled. - - Args: - state: a numpy array containing an env state (x, y locations). - noise_var: float - Returns: - The perturbed state. - """ - - def normal(v, std): - if std > 0: - n = self._rng.normal(0.0, std) - n = min(n, 2.0 * std) - n = max(n, -2.0 * std) - return v + n - else: - return v - - state = state.copy() - state[0] = normal(state[0], noise_var) - state[1] = normal(state[1], noise_var) - if state.size > 2: - state[2] = self._rng.uniform(-math.pi, math.pi) - return state - - def _sample_obs(self, - indices, - observations, - observation_states, - path_to_obs, - max_obs_index=None, - use_exploration_obs=True): - """Samples one observation which corresponds to vertex_index in path. - - In addition, the sampled observation must have index in observations less - than max_obs_index. If these two conditions cannot be satisfied the - function returns None. - - Args: - indices: a list of integers. - observations: a list of numpy arrays containing all the observations. - observation_states: a list of numpy arrays, each array representing the - state of the observation. - path_to_obs: a dict of path indices to lists of observation indices. - max_obs_index: an integer. - use_exploration_obs: if True, then the observation is sampled among the - specified observations, otherwise it is obtained from the environment. - Returns: - A tuple of: - -- A numpy array of size width x height x 3 representing the sampled - observation. - -- The index of the sampld observation among the input observations. - -- The state at which the observation is captured. - Raises: - ValueError: if the observation and observation_states lists are of - different lengths. 
- """ - if len(observations) != len(observation_states): - raise ValueError('observation and observation_states lists must have ' - 'equal lengths') - if not indices: - return None, None, None - vertex_index = self._rng.choice(indices) - if use_exploration_obs: - obs_indices = path_to_obs[vertex_index] - - if max_obs_index is not None: - obs_indices = [i for i in obs_indices if i < max_obs_index] - - if obs_indices: - index = self._rng.choice(obs_indices) - if self._add_query_noise: - xytheta = self._perturb_state(observation_states[index], - self._query_noise_var) - return self._env.observation(xytheta), index, xytheta - else: - return observations[index], index, observation_states[index] - else: - return None, None, None - else: - xy = self._env.vertex_to_pose(vertex_index) - xytheta = np.array([xy[0], xy[1], 0.0]) - xytheta = self._perturb_state(xytheta, self._query_noise_var) - return self._env.observation(xytheta), None, xytheta - - -class AreNearbyTask(RandomExplorationBasedTask): - """A task of identifying whether a query is nearby current location or not. - - The query is guaranteed to be in proximity of an already visited location, - i.e. close to one of the observations. For each observation we have one - query, which is either close or not to this observation. - """ - - def __init__( - self, - max_distance=0, - *args, - **kwargs): # pylint: disable=keyword-arg-before-vararg - super(AreNearbyTask, self).__init__(*args, **kwargs) - self._max_distance = max_distance - - if len(self.config.inputs.keys()) != 1: - raise NotImplementedError('current implementation supports input ' - 'with only one modality type') - - def episode(self): - """Episode data. - - Returns: - observations: a tuple with one element. This element is a numpy array of - size in_seq_len x observation_size x observation_size x 3 containing - in_seq_len images. - query: a numpy array of size - in_seq_len x observation_size X observation_size x 3 containing a query - image. - A tuple of size two. First element is a in_seq_len x 2 numpy array of - either 1.0 or 0.0. The i-th element denotes whether the i-th query - image is neraby (value 1.0) or not (value 0.0) to the i-th observation. - The second element in the tuple is a mask, a numpy array of size - in_seq_len x 1 and values 1.0 or 0.0 denoting whether the query is - valid or not (it can happen that the query is not valid, e.g. there are - not enough observations to have a meaningful queries). - """ - observations, states, path = self._exploration() - assert len(observations.values()[0]) == len(states) - - # The observations are taken along a smoothed trajectory following the path. - # We compute a mapping between the obeservations and the map vertices. - path_to_obs, obs_to_path = self._obs_to_state(path, states) - - # Go over all observations, and sample a query. With probability 0.5 this - # query is a nearby observation (defined as belonging to the same vertex - # in path). - g = self._env.graph - queries = [] - labels = [] - validity_masks = [] - query_index_in_observations = [] - for i, curr_o in enumerate(observations.values()[0]): - p = obs_to_path[i] - low = max(0, i - self._max_distance) - - # A list of lists of vertex indices. Each list in this group corresponds - # to one possible label. - index_groups = [[], [], []] - # Nearby visited indices, label 1. 
- nearby_visited = [ - ii for ii in path[low:i + 1] + g[p].keys() if ii in obs_to_path[:i] - ] - nearby_visited = [ii for ii in index_groups[1] if ii in path_to_obs] - # NOT Nearby visited indices, label 0. - not_nearby_visited = [ii for ii in path[:low] if ii not in g[p].keys()] - not_nearby_visited = [ii for ii in index_groups[0] if ii in path_to_obs] - # NOT visited indices, label 2. - not_visited = [ - ii for ii in range(g.number_of_nodes()) if ii not in path[:i + 1] - ] - - index_groups = [not_nearby_visited, nearby_visited, not_visited] - - # Consider only labels for which there are indices. - allowed_labels = [ii for ii, group in enumerate(index_groups) if group] - label = self._rng.choice(allowed_labels) - - indices = list(set(index_groups[label])) - max_obs_index = None if label == 2 else i - use_exploration_obs = False if label == 2 else True - o, obs_index, _ = self._sample_obs( - indices=indices, - observations=observations.values()[0], - observation_states=states, - path_to_obs=path_to_obs, - max_obs_index=max_obs_index, - use_exploration_obs=use_exploration_obs) - query_index_in_observations.append(obs_index) - - # If we cannot sample a valid query, we mark it as not valid in mask. - if o is None: - label = 0.0 - o = curr_o - validity_masks.append(0) - else: - validity_masks.append(1) - - queries.append(o.values()[0]) - labels.append(label) - - query = np.concatenate([np.expand_dims(q, axis=0) for q in queries], axis=0) - - def one_hot(label, num_labels=3): - a = np.zeros((num_labels,), dtype=np.float) - a[int(label)] = 1.0 - return a - - outputs = np.stack([one_hot(l) for l in labels], axis=0) - validity_mask = np.reshape( - np.array(validity_masks, dtype=np.int32), [-1, 1]) - - self.info['query_index_in_observations'] = query_index_in_observations - self.info['observation_states'] = states - - return observations, query, (outputs, validity_mask) - - def target_loss(self, truth, predicted, weights=None): - pass - - -class NeighboringQueriesTask(RandomExplorationBasedTask): - """A task of identifying whether two queries are closeby or not. - - The proximity between queries is defined by the length of the shorest path - between them. - """ - - def __init__( - self, - max_distance=1, - *args, - **kwargs): # pylint: disable=keyword-arg-before-vararg - """Initializes a NeighboringQueriesTask. - - Args: - max_distance: integer, the maximum distance in terms of number of vertices - between the two queries, so that they are considered neighboring. - *args: for super class. - **kwargs: for super class. - """ - super(NeighboringQueriesTask, self).__init__(*args, **kwargs) - self._max_distance = max_distance - if len(self.config.inputs.keys()) != 1: - raise NotImplementedError('current implementation supports input ' - 'with only one modality type') - - def episode(self): - """Episode data. - - Returns: - observations: a tuple with one element. This element is a numpy array of - size in_seq_len x observation_size x observation_size x 3 containing - in_seq_len images. - query: a numpy array of size - 2 x observation_size X observation_size x 3 containing a pair of query - images. - A tuple of size two. First element is a numpy array of size 2 containing - a one hot vector of whether the two observations are neighobring. Second - element is a boolean numpy value denoting whether this is a valid - episode. 
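A small sketch, with an invented graph, of the proximity notion this task is built on: two query vertices count as neighboring when their shortest-path distance is at most max_distance edges.

import networkx as nx

g = nx.path_graph(5)            # a chain graph 0-1-2-3-4
max_distance = 1
near_to_2 = set(nx.single_source_shortest_path(g, 2, cutoff=max_distance))
assert near_to_2 == {1, 2, 3}   # candidates for label 1; the rest give label 0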
- """ - observations, states, path = self._exploration() - assert len(observations.values()[0]) == len(states) - path_to_obs, _ = self._obs_to_state(path, states) - # Restrict path to ones for which observations have been generated. - path = [p for p in path if p in path_to_obs] - # Sample first query. - query1_index = self._rng.choice(path) - # Sample label. - label = self._rng.randint(2) - # Sample second query. - # If label == 1, then second query must be nearby, otherwise not. - closest_indices = nx.single_source_shortest_path( - self._env.graph, query1_index, self._max_distance).keys() - if label == 0: - # Closest indices on the path. - indices = [p for p in path if p not in closest_indices] - else: - # Indices which are not closest on the path. - indices = [p for p in closest_indices if p in path] - - query2_index = self._rng.choice(indices) - # Generate an observation. - query1, query1_index, _ = self._sample_obs( - [query1_index], - observations.values()[0], - states, - path_to_obs, - max_obs_index=None, - use_exploration_obs=True) - query2, query2_index, _ = self._sample_obs( - [query2_index], - observations.values()[0], - states, - path_to_obs, - max_obs_index=None, - use_exploration_obs=True) - - queries = np.concatenate( - [np.expand_dims(q, axis=0) for q in [query1, query2]]) - labels = np.array([0, 0]) - labels[label] = 1 - is_valid = np.array([1]) - - self.info['observation_states'] = states - self.info['query_indices_in_observations'] = [query1_index, query2_index] - - return observations, queries, (labels, is_valid) - - def target_loss(self, truth, predicted, weights=None): - pass - - -#@gin.configurable -class GotoStaticXTask(RandomExplorationBasedTask): - """Task go to a static X. - - If continuous reward is used only one goal is allowed so that the reward can - be computed as a delta-distance to that goal.. 
- """ - - def __init__(self, - step_reward=0.0, - goal_reward=1.0, - hit_wall_reward=-1.0, - done_at_target=False, - use_continuous_reward=False, - *args, - **kwargs): # pylint: disable=keyword-arg-before-vararg - super(GotoStaticXTask, self).__init__(*args, **kwargs) - if len(self.config.inputs.keys()) > 1: - raise NotImplementedError('current implementation supports input ' - 'with only one modality type or less.') - - self._step_reward = step_reward - self._goal_reward = goal_reward - self._hit_wall_reward = hit_wall_reward - self._done_at_target = done_at_target - self._use_continuous_reward = use_continuous_reward - - self._previous_path_length = None - - def episode(self): - observations, _, path = self._exploration() - if len(path) < 2: - raise ValueError('The exploration path has only one node.') - - g = self._env.graph - start = path[-1] - while True: - goal = self._rng.choice(path[:-1]) - if goal != start: - break - goal_path = nx.shortest_path(g, start, goal) - - init_orientation = self._rng.uniform(0, np.pi, (1,)) - trajectory = np.array( - [list(self._env.vertex_to_pose(p)) for p in goal_path]) - init_xy = np.reshape(trajectory[0, :], [-1]) - init_state = np.concatenate([init_xy, init_orientation], 0) - - trajectory = trajectory[1:, :] - deltas = envs_util.trajectory_to_deltas(trajectory, init_state) - output_seq_len = self._config.output.shape[0] - arr = _pad_or_clip_array(deltas, output_seq_len, output_mask=True) - # pylint: disable=unbalanced-tuple-unpacking - thetas, _, thetas_mask = arr - - query = self._env.observation(self._env.vertex_to_pose(goal)).values()[0] - - return observations, query, (thetas, thetas_mask) - - def reward(self, obs, done, info): - if 'wall_collision' in info and info['wall_collision']: - return obs, self._hit_wall_reward, done, info - - reward = 0.0 - current_vertex = self._env.pose_to_vertex(self._env.state) - - if current_vertex in self._env.targets(): - if self._done_at_target: - done = True - else: - obs = self._env.reset() - reward = self._goal_reward - else: - if self._use_continuous_reward: - if len(self._env.targets()) != 1: - raise ValueError( - 'FindX task with continuous reward is assuming only one target.') - goal_vertex = self._env.targets()[0] - path_length = self._compute_path_length(goal_vertex) - reward = self._previous_path_length - path_length - self._previous_path_length = path_length - else: - reward = self._step_reward - - return obs, reward, done, info - - def _compute_path_length(self, goal_vertex): - current_vertex = self._env.pose_to_vertex(self._env.state) - path = nx.shortest_path(self._env.graph, current_vertex, goal_vertex) - assert len(path) >= 2 - curr_xy = np.array(self._env.state[:2]) - next_xy = np.array(self._env.vertex_to_pose(path[1])) - last_step_distance = np.linalg.norm(next_xy - curr_xy) - return (len(path) - 2) * self._env.cell_size_px + last_step_distance - - def reset(self, observation): - if self._use_continuous_reward: - if len(self._env.targets()) != 1: - raise ValueError( - 'FindX task with continuous reward is assuming only one target.') - goal_vertex = self._env.targets()[0] - self._previous_path_length = self._compute_path_length(goal_vertex) - - def target_loss(self, truth, predicted, weights=None): - """Action classification loss. - - Args: - truth: a batch_size x sequence length x number of labels float - Tensor containing a one hot vector for each label in each batch and - time. 
- predicted: a batch_size x sequence length x number of labels float - Tensor containing a predicted distribution over all actions. - weights: a batch_size x sequence_length float Tensor of bool - denoting which actions are valid. - - Returns: - An average cross entropy over all batches and elements in sequence. - """ - return classification_loss( - truth=truth, predicted=predicted, weights=weights, is_one_hot=True) - - -class RelativeLocationTask(RandomExplorationBasedTask): - """A task of estimating the relative location of a query w.r.t current. - - It is to be used for debugging. It is designed such that the output is a - single value, out of a discrete set of values, so that it can be phrased as - a classification problem. - """ - - def __init__(self, num_labels, *args, **kwargs): - """Initializes a relative location task. - - Args: - num_labels: integer, number of orientations to bin the relative - orientation into. - *args: see super class. - **kwargs: see super class. - """ - super(RelativeLocationTask, self).__init__(*args, **kwargs) - self._num_labels = num_labels - if len(self.config.inputs.keys()) != 1: - raise NotImplementedError('current implementation supports input ' - 'with only one modality type') - - def episode(self): - observations, states, path = self._exploration() - - # Select a random element from history. - path_to_obs, _ = self._obs_to_state(path, states) - use_exploration_obs = not self._add_query_noise - query, _, query_state = self._sample_obs( - path[:-1], - observations.values()[0], - states, - path_to_obs, - max_obs_index=None, - use_exploration_obs=use_exploration_obs) - - x, y, theta = tuple(states[-1]) - q_x, q_y, _ = tuple(query_state) - t_x, t_y = q_x - x, q_y - y - (rt_x, rt_y) = (np.sin(theta) * t_x - np.cos(theta) * t_y, - np.cos(theta) * t_x + np.sin(theta) * t_y) - # Bins are [a(i), a(i+1)] for a(i) = -pi + 0.5 * bin_size + i * bin_size. - shift = np.pi * (1 - 1.0 / (2.0 * self._num_labels)) - orientation = np.arctan2(rt_y, rt_x) + shift - if orientation < 0: - orientation += 2 * np.pi - label = int(np.floor(self._num_labels * orientation / (2 * np.pi))) - - out_shape = self._config.output.shape - if len(out_shape) != 1: - raise ValueError('Output shape should be of rank 1.') - if out_shape[0] != self._num_labels: - raise ValueError('Output shape must be of size %d' % self._num_labels) - output = np.zeros(out_shape, dtype=np.float32) - output[label] = 1 - - return observations, query, (output, None) - - def target_loss(self, truth, predicted, weights=None): - return classification_loss( - truth=truth, predicted=predicted, weights=weights, is_one_hot=True) - - -class LocationClassificationTask(UnrolledTask): - """A task of classifying a location as one of several classes. - - The task does not have an input, but just a query and an output. The query - is an observation of the current location, e.g. an image taken from the - current state. The output is a label classifying this location in one of - predefined set of locations (or landmarks). - - The current implementation classifies locations as intersections based on the - number and directions of biforcations. It is expected that a location can have - at most 4 different directions, aligned with the axes. As each of these four - directions might be present or not, the number of possible intersections are - 2^4 = 16. 
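A minimal sketch of the 16-way labelling described above: each of the four agent-centered directions contributes one bit, so the set of directions present at an intersection maps to an integer class in [0, 15]. Which index corresponds to which direction is an assumption here, chosen only for illustration.

directions_present = {0, 2}            # e.g. bifurcations ahead and behind
label = sum(2 ** d for d in directions_present)
assert label == 5                      # bits 0 and 2 set -> class 5 of 16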
- """ - - def __init__(self, env, seed, *args, **kwargs): - super(LocationClassificationTask, self).__init__(*args, **kwargs) - self._env = env - self._rng = np.random.RandomState(seed) - # A location property which can be set. If not set, a random one is - # generated. - self._location = None - if len(self.config.inputs.keys()) > 1: - raise NotImplementedError('current implementation supports input ' - 'with only one modality type or less.') - - @property - def location(self): - return self._location - - @location.setter - def location(self, location): - self._location = location - - def episode(self): - # Get a location. If not set, sample on at a vertex with a random - # orientation - location = self._location - if location is None: - num_nodes = self._env.graph.number_of_nodes() - vertex = int(math.floor(self._rng.uniform(0, num_nodes))) - xy = self._env.vertex_to_pose(vertex) - theta = self._rng.uniform(0, 2 * math.pi) - location = np.concatenate( - [np.reshape(xy, [-1]), np.array([theta])], axis=0) - else: - vertex = self._env.pose_to_vertex(location) - - theta = location[2] - neighbors = self._env.graph.neighbors(vertex) - xy_s = [self._env.vertex_to_pose(n) for n in neighbors] - - def rotate(xy, theta): - """Rotates a vector around the origin by angle theta. - - Args: - xy: a numpy darray of shape (2, ) of floats containing the x and y - coordinates of a vector. - theta: a python float containing the rotation angle in radians. - - Returns: - A numpy darray of floats of shape (2,) containing the x and y - coordinates rotated xy. - """ - rotated_x = np.cos(theta) * xy[0] - np.sin(theta) * xy[1] - rotated_y = np.sin(theta) * xy[0] + np.cos(theta) * xy[1] - return np.array([rotated_x, rotated_y]) - - # Rotate all intersection biforcation by the orientation of the agent as the - # intersection label is defined in an agent centered fashion. - xy_s = [ - rotate(xy - location[0:2], -location[2] - math.pi / 4) for xy in xy_s - ] - th_s = [np.arctan2(xy[1], xy[0]) for xy in xy_s] - - out_shape = self._config.output.shape - if len(out_shape) != 1: - raise ValueError('Output shape should be of rank 1.') - num_labels = out_shape[0] - if num_labels != 16: - raise ValueError('Currently only 16 labels are supported ' - '(there are 16 different 4 way intersection types).') - - th_s = set([int(math.floor(4 * (th / (2 * np.pi) + 0.5))) for th in th_s]) - one_hot_label = np.zeros((num_labels,), dtype=np.float32) - label = 0 - for th in th_s: - label += pow(2, th) - one_hot_label[int(label)] = 1.0 - - query = self._env.observation(location).values()[0] - return [], query, (one_hot_label, None) - - def reward(self, obs, done, info): - raise ValueError('Do not call.') - - def target_loss(self, truth, predicted, weights=None): - return classification_loss( - truth=truth, predicted=predicted, weights=weights, is_one_hot=True) - - -class GotoStaticXNoExplorationTask(UnrolledTask): - """An interface for findX tasks without exploration. - - The agent is initialized a random location in a random world and a random goal - and the objective is for the agent to move toward the goal. This class - generates episode for such task. Each generates a sequence of observations x - and target outputs y. x is the observations and is an OrderedDict with keys - provided from config.inputs.keys() and the shapes provided in the - config.inputs. The output is a numpy arrays with the shape specified in the - config.output. 
The shape of the array is (sequence_length x action_size) where - action is the number of actions that can be done in the environment. Note that - config.output.shape should be set according to the number of actions that can - be done in the env. - target outputs y are the groundtruth value of each action that is computed - from the environment graph. The target output for each action is proportional - to the progress that each action makes. Target value of 1 means that the - action takes the agent one step closer, -1 means the action takes the agent - one step farther. Value of -2 means that action should not take place at all. - This can be because the action leads to collision or it wants to terminate the - episode prematurely. - """ - - def __init__(self, env, *args, **kwargs): - super(GotoStaticXNoExplorationTask, self).__init__(*args, **kwargs) - - if self._config.query is not None: - raise ValueError('query should be None.') - if len(self._config.output.shape) != 2: - raise ValueError('output should only have two dimensions:' - '(sequence_length x number_of_actions)') - for input_config in self._config.inputs.values(): - if input_config.shape[0] != self._config.output.shape[0]: - raise ValueError('the first dimension of the input and output should' - 'be the same.') - if len(self._config.output.shape) != 2: - raise ValueError('output shape should be ' - '(sequence_length x number_of_actions)') - - self._env = env - - def _compute_shortest_path_length(self, vertex, target_vertices): - """Computes length of the shortest path from vertex to any target vertexes. - - Args: - vertex: integer, index of the vertex in the environment graph. - target_vertices: list of the target vertexes - - Returns: - integer, minimum distance from the vertex to any of the target_vertices. - - Raises: - ValueError: if there is no path between the vertex and at least one of - the target_vertices. - """ - try: - return np.min([ - len(nx.shortest_path(self._env.graph, vertex, t)) - for t in target_vertices - ]) - except: - #logging.error('there is no path between vertex %d and at least one of ' - # 'the targets %r', vertex, target_vertices) - raise - - def _compute_gt_value(self, vertex, target_vertices): - """Computes groundtruth value of all the actions at the vertex. - - The value of each action is the difference each action makes in the length - of the shortest path to the goal. If an action takes the agent one step - closer to the goal the value is 1. In case, it takes the agent one step away - from the goal it would be -1. If it leads to collision or if the agent uses - action stop before reaching to the goal it is -2. To avoid scale issues the - gt_values are multipled by 0.5. - - Args: - vertex: integer, the index of current vertex. - target_vertices: list of the integer indexes of the target views. - - Returns: - numpy array with shape (action_size,) and each element is the groundtruth - value of each action based on the progress each action makes. - """ - action_size = self._config.output.shape[1] - output_value = np.ones((action_size), dtype=np.float32) * -2 - my_distance = self._compute_shortest_path_length(vertex, target_vertices) - for adj in self._env.graph[vertex]: - adj_distance = self._compute_shortest_path_length(adj, target_vertices) - if adj_distance is None: - continue - action_index = self._env.action( - self._env.vertex_to_pose(vertex), self._env.vertex_to_pose(adj)) - assert action_index is not None, ('{} is not adjacent to {}. 
There might ' - 'be a problem in environment graph ' - 'connectivity because there is no ' - 'direct edge between the given ' - 'vertices').format( - self._env.vertex_to_pose(vertex), - self._env.vertex_to_pose(adj)) - output_value[action_index] = my_distance - adj_distance - - return output_value * 0.5 - - def episode(self): - """Returns data needed to train and test a single episode. - - Returns: - (inputs, None, output) where inputs is a dictionary of modality types to - numpy arrays. The second element is query but we assume that the goal - is also given as part of observation so it should be None for this task, - and the outputs is the tuple of ground truth action values with the - shape of (sequence_length x action_size) that is coming from - config.output.shape and a numpy array with the shape of - (sequence_length,) that is 1 if the corresponding element of the - input and output should be used in the training optimization. - - Raises: - ValueError: If the output values for env.random_step_sequence is not - valid. - ValueError: If the shape of observations coming from the env is not - consistent with the config. - ValueError: If there is a modality type specified in the config but the - environment does not return that. - """ - # Sequence length is the first dimension of any of the input tensors. - sequence_length = self._config.inputs.values()[0].shape[0] - modality_types = self._config.inputs.keys() - - path, _, _, step_outputs = self._env.random_step_sequence( - max_len=sequence_length) - target_vertices = [self._env.pose_to_vertex(x) for x in self._env.targets()] - - if len(path) != len(step_outputs): - raise ValueError('path, and step_outputs should have equal length' - ' {}!={}'.format(len(path), len(step_outputs))) - - # Building up observations. observations will be a OrderedDict of - # modality types. The values are numpy arrays that follow the given shape - # in the input config for each modality type. - observations = collections.OrderedDict([k, []] for k in modality_types) - for step_output in step_outputs: - obs_dict = step_output[0] - # Only going over the modality types that are specified in the input - # config. - for modality_type in modality_types: - if modality_type not in obs_dict: - raise ValueError('modality type is not returned from the environment.' - '{} not in {}'.format(modality_type, - obs_dict.keys())) - obs = obs_dict[modality_type] - if np.any( - obs.shape != tuple(self._config.inputs[modality_type].shape[1:])): - raise ValueError( - 'The observations should have the same size as speicifed in' - 'config for modality type {}. {} != {}'.format( - modality_type, obs.shape, - self._config.inputs[modality_type].shape[1:])) - observations[modality_type].append(obs) - - gt_value = [self._compute_gt_value(v, target_vertices) for v in path] - - # pylint: disable=unbalanced-tuple-unpacking - gt_value, _, value_mask = _pad_or_clip_array( - np.array(gt_value), - sequence_length, - is_front_clip=False, - output_mask=True, - ) - for modality_type, obs in observations.iteritems(): - observations[modality_type], _, mask = _pad_or_clip_array( - np.array(obs), sequence_length, is_front_clip=False, output_mask=True) - assert np.all(mask == value_mask) - - return observations, None, (gt_value, value_mask) - - def reset(self, observation): - """Called after the environment is reset.""" - pass - - def target_loss(self, true_targets, targets, weights=None): - """A loss for training a task model. 
- - This loss measures the discrepancy between the task outputs, the true and - predicted ones. - - Args: - true_targets: tf.Tensor of tf.float32 with the shape of - (batch_size x sequence_length x action_size). - targets: tf.Tensor of tf.float32 with the shape of - (batch_size x sequence_length x action_size). - weights: tf.Tensor of tf.bool with the shape of - (batch_size x sequence_length). - - Raises: - ValueError: if the shapes of the input tensors are not consistent. - - Returns: - L2 loss between the predicted action values and true action values. - """ - targets_shape = targets.get_shape().as_list() - true_targets_shape = true_targets.get_shape().as_list() - if len(targets_shape) != 3 or len(true_targets_shape) != 3: - raise ValueError('invalid shape for targets or true_targets_shape') - if np.any(targets_shape != true_targets_shape): - raise ValueError('the shape of targets and true_targets are not the same' - '{} != {}'.format(targets_shape, true_targets_shape)) - - if weights is not None: - # Filtering targets and true_targets using weights. - weights_shape = weights.get_shape().as_list() - if np.any(weights_shape != targets_shape[0:2]): - raise ValueError('The first two elements of weights shape should match' - 'target. {} != {}'.format(weights_shape, - targets_shape)) - true_targets = tf.boolean_mask(true_targets, weights) - targets = tf.boolean_mask(targets, weights) - - return tf.losses.mean_squared_error(tf.reshape(targets, [-1]), - tf.reshape(true_targets, [-1])) - - def reward(self, obs, done, info): - raise NotImplementedError('reward is not implemented for this task') - - -################################################################################ -class NewTask(UnrolledTask): - def __init__(self, env, *args, **kwargs): - super(NewTask, self).__init__(*args, **kwargs) - self._env = env - - def _compute_shortest_path_length(self, vertex, target_vertices): - """Computes length of the shortest path from vertex to any target vertexes. - - Args: - vertex: integer, index of the vertex in the environment graph. - target_vertices: list of the target vertexes - - Returns: - integer, minimum distance from the vertex to any of the target_vertices. - - Raises: - ValueError: if there is no path between the vertex and at least one of - the target_vertices. - """ - try: - return np.min([ - len(nx.shortest_path(self._env.graph, vertex, t)) - for t in target_vertices - ]) - except: - logging.error('there is no path between vertex %d and at least one of ' - 'the targets %r', vertex, target_vertices) - raise - - def _compute_gt_value(self, vertex, target_vertices): - """Computes groundtruth value of all the actions at the vertex. - - The value of each action is the difference each action makes in the length - of the shortest path to the goal. If an action takes the agent one step - closer to the goal the value is 1. In case, it takes the agent one step away - from the goal it would be -1. If it leads to collision or if the agent uses - action stop before reaching to the goal it is -2. To avoid scale issues the - gt_values are multipled by 0.5. - - Args: - vertex: integer, the index of current vertex. - target_vertices: list of the integer indexes of the target views. - - Returns: - numpy array with shape (action_size,) and each element is the groundtruth - value of each action based on the progress each action makes. 
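A worked example of the value computation described above, with invented distances: a valid action gets 0.5 * (current distance - distance after the action), while an invalid action keeps the default of -2 before scaling, i.e. -1 after scaling.

import numpy as np

action_size = 3
my_distance = 4                        # shortest path length from the current vertex
adj_distances = {0: 3, 1: 5}           # lengths after taking action 0 or 1;
                                       # action 2 is invalid (e.g. a collision)
values = np.full((action_size,), -2.0, dtype=np.float32)
for action, adj_distance in adj_distances.items():
  values[action] = my_distance - adj_distance
values *= 0.5
assert list(values) == [0.5, -0.5, -1.0]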
- """ - action_size = self._config.output.shape[1] - output_value = np.ones((action_size), dtype=np.float32) * -2 - # own compute _compute_shortest_path_length - returnts float - my_distance = self._compute_shortest_path_length(vertex, target_vertices) - for adj in self._env.graph[vertex]: - adj_distance = self._compute_shortest_path_length(adj, target_vertices) - if adj_distance is None: - continue - action_index = self._env.action( - self._env.vertex_to_pose(vertex), self._env.vertex_to_pose(adj)) - assert action_index is not None, ('{} is not adjacent to {}. There might ' - 'be a problem in environment graph ' - 'connectivity because there is no ' - 'direct edge between the given ' - 'vertices').format( - self._env.vertex_to_pose(vertex), - self._env.vertex_to_pose(adj)) - output_value[action_index] = my_distance - adj_distance - - return output_value * 0.5 - - def episode(self): - """Returns data needed to train and test a single episode. - - Returns: - (inputs, None, output) where inputs is a dictionary of modality types to - numpy arrays. The second element is query but we assume that the goal - is also given as part of observation so it should be None for this task, - and the outputs is the tuple of ground truth action values with the - shape of (sequence_length x action_size) that is coming from - config.output.shape and a numpy array with the shape of - (sequence_length,) that is 1 if the corresponding element of the - input and output should be used in the training optimization. - - Raises: - ValueError: If the output values for env.random_step_sequence is not - valid. - ValueError: If the shape of observations coming from the env is not - consistent with the config. - ValueError: If there is a modality type specified in the config but the - environment does not return that. - """ - # Sequence length is the first dimension of any of the input tensors. - sequence_length = self._config.inputs.values()[0].shape[0] - modality_types = self._config.inputs.keys() - - path, _, _, step_outputs = self._env.random_step_sequence( - max_len=sequence_length) - target_vertices = [self._env.pose_to_vertex(x) for x in self._env.targets()] - - if len(path) != len(step_outputs): - raise ValueError('path, and step_outputs should have equal length' - ' {}!={}'.format(len(path), len(step_outputs))) - - # Building up observations. observations will be a OrderedDict of - # modality types. The values are numpy arrays that follow the given shape - # in the input config for each modality type. - observations = collections.OrderedDict([k, []] for k in modality_types) - for step_output in step_outputs: - obs_dict = step_output[0] - # Only going over the modality types that are specified in the input - # config. - for modality_type in modality_types: - if modality_type not in obs_dict: - raise ValueError('modality type is not returned from the environment.' - '{} not in {}'.format(modality_type, - obs_dict.keys())) - obs = obs_dict[modality_type] - if np.any( - obs.shape != tuple(self._config.inputs[modality_type].shape[1:])): - raise ValueError( - 'The observations should have the same size as speicifed in' - 'config for modality type {}. 
{} != {}'.format( - modality_type, obs.shape, - self._config.inputs[modality_type].shape[1:])) - observations[modality_type].append(obs) - - gt_value = [self._compute_gt_value(v, target_vertices) for v in path] - - # pylint: disable=unbalanced-tuple-unpacking - gt_value, _, value_mask = _pad_or_clip_array( - np.array(gt_value), - sequence_length, - is_front_clip=False, - output_mask=True, - ) - for modality_type, obs in observations.iteritems(): - observations[modality_type], _, mask = _pad_or_clip_array( - np.array(obs), sequence_length, is_front_clip=False, output_mask=True) - assert np.all(mask == value_mask) - - return observations, None, (gt_value, value_mask) - - def reset(self, observation): - """Called after the environment is reset.""" - pass - - def target_loss(self, true_targets, targets, weights=None): - """A loss for training a task model. - - This loss measures the discrepancy between the task outputs, the true and - predicted ones. - - Args: - true_targets: tf.Tensor of tf.float32 with the shape of - (batch_size x sequence_length x action_size). - targets: tf.Tensor of tf.float32 with the shape of - (batch_size x sequence_length x action_size). - weights: tf.Tensor of tf.bool with the shape of - (batch_size x sequence_length). - - Raises: - ValueError: if the shapes of the input tensors are not consistent. - - Returns: - L2 loss between the predicted action values and true action values. - """ - targets_shape = targets.get_shape().as_list() - true_targets_shape = true_targets.get_shape().as_list() - if len(targets_shape) != 3 or len(true_targets_shape) != 3: - raise ValueError('invalid shape for targets or true_targets_shape') - if np.any(targets_shape != true_targets_shape): - raise ValueError('the shape of targets and true_targets are not the same' - '{} != {}'.format(targets_shape, true_targets_shape)) - - if weights is not None: - # Filtering targets and true_targets using weights. - weights_shape = weights.get_shape().as_list() - if np.any(weights_shape != targets_shape[0:2]): - raise ValueError('The first two elements of weights shape should match' - 'target. 
{} != {}'.format(weights_shape, - targets_shape)) - true_targets = tf.boolean_mask(true_targets, weights) - targets = tf.boolean_mask(targets, weights) - - return tf.losses.mean_squared_error(tf.reshape(targets, [-1]), - tf.reshape(true_targets, [-1])) - - def reward(self, obs, done, info): - raise NotImplementedError('reward is not implemented for this task') diff --git a/spaces/NSect/VALL-E-X/utils/g2p/cleaners.py b/spaces/NSect/VALL-E-X/utils/g2p/cleaners.py deleted file mode 100644 index 8bb53977673afa893476cab1b946d6a9a00f57ea..0000000000000000000000000000000000000000 --- a/spaces/NSect/VALL-E-X/utils/g2p/cleaners.py +++ /dev/null @@ -1,61 +0,0 @@ -import re -from utils.g2p.japanese import japanese_to_romaji_with_accent, japanese_to_ipa, japanese_to_ipa2, japanese_to_ipa3 -from utils.g2p.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo, chinese_to_romaji, chinese_to_lazy_ipa, chinese_to_ipa, chinese_to_ipa2 -from utils.g2p.english import english_to_lazy_ipa, english_to_ipa2, english_to_lazy_ipa2 -patterns = [r'\[EN\](.*?)\[EN\]', r'\[ZH\](.*?)\[ZH\]', r'\[JA\](.*?)\[JA\]'] -def japanese_cleaners(text): - text = japanese_to_romaji_with_accent(text) - text = re.sub(r'([A-Za-z])$', r'\1.', text) - return text - -def japanese_cleaners2(text): - return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…') - -def chinese_cleaners(text): - '''Pipeline for Chinese text''' - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text) - return text - -def cje_cleaners(text): - matches = [] - for pattern in patterns: - matches.extend(re.finditer(pattern, text)) - - matches.sort(key=lambda x: x.start()) # Sort matches by their start positions - - outputs = "" - output_langs = [] - - for match in matches: - text_segment = text[match.start():match.end()] - phon = clean_one(text_segment) - if "[EN]" in text_segment: - lang = 'en' - elif "[ZH]" in text_segment: - lang = 'zh' - elif "[JA]" in text_segment: - lang = 'ja' - else: - raise ValueError("If you see this error, please report this bug to issues.") - outputs += phon - output_langs += [lang] * len(phon) - assert len(outputs) == len(output_langs) - return outputs, output_langs - - -def clean_one(text): - if text.find('[ZH]') != -1: - text = re.sub(r'\[ZH\](.*?)\[ZH\]', - lambda x: chinese_to_ipa(x.group(1))+' ', text) - if text.find('[JA]') != -1: - text = re.sub(r'\[JA\](.*?)\[JA\]', - lambda x: japanese_to_ipa2(x.group(1))+' ', text) - if text.find('[EN]') != -1: - text = re.sub(r'\[EN\](.*?)\[EN\]', - lambda x: english_to_ipa2(x.group(1))+' ', text) - text = re.sub(r'\s+$', '', text) - text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text) - return text diff --git a/spaces/Nee001/bing0/src/components/toaster.tsx b/spaces/Nee001/bing0/src/components/toaster.tsx deleted file mode 100644 index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000 --- a/spaces/Nee001/bing0/src/components/toaster.tsx +++ /dev/null @@ -1,3 +0,0 @@ -'use client' - -export { Toaster } from 'react-hot-toast' diff --git a/spaces/Nephele/bert-vits2-multi-voice/text/__init__.py b/spaces/Nephele/bert-vits2-multi-voice/text/__init__.py deleted file mode 100644 index 7566bf351ca9b95af9cdc6d729557a9da083800f..0000000000000000000000000000000000000000 --- a/spaces/Nephele/bert-vits2-multi-voice/text/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from text.symbols import * - - -_symbol_to_id = {s: i for i, s in enumerate(symbols)} - -def 
cleaned_text_to_sequence(cleaned_text, tones, language): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - phones = [_symbol_to_id[symbol] for symbol in cleaned_text] - tone_start = language_tone_start_map[language] - tones = [i + tone_start for i in tones] - lang_id = language_id_map[language] - lang_ids = [lang_id for i in phones] - return phones, tones, lang_ids - -def get_bert(norm_text, word2ph, language): - from .chinese_bert import get_bert_feature as zh_bert - from .english_bert_mock import get_bert_feature as en_bert - lang_bert_func_map = { - 'ZH': zh_bert, - 'EN': en_bert - } - bert = lang_bert_func_map[language](norm_text, word2ph) - return bert diff --git a/spaces/Nicholaspei/LangChain-ChatLLM/app.py b/spaces/Nicholaspei/LangChain-ChatLLM/app.py deleted file mode 100644 index ad9b5e1acfc7f2b433fa826a365279a0a5912e58..0000000000000000000000000000000000000000 --- a/spaces/Nicholaspei/LangChain-ChatLLM/app.py +++ /dev/null @@ -1,272 +0,0 @@ -import os - -import gradio as gr -import nltk -import sentence_transformers -import torch -from duckduckgo_search import ddg -from duckduckgo_search.utils import SESSION -from langchain.chains import RetrievalQA -from langchain.document_loaders import UnstructuredFileLoader -from langchain.embeddings import JinaEmbeddings -from langchain.embeddings.huggingface import HuggingFaceEmbeddings -from langchain.prompts import PromptTemplate -from langchain.prompts.prompt import PromptTemplate -from langchain.vectorstores import FAISS - -from chatllm import ChatLLM -from chinese_text_splitter import ChineseTextSplitter - -nltk.data.path.append('./nltk_data') - -embedding_model_dict = { - "ernie-tiny": "nghuyong/ernie-3.0-nano-zh", - "ernie-base": "nghuyong/ernie-3.0-base-zh", - "text2vec-base": "GanymedeNil/text2vec-base-chinese", - "ViT-B-32": 'ViT-B-32::laion2b-s34b-b79k' -} - -llm_model_dict = { - "ChatGLM-6B-int8": "THUDM/chatglm-6b-int8", - "ChatGLM-6B-int4": "THUDM/chatglm-6b-int4", - "ChatGLM-6b-int4-qe": "THUDM/chatglm-6b-int4-qe", - "Minimax": "Minimax" -} - -DEVICE = "cuda" if torch.cuda.is_available( -) else "mps" if torch.backends.mps.is_available() else "cpu" - - -def search_web(query): - - SESSION.proxies = { - "http": f"socks5h://localhost:7890", - "https": f"socks5h://localhost:7890" - } - results = ddg(query) - web_content = '' - if results: - for result in results: - web_content += result['body'] - return web_content - - -def load_file(filepath): - if filepath.lower().endswith(".pdf"): - loader = UnstructuredFileLoader(filepath) - textsplitter = ChineseTextSplitter(pdf=True) - docs = loader.load_and_split(textsplitter) - else: - loader = UnstructuredFileLoader(filepath, mode="elements") - textsplitter = ChineseTextSplitter(pdf=False) - docs = loader.load_and_split(text_splitter=textsplitter) - return docs - - -def init_knowledge_vector_store(embedding_model, filepath): - if embedding_model == "ViT-B-32": - jina_auth_token = os.getenv('jina_auth_token') - embeddings = JinaEmbeddings( - jina_auth_token=jina_auth_token, - model_name=embedding_model_dict[embedding_model]) - else: - embeddings = HuggingFaceEmbeddings( - model_name=embedding_model_dict[embedding_model], ) - embeddings.client = sentence_transformers.SentenceTransformer( - embeddings.model_name, device=DEVICE) - - docs = load_file(filepath) - - vector_store = FAISS.from_documents(docs, embeddings) - return 
vector_store - - -def get_knowledge_based_answer(query, - large_language_model, - vector_store, - VECTOR_SEARCH_TOP_K, - web_content, - history_len, - temperature, - top_p, - chat_history=[]): - if web_content: - prompt_template = f"""基于以下已知信息,简洁和专业的来回答用户的问题。 - 如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。 - 已知网络检索内容:{web_content}""" + """ - 已知内容: - {context} - 问题: - {question}""" - else: - prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。 - 如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。 - - 已知内容: - {context} - - 问题: - {question}""" - prompt = PromptTemplate(template=prompt_template, - input_variables=["context", "question"]) - chatLLM = ChatLLM() - chatLLM.history = chat_history[-history_len:] if history_len > 0 else [] - if large_language_model == "Minimax": - chatLLM.model = 'Minimax' - else: - chatLLM.load_model( - model_name_or_path=llm_model_dict[large_language_model]) - chatLLM.temperature = temperature - chatLLM.top_p = top_p - - knowledge_chain = RetrievalQA.from_llm( - llm=chatLLM, - retriever=vector_store.as_retriever( - search_kwargs={"k": VECTOR_SEARCH_TOP_K}), - prompt=prompt) - knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate( - input_variables=["page_content"], template="{page_content}") - - knowledge_chain.return_source_documents = True - - result = knowledge_chain({"query": query}) - return result - - -def clear_session(): - return '', None - - -def predict(input, - large_language_model, - embedding_model, - file_obj, - VECTOR_SEARCH_TOP_K, - history_len, - temperature, - top_p, - use_web, - history=None): - if history == None: - history = [] - print(file_obj.name) - vector_store = init_knowledge_vector_store(embedding_model, file_obj.name) - if use_web == 'True': - web_content = search_web(query=input) - else: - web_content = '' - resp = get_knowledge_based_answer( - query=input, - large_language_model=large_language_model, - vector_store=vector_store, - VECTOR_SEARCH_TOP_K=VECTOR_SEARCH_TOP_K, - web_content=web_content, - chat_history=history, - history_len=history_len, - temperature=temperature, - top_p=top_p, - ) - print(resp) - history.append((input, resp['result'])) - return '', history, history - - -if __name__ == "__main__": - block = gr.Blocks() - with block as demo: - gr.Markdown("""

LangChain-ChatLLM-Webui

-
- 本项目基于LangChain和大型语言模型系列模型, 提供基于本地知识的自动问答应用.
- 目前项目提供基于ChatGLM-6B 系列、Minimax的LLM和包括text2vec-base-chinese、ernie-3.0-zh系列以及由Jina提供的ViT-B-32::laion2b-s34b-b79k等多个Embedding模型, 支持上传 txt、docx、md等文本格式文件.
- 后续将提供更加多样化的LLM、Embedding和参数选项供用户尝试, 欢迎关注Github地址.
- 本项目已内置开发者自己的key,用户无需输入自己的相关key.
- 当然,更推荐您点击右上角的Duplicate this Space,将项目Fork到自己的Space中,保护个人隐私,且避免排队! -
- """) - with gr.Row(): - with gr.Column(scale=1): - model_choose = gr.Accordion("模型选择") - with model_choose: - large_language_model = gr.Dropdown( - list(llm_model_dict.keys()), - label="large language model", - value="ChatGLM-6B-int4") - - embedding_model = gr.Dropdown(list( - embedding_model_dict.keys()), - label="Embedding model", - value="text2vec-base") - - file = gr.File(label='请上传知识库文件, 目前支持txt、docx、md格式', - file_types=['.txt', '.md', '.docx']) - - use_web = gr.Radio(["True", "False"], - label="Web Search", - value="False") - model_argument = gr.Accordion("模型参数配置") - - with model_argument: - - VECTOR_SEARCH_TOP_K = gr.Slider( - 1, - 10, - value=6, - step=1, - label="vector search top k", - interactive=True) - - HISTORY_LEN = gr.Slider(0, - 3, - value=0, - step=1, - label="history len", - interactive=True) - - temperature = gr.Slider(0, - 1, - value=0.01, - step=0.01, - label="temperature", - interactive=True) - top_p = gr.Slider(0, - 1, - value=0.9, - step=0.1, - label="top_p", - interactive=True) - - with gr.Column(scale=4): - chatbot = gr.Chatbot(label='ChatLLM').style(height=600) - message = gr.Textbox(label='请输入问题') - state = gr.State() - - with gr.Row(): - clear_history = gr.Button("🧹 清除历史对话") - send = gr.Button("🚀 发送") - - send.click(predict, - inputs=[ - message, large_language_model, - embedding_model, file, VECTOR_SEARCH_TOP_K, - HISTORY_LEN, temperature, top_p, use_web, - state - ], - outputs=[message, chatbot, state]) - clear_history.click(fn=clear_session, - inputs=[], - outputs=[chatbot, state], - queue=False) - - message.submit(predict, - inputs=[ - message, large_language_model, - embedding_model, file, - VECTOR_SEARCH_TOP_K, HISTORY_LEN, - temperature, top_p, use_web, state - ], - outputs=[message, chatbot, state]) - gr.Markdown("""提醒:
- 1. 使用时请先上传自己的知识文件,并且文件中不含某些特殊字符,否则将返回error.
- 2. 有任何使用问题,请通过[问题交流区](https://huggingface.co/spaces/thomas-yanxin/LangChain-ChatLLM/discussions)或[Github Issue区](https://github.com/thomas-yanxin/LangChain-ChatGLM-Webui/issues)进行反馈.
- """) - demo.queue().launch(server_name='0.0.0.0', share=False) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/self_auto_bleu.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/self_auto_bleu.py deleted file mode 100644 index 062bb82f669f63a537b6ee8df4d42d292eb2575e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/self_auto_bleu.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import nltk -from misc.bleu_utils import sentence_bleu -import warnings - - -def get_target_sequences(manifest, ground_truth, to_take=1000): - import json - import pathlib - - with open(ground_truth, 'r') as fin: - original_continuations = json.loads(fin.read()) - - sequence2length = [(k, v[0]) for k, v in original_continuations.items()] - assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds - - sequence2length.sort(key=lambda x: x[1]) - to_take_sequences = set(v[0] for v in sequence2length[:to_take]) - to_take_ids = [] - - with open(manifest, 'r') as f: - f.readline() - - for i, line in enumerate(f.readlines()): - seq_id = line.split()[0] - seq_id = pathlib.Path(seq_id).name.split('__')[0] - - if seq_id in to_take_sequences: - to_take_ids.append(i) - - print(f'Took {len(to_take_ids)} ids') - return set(to_take_ids) - - -def get_args(): - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('--asr-transcript', type=str, - help='Path to the transcript file.') - - parser.add_argument('--manifest', required=True) - parser.add_argument('--prompts-description', required=True) - - parser.add_argument('--cut-id', action='store_true', - help='Whether cut the first token (typically a seq id)') - parser.add_argument('--cut-tail', action='store_true', - help='Whether cut the last token (typically a speaker id)') - parser.add_argument('--debug', action='store_true') - - args = parser.parse_args() - - return args - - -def get_self_bleu(utterances, averaging_mode, weights): - self_bleu = [] - - for i in range(len(utterances)): - hypo = utterances[i] - rest = utterances[:i] + utterances[i+1:] - - self_bleu.append(sentence_bleu(rest, hypo, weights, - no_length_penalty=True, averaging_mode=averaging_mode)) - - return self_bleu - - -def get_self_bleu2_arithmetic(utterances): - weights = (0.5, 0.5) # equal weight for unigrams and bigrams - return get_self_bleu(utterances, averaging_mode='arithmetic', weights=weights) - - -def get_self_bleu2_geometric(utterances): - weights = (0.5, 0.5) - return get_self_bleu(utterances, averaging_mode='geometric', weights=weights) - - -def get_auto_bleu2_arithmetic(utterances): - weights = (0.5, 0.5) - return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances] - - -def get_auto_bleu2_geometric(utterances): - weights = (0.5, 0.5) - return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances] - - -def get_auto_bleu3_geometric(utterances): - weights = (1./3, 1./3, 1./3) - return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances] - - -def get_auto_bleu3_arithmetic(utterances): - weights = (1./3, 1./3, 1./3) - return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances] - - -def 
get_self_bleu3_arithmetic(utterances): - weights = (1./3, 1./3, 1./3) - return get_self_bleu(utterances, averaging_mode='arithmetic', weights=weights) - - -def get_self_bleu3_geometric(utterances): - weights = (1./3, 1./3, 1./3) - return get_self_bleu(utterances, averaging_mode='geometric', weights=weights) - - -def auto_bleu(sentence, weights, mean_mode='arithmetic'): - if len(sentence) <= 1: - return 0 - - N = len(weights) - - bleu_n = np.zeros([N]) - for n in range(N): - targ_ngrams = list(nltk.ngrams(sentence, n+1)) - for p in range(len(targ_ngrams)): - left = sentence[:p] - right = sentence[(p+n+1):] - rest_ngrams = list(nltk.ngrams(left, n+1)) + \ - list(nltk.ngrams(right, n+1)) - # compute the nb of matching ngrams - bleu_n[n] += targ_ngrams[p] in rest_ngrams - bleu_n[n] /= len(targ_ngrams) # average them to get a proportion - - weights = np.array(weights) - if mean_mode == 'arithmetic': - return (bleu_n * weights).sum() - elif mean_mode == 'geometric': - return (bleu_n ** weights).prod() - else: - raise ValueError(f'Unknown agggregation mode {mean_mode}') - - -def main(): - from multiprocessing import Pool - - args = get_args() - target_ids = get_target_sequences(args.manifest, args.prompts_description) - - with open(args.asr_transcript, 'r') as fin: - lines = fin.readlines() - - terms = [x.strip().split() for x in lines] - filtered = [] - for term in terms: - line_id = int(term[-1].split('-')[1][:-1]) - if line_id in target_ids: - filtered.append(term) - terms = filtered - - if args.cut_id: - terms = [x[1:] for x in terms] - if args.cut_tail: - terms = [x[:-1] for x in terms] - - if args.debug: - terms = terms[:10] - - tasks = [ - ('Self-BLEU2-arithmetic', get_self_bleu2_arithmetic), - ('Self-BLEU2-geometric', get_self_bleu2_geometric), - ('Auto-BLEU2-arithmetic', get_auto_bleu2_arithmetic), - ('Auto-BLEU2-geometric', get_auto_bleu2_geometric), - - ('Self-BLEU3-arithmetic', get_self_bleu3_arithmetic), - ('Self-BLEU3-geometric', get_self_bleu3_geometric), - ('Auto-BLEU3-arithmetic', get_auto_bleu3_arithmetic), - ('Auto-BLEU3-geometric', get_auto_bleu3_geometric), - ] - - n_processes = min(16, len(tasks)) - with Pool(n_processes) as pool: - metrics = pool.map(run_f, [(t[1], terms) for t in tasks]) - - for (metric_name, _), metric in zip(tasks, metrics): - metric, sem = np.mean(metric), np.std(metric) / np.sqrt(len(metric)) - - metric, sem = [ - round(100 * x, 2) for x in [metric, sem] - ] - - print(f'{metric_name} {metric} +- {sem}') - - -def run_f(task_params): - f, terms = task_params - return f(terms) - - -if __name__ == '__main__': - # NLTK produces warnings - warnings.filterwarnings("ignore") - - main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/roberta/model_xlmr.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/roberta/model_xlmr.py deleted file mode 100644 index cf6e354d53b918dd4c7c78bfcd38ac0d63cab3bd..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/roberta/model_xlmr.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
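# A minimal, self-contained sketch of the auto-BLEU computation from
# self_auto_bleu.py above: for each n-gram of a sentence, check whether the
# same n-gram occurs in the tokens to its left or right, average the match
# rate per n-gram order, and combine the orders with the given weights.
# This drops the nltk / misc.bleu_utils dependencies and only illustrates the
# leave-one-out matching loop; it is not a drop-in replacement for the metric.
import math

def auto_bleu_sketch(tokens, weights=(0.5, 0.5), mean_mode='arithmetic'):
    if len(tokens) <= 1:
        return 0.0
    per_order = []
    for order, _ in enumerate(weights, start=1):
        positions = range(len(tokens) - order + 1)
        if len(positions) == 0:
            per_order.append(0.0)
            continue
        matches = 0
        for p in positions:
            target = tuple(tokens[p:p + order])
            left, right = tokens[:p], tokens[p + order:]
            rest = [tuple(left[i:i + order]) for i in range(len(left) - order + 1)]
            rest += [tuple(right[i:i + order]) for i in range(len(right) - order + 1)]
            matches += target in rest
        per_order.append(matches / len(positions))
    if mean_mode == 'arithmetic':
        return sum(w * b for w, b in zip(weights, per_order))
    if mean_mode == 'geometric':
        return math.prod(b ** w for w, b in zip(weights, per_order))
    raise ValueError(f'Unknown aggregation mode {mean_mode}')

# Repetitive utterances score higher than non-repetitive ones:
print(auto_bleu_sketch('the cat sat on the mat the cat'.split()))        # > 0
print(auto_bleu_sketch('a completely non repetitive sentence'.split()))  # 0.0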
-""" -Unsupervised Cross-lingual Representation Learning at Scale -""" - -from fairseq.models import register_model - -from .hub_interface import RobertaHubInterface -from .model import RobertaModel - - -@register_model("xlmr") -class XLMRModel(RobertaModel): - @classmethod - def hub_models(cls): - return { - "xlmr.base": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz", - "xlmr.large": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz", - "xlmr.xl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xl.tar.gz", - "xlmr.xxl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xxl.tar.gz", - } - - @classmethod - def from_pretrained( - cls, - model_name_or_path, - checkpoint_file="model.pt", - data_name_or_path=".", - bpe="sentencepiece", - **kwargs - ): - from fairseq import hub_utils - - x = hub_utils.from_pretrained( - model_name_or_path, - checkpoint_file, - data_name_or_path, - archive_map=cls.hub_models(), - bpe=bpe, - load_checkpoint_heads=True, - **kwargs, - ) - return RobertaHubInterface(x["args"], x["task"], x["models"][0]) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/masked_lm.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/masked_lm.py deleted file mode 100644 index 0c08132fb742de3d3d1beea0b8fce979ff408ebb..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/masked_lm.py +++ /dev/null @@ -1,255 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass, field -import logging -import os - -from omegaconf import MISSING, II, OmegaConf - -import numpy as np -from fairseq import utils -from fairseq.data import ( - Dictionary, - IdDataset, - MaskTokensDataset, - NestedDictionaryDataset, - NumelDataset, - NumSamplesDataset, - PrependTokenDataset, - RightPadDataset, - SortDataset, - TokenBlockDataset, - data_utils, -) -from fairseq.data.encoders.utils import get_whole_word_mask -from fairseq.data.shorten_dataset import maybe_shorten_dataset -from fairseq.dataclass import FairseqDataclass -from fairseq.tasks import FairseqTask, register_task - -from .language_modeling import SAMPLE_BREAK_MODE_CHOICES, SHORTEN_METHOD_CHOICES - - -logger = logging.getLogger(__name__) - - -@dataclass -class MaskedLMConfig(FairseqDataclass): - data: str = field( - default=MISSING, - metadata={ - "help": "colon separated path to data directories list, \ - will be iterated upon during epochs in round-robin manner" - }, - ) - sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field( - default="none", - metadata={ - "help": 'If omitted or "none", fills each sample with tokens-per-sample ' - 'tokens. If set to "complete", splits samples only at the end ' - "of sentence, but may include multiple sentences per sample. " - '"complete_doc" is similar but respects doc boundaries. ' - 'If set to "eos", includes only one sentence per sample.' 
- }, - ) - tokens_per_sample: int = field( - default=1024, - metadata={"help": "max number of tokens per sample for LM dataset"}, - ) - mask_prob: float = field( - default=0.15, - metadata={"help": "probability of replacing a token with mask"}, - ) - leave_unmasked_prob: float = field( - default=0.1, - metadata={"help": "probability that a masked token is unmasked"}, - ) - random_token_prob: float = field( - default=0.1, - metadata={"help": "probability of replacing a token with a random token"}, - ) - freq_weighted_replacement: bool = field( - default=False, - metadata={"help": "sample random replacement words based on word frequencies"}, - ) - mask_whole_words: bool = field( - default=False, - metadata={"help": "mask whole words; you may also want to set --bpe"}, - ) - mask_multiple_length: int = field( - default=1, - metadata={"help": "repeat the mask indices multiple times"}, - ) - mask_stdev: float = field( - default=0.0, - metadata={"help": "stdev of the mask length"}, - ) - shorten_method: SHORTEN_METHOD_CHOICES = field( - default="none", - metadata={ - "help": "if not none, shorten sequences that exceed --tokens-per-sample" - }, - ) - shorten_data_split_list: str = field( - default="", - metadata={ - "help": "comma-separated list of dataset splits to apply shortening to, " - 'e.g., "train,valid" (default: all dataset splits)' - }, - ) - seed: int = II("common.seed") - - -@register_task("masked_lm", dataclass=MaskedLMConfig) -class MaskedLMTask(FairseqTask): - - cfg: MaskedLMConfig - - """Task for training masked language models (e.g., BERT, RoBERTa).""" - - def __init__(self, cfg: MaskedLMConfig, dictionary): - super().__init__(cfg) - self.dictionary = dictionary - - # add mask token - self.mask_idx = dictionary.add_symbol("") - - @classmethod - def setup_task(cls, cfg: MaskedLMConfig, **kwargs): - paths = utils.split_paths(cfg.data) - assert len(paths) > 0 - dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) - logger.info("dictionary: {} types".format(len(dictionary))) - return cls(cfg, dictionary) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split. - - Args: - split (str): name of the split (e.g., train, valid, test) - """ - paths = utils.split_paths(self.cfg.data) - assert len(paths) > 0 - data_path = paths[(epoch - 1) % len(paths)] - split_path = os.path.join(data_path, split) - - dataset = data_utils.load_indexed_dataset( - split_path, - self.source_dictionary, - combine=combine, - ) - if dataset is None: - raise FileNotFoundError( - "Dataset not found: {} ({})".format(split, split_path) - ) - - dataset = maybe_shorten_dataset( - dataset, - split, - self.cfg.shorten_data_split_list, - self.cfg.shorten_method, - self.cfg.tokens_per_sample, - self.cfg.seed, - ) - - # create continuous blocks of tokens - dataset = TokenBlockDataset( - dataset, - dataset.sizes, - self.cfg.tokens_per_sample - 1, # one less for - pad=self.source_dictionary.pad(), - eos=self.source_dictionary.eos(), - break_mode=self.cfg.sample_break_mode, - ) - logger.info("loaded {} blocks from: {}".format(len(dataset), split_path)) - - # prepend beginning-of-sentence token (, equiv. 
to [CLS] in BERT) - dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) - - # create masked input and targets - mask_whole_words = ( - get_whole_word_mask(self.args, self.source_dictionary) - if self.cfg.mask_whole_words - else None - ) - - src_dataset, tgt_dataset = MaskTokensDataset.apply_mask( - dataset, - self.source_dictionary, - pad_idx=self.source_dictionary.pad(), - mask_idx=self.mask_idx, - seed=self.cfg.seed, - mask_prob=self.cfg.mask_prob, - leave_unmasked_prob=self.cfg.leave_unmasked_prob, - random_token_prob=self.cfg.random_token_prob, - freq_weighted_replacement=self.cfg.freq_weighted_replacement, - mask_whole_words=mask_whole_words, - mask_multiple_length=self.cfg.mask_multiple_length, - mask_stdev=self.cfg.mask_stdev, - ) - - with data_utils.numpy_seed(self.cfg.seed): - shuffle = np.random.permutation(len(src_dataset)) - - self.datasets[split] = SortDataset( - NestedDictionaryDataset( - { - "id": IdDataset(), - "net_input": { - "src_tokens": RightPadDataset( - src_dataset, - pad_idx=self.source_dictionary.pad(), - ), - "src_lengths": NumelDataset(src_dataset, reduce=False), - }, - "target": RightPadDataset( - tgt_dataset, - pad_idx=self.source_dictionary.pad(), - ), - "nsentences": NumSamplesDataset(), - "ntokens": NumelDataset(src_dataset, reduce=True), - }, - sizes=[src_dataset.sizes], - ), - sort_order=[ - shuffle, - src_dataset.sizes, - ], - ) - - def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True): - src_dataset = RightPadDataset( - TokenBlockDataset( - src_tokens, - src_lengths, - self.cfg.tokens_per_sample - 1, # one less for - pad=self.source_dictionary.pad(), - eos=self.source_dictionary.eos(), - break_mode="eos", - ), - pad_idx=self.source_dictionary.pad(), - ) - src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos()) - src_dataset = NestedDictionaryDataset( - { - "id": IdDataset(), - "net_input": { - "src_tokens": src_dataset, - "src_lengths": NumelDataset(src_dataset, reduce=False), - }, - }, - sizes=src_lengths, - ) - if sort: - src_dataset = SortDataset(src_dataset, sort_order=[src_lengths]) - return src_dataset - - @property - def source_dictionary(self): - return self.dictionary - - @property - def target_dictionary(self): - return self.dictionary diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/criss/download_and_preprocess_tatoeba.sh b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/criss/download_and_preprocess_tatoeba.sh deleted file mode 100644 index 7ed64f017d5e62695ba73745c840507b994abc0f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/criss/download_and_preprocess_tatoeba.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
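# A small numpy sketch of the masking recipe that MaskedLMConfig above
# parameterizes: sample mask_prob of the (non-pad) positions as prediction
# targets, then replace each selected token with <mask>, keep it unchanged, or
# swap in a random token according to leave_unmasked_prob / random_token_prob.
# This only illustrates the 80/10/10-style scheme; it is not fairseq's
# MaskTokensDataset, and the token ids / vocab size below are made up.
import numpy as np

def mask_tokens_sketch(tokens, vocab_size, mask_idx, pad_idx,
                       mask_prob=0.15, leave_unmasked_prob=0.1,
                       random_token_prob=0.1, seed=0):
    rng = np.random.default_rng(seed)
    tokens = np.asarray(tokens)
    chosen = (tokens != pad_idx) & (rng.random(tokens.shape) < mask_prob)

    target = np.full_like(tokens, pad_idx)  # loss is computed only on chosen positions
    target[chosen] = tokens[chosen]

    src = tokens.copy()
    action = rng.random(tokens.shape)
    use_mask = chosen & (action >= leave_unmasked_prob + random_token_prob)
    use_rand = chosen & (action >= leave_unmasked_prob) & ~use_mask
    src[use_mask] = mask_idx
    src[use_rand] = rng.integers(0, vocab_size, size=int(use_rand.sum()))
    return src, target

src, tgt = mask_tokens_sketch([5, 17, 9, 23, 42, 7, 11, 3] * 4,
                              vocab_size=100, mask_idx=1, pad_idx=0, seed=3)
print(src)
print(tgt)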
- -SPM_ENCODE=flores/scripts/spm_encode.py -DATA=data_tmp -SPM_MODEL=criss_checkpoints/sentence.bpe.model -DICT=criss_checkpoints/dict.txt - -if [[ -f flores ]]; then - echo "flores already cloned" -else - git clone https://github.com/facebookresearch/flores -fi -if [[ -f LASER ]]; then - echo "LASER already cloned" -else - git clone https://github.com/facebookresearch/LASER -fi -mkdir -p data_tmp -declare -A lang_tatoeba_map=( ["ar_AR"]="ara" ["de_DE"]="deu" ["es_XX"]="spa" ["et_EE"]="est" ["fi_FI"]="fin" ["fr_XX"]="fra" ["hi_IN"]="hin" ["it_IT"]="ita" ["ja_XX"]="jpn" ["ko_KR"]="kor" ["kk_KZ"]="kaz" ["nl_XX"]="nld" ["ru_RU"]="rus" ["tr_TR"]="tur" ["vi_VN"]="vie" ["zh_CN"]="cmn") -for lang in ar_AR de_DE es_XX et_EE fi_FI fr_XX hi_IN it_IT ja_XX kk_KZ ko_KR nl_XX ru_RU tr_TR vi_VN zh_CN; do - lang_tatoeba=${lang_tatoeba_map[$lang]} - echo $lang_tatoeba - datadir=$DATA/${lang}-en_XX-tatoeba - rm -rf $datadir - mkdir -p $datadir - TEST_PREFIX=LASER/data/tatoeba/v1/tatoeba - python $SPM_ENCODE \ - --model ${SPM_MODEL} \ - --output_format=piece \ - --inputs ${TEST_PREFIX}.${lang_tatoeba}-eng.${lang_tatoeba} ${TEST_PREFIX}.${lang_tatoeba}-eng.eng \ - --outputs $datadir/test.bpe.${lang}-en_XX.${lang} $datadir/test.bpe.${lang}-en_XX.en_XX - - # binarize data - fairseq-preprocess \ - --source-lang ${lang} --target-lang en_XX \ - --testpref $datadir/test.bpe.${lang}-en_XX \ - --destdir $datadir \ - --srcdict ${DICT} \ - --joined-dictionary \ - --workers 4 -done diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/unsupervised_quality_estimation/meteor.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/unsupervised_quality_estimation/meteor.py deleted file mode 100644 index 2ee0448cf1f167f6f3ecee56ad807922cffb0956..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/unsupervised_quality_estimation/meteor.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
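# Sketch of the pairwise bookkeeping that the meteor.py script below performs:
# each source segment is translated n_repeats times, every unordered pair of
# hypotheses is scored against each other, and the per-segment pair scores are
# averaged. The real script shells out to the Meteor jar; here a simple token
# Jaccard overlap (word_overlap, not part of the original file) stands in for
# Meteor so the pairing/averaging logic is runnable on its own.
from itertools import combinations

def word_overlap(hyp, ref):
    hyp, ref = set(hyp.split()), set(ref.split())
    return len(hyp & ref) / max(len(hyp | ref), 1)

def average_pairwise_score(translations, n_repeats, score_fn=word_overlap):
    averages = []
    for segid in sorted(translations):
        hyps = translations[segid]
        assert len(hyps) == n_repeats
        pair_scores = [score_fn(hyps[i], hyps[j])
                       for i, j in combinations(range(n_repeats), 2)]
        averages.append(sum(pair_scores) / len(pair_scores))
    return averages

translations = {0: ['the cat sat', 'a cat sat', 'the cat was sitting'],
                1: ['hello world', 'hi world', 'greetings planet']}
print(average_pairwise_score(translations, n_repeats=3))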
- -import argparse -import math -import os -import subprocess -import sys -import tempfile -from collections import defaultdict -from itertools import combinations - - -def read_translations(path, n_repeats): - segment_counter = 0 - segment_translations = [] - translations = defaultdict(list) - for line in open(path): - segment_translations.append(" ".join(line.split())) - if len(segment_translations) == n_repeats: - translations[segment_counter] = segment_translations - segment_translations = [] - segment_counter += 1 - return translations - - -def generate_input(translations, n_repeats): - _, ref_path = tempfile.mkstemp() - _, mt_path = tempfile.mkstemp() - ref_fh = open(ref_path, "w") - mt_fh = open(mt_path, "w") - for segid in sorted(translations.keys()): - assert len(translations[segid]) == n_repeats - indexes = combinations(range(n_repeats), 2) - for idx1, idx2 in indexes: - mt_fh.write(translations[segid][idx1].strip() + "\n") - ref_fh.write(translations[segid][idx2].strip() + "\n") - sys.stderr.write("\nSaved translations to %s and %s" % (ref_path, mt_path)) - return ref_path, mt_path - - -def run_meteor(ref_path, mt_path, metric_path, lang="en"): - _, out_path = tempfile.mkstemp() - subprocess.call( - [ - "java", - "-Xmx2G", - "-jar", - metric_path, - mt_path, - ref_path, - "-p", - "0.5 0.2 0.6 0.75", # default parameters, only changed alpha to give equal weight to P and R - "-norm", - "-l", - lang, - ], - stdout=open(out_path, "w"), - ) - os.remove(ref_path) - os.remove(mt_path) - sys.stderr.write("\nSaved Meteor output to %s" % out_path) - return out_path - - -def read_output(meteor_output_path, n_repeats): - n_combinations = math.factorial(n_repeats) / ( - math.factorial(2) * math.factorial(n_repeats - 2) - ) - raw_scores = [] - average_scores = [] - for line in open(meteor_output_path): - if not line.startswith("Segment "): - continue - score = float(line.strip().split("\t")[1]) - raw_scores.append(score) - if len(raw_scores) == n_combinations: - average_scores.append(sum(raw_scores) / n_combinations) - raw_scores = [] - os.remove(meteor_output_path) - return average_scores - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("-i", "--infile") - parser.add_argument("-n", "--repeat_times", type=int) - parser.add_argument("-m", "--meteor") - parser.add_argument("-o", "--output") - args = parser.parse_args() - - translations = read_translations(args.infile, args.repeat_times) - sys.stderr.write("\nGenerating input for Meteor...") - ref_path, mt_path = generate_input(translations, args.repeat_times) - sys.stderr.write("\nRunning Meteor...") - out_path = run_meteor(ref_path, mt_path, args.meteor) - sys.stderr.write("\nReading output...") - scores = read_output(out_path, args.repeat_times) - sys.stderr.write("\nWriting results...") - with open(args.output, "w") as o: - for scr in scores: - o.write("{}\n".format(scr)) - o.close() - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/raw_label_dataset.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/raw_label_dataset.py deleted file mode 100644 index d054904f419bd64855d33a2a770b43f671c7c8d8..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/raw_label_dataset.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from . 
import FairseqDataset - - -class RawLabelDataset(FairseqDataset): - def __init__(self, labels): - super().__init__() - self.labels = labels - - def __getitem__(self, index): - return self.labels[index] - - def __len__(self): - return len(self.labels) - - def collater(self, samples): - return torch.tensor(samples) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_label_smoothing.py b/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_label_smoothing.py deleted file mode 100644 index 04c0f974ac80f7606327f868e948712c3c18f1d0..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_label_smoothing.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import copy -import unittest - -import tests.utils as test_utils -import torch -from fairseq.criterions.cross_entropy import CrossEntropyCriterion -from fairseq.criterions.label_smoothed_cross_entropy import ( - LabelSmoothedCrossEntropyCriterion, -) - - -class TestLabelSmoothing(unittest.TestCase): - def setUp(self): - # build dictionary - self.d = test_utils.dummy_dictionary(3) - vocab = len(self.d) - self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens - self.assertEqual(self.d.pad(), 1) - self.assertEqual(self.d.eos(), 2) - self.assertEqual(self.d.unk(), 3) - pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841 - - # build dataset - self.data = [ - # the first batch item has padding - { - "source": torch.LongTensor([w1, eos]), - "target": torch.LongTensor([w1, eos]), - }, - { - "source": torch.LongTensor([w1, eos]), - "target": torch.LongTensor([w1, w1, eos]), - }, - ] - self.sample = next(test_utils.dummy_dataloader(self.data)) - - # build model - self.args = argparse.Namespace() - self.args.sentence_avg = False - self.args.report_accuracy = False - self.args.probs = ( - torch.FloatTensor( - [ - # pad eos unk w1 w2 w3 - [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05], - [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10], - [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15], - ] - ) - .unsqueeze(0) - .expand(2, 3, 7) - ) # add batch dimension - self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d) - self.model = self.task.build_model(self.args) - - def test_nll_loss(self): - self.args.label_smoothing = 0.1 - nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) - smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion( - self.args, self.task - ) - nll_loss, nll_sample_size, nll_logging_output = nll_crit( - self.model, self.sample - ) - smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit( - self.model, self.sample - ) - self.assertLess(abs(nll_loss - nll_logging_output["loss"]), 1e-6) - self.assertLess(abs(nll_loss - smooth_logging_output["nll_loss"]), 1e-6) - - def test_padding(self): - self.args.label_smoothing = 0.1 - crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) - loss, _, logging_output = crit(self.model, self.sample) - - def get_one_no_padding(idx): - # create a new sample with just a single batch item so that there's - # no padding - sample1 = next(test_utils.dummy_dataloader([self.data[idx]])) - args1 = copy.copy(self.args) - args1.probs = args1.probs[idx, :, :].unsqueeze(0) - model1 = self.task.build_model(args1) - loss1, _, _ = crit(model1, sample1) - return loss1 - - loss1 = get_one_no_padding(0) - loss2 = get_one_no_padding(1) - 
self.assertAlmostEqual(loss, loss1 + loss2) - - def test_reduction(self): - self.args.label_smoothing = 0.1 - crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) - loss, _, logging_output = crit(self.model, self.sample, reduce=True) - unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False) - self.assertAlmostEqual(loss, unreduced_loss.sum()) - - def test_zero_eps(self): - self.args.label_smoothing = 0.0 - nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) - smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion( - self.args, self.task - ) - nll_loss, nll_sample_size, nll_logging_output = nll_crit( - self.model, self.sample - ) - smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit( - self.model, self.sample - ) - self.assertAlmostEqual(nll_loss, smooth_loss) - - def assertAlmostEqual(self, t1, t2): - self.assertEqual(t1.size(), t2.size(), "size mismatch") - self.assertLess((t1 - t2).abs().max(), 1e-6) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/ORI-Muchim/NahidaTTS/commons.py b/spaces/ORI-Muchim/NahidaTTS/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/NahidaTTS/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/visualizer.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/visualizer.py deleted file mode 100644 index 8e145181871d1981e41db3c8cbc7e8f4cc7b5833..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/visualizer.py +++ /dev/null @@ -1,1267 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import colorsys -import logging -import math -import numpy as np -from enum import Enum, unique -import cv2 -import matplotlib as mpl -import matplotlib.colors as mplc -import matplotlib.figure as mplfigure -import pycocotools.mask as mask_util -import torch -from matplotlib.backends.backend_agg import FigureCanvasAgg -from PIL import Image - -from detectron2.data import MetadataCatalog -from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes -from detectron2.utils.file_io import PathManager - -from .colormap import random_color - -logger = logging.getLogger(__name__) - -__all__ = ["ColorMode", "VisImage", "Visualizer"] - - -_SMALL_OBJECT_AREA_THRESH = 1000 -_LARGE_MASK_AREA_THRESH = 120000 -_OFF_WHITE = (1.0, 1.0, 240.0 / 255) -_BLACK = (0, 0, 0) -_RED = (1.0, 0, 0) - -_KEYPOINT_THRESHOLD = 0.05 - - -@unique -class ColorMode(Enum): - """ - Enum of different color modes to use for instance visualizations. - """ - - IMAGE = 0 - """ - Picks a random color for every instance and overlay segmentations with low opacity. - """ - SEGMENTATION = 1 - """ - Let instances of the same category have similar colors - (from metadata.thing_colors), and overlay them with - high opacity. This provides more attention on the quality of segmentation. - """ - IMAGE_BW = 2 - """ - Same as IMAGE, but convert all areas without masks to gray-scale. - Only available for drawing per-instance mask predictions. - """ - - -class GenericMask: - """ - Attribute: - polygons (list[ndarray]): list[ndarray]: polygons for this mask. - Each ndarray has format [x, y, x, y, ...] 
- mask (ndarray): a binary mask - """ - - def __init__(self, mask_or_polygons, height, width): - self._mask = self._polygons = self._has_holes = None - self.height = height - self.width = width - - m = mask_or_polygons - if isinstance(m, dict): - # RLEs - assert "counts" in m and "size" in m - if isinstance(m["counts"], list): # uncompressed RLEs - h, w = m["size"] - assert h == height and w == width - m = mask_util.frPyObjects(m, h, w) - self._mask = mask_util.decode(m)[:, :] - return - - if isinstance(m, list): # list[ndarray] - self._polygons = [np.asarray(x).reshape(-1) for x in m] - return - - if isinstance(m, np.ndarray): # assumed to be a binary mask - assert m.shape[1] != 2, m.shape - assert m.shape == ( - height, - width, - ), f"mask shape: {m.shape}, target dims: {height}, {width}" - self._mask = m.astype("uint8") - return - - raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) - - @property - def mask(self): - if self._mask is None: - self._mask = self.polygons_to_mask(self._polygons) - return self._mask - - @property - def polygons(self): - if self._polygons is None: - self._polygons, self._has_holes = self.mask_to_polygons(self._mask) - return self._polygons - - @property - def has_holes(self): - if self._has_holes is None: - if self._mask is not None: - self._polygons, self._has_holes = self.mask_to_polygons(self._mask) - else: - self._has_holes = False # if original format is polygon, does not have holes - return self._has_holes - - def mask_to_polygons(self, mask): - # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level - # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. - # Internal contours (holes) are placed in hierarchy-2. - # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. - mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr - res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) - hierarchy = res[-1] - if hierarchy is None: # empty mask - return [], False - has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 - res = res[-2] - res = [x.flatten() for x in res] - # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. - # We add 0.5 to turn them into real-value coordinate space. A better solution - # would be to first +0.5 and then dilate the returned polygon by 0.5. - res = [x + 0.5 for x in res if len(x) >= 6] - return res, has_holes - - def polygons_to_mask(self, polygons): - rle = mask_util.frPyObjects(polygons, self.height, self.width) - rle = mask_util.merge(rle) - return mask_util.decode(rle)[:, :] - - def area(self): - return self.mask.sum() - - def bbox(self): - p = mask_util.frPyObjects(self.polygons, self.height, self.width) - p = mask_util.merge(p) - bbox = mask_util.toBbox(p) - bbox[2] += bbox[0] - bbox[3] += bbox[1] - return bbox - - -class _PanopticPrediction: - """ - Unify different panoptic annotation/prediction formats - """ - - def __init__(self, panoptic_seg, segments_info, metadata=None): - if segments_info is None: - assert metadata is not None - # If "segments_info" is None, we assume "panoptic_img" is a - # H*W int32 image storing the panoptic_id in the format of - # category_id * label_divisor + instance_id. We reserve -1 for - # VOID label. - label_divisor = metadata.label_divisor - segments_info = [] - for panoptic_label in np.unique(panoptic_seg.numpy()): - if panoptic_label == -1: - # VOID region. 
- continue - pred_class = panoptic_label // label_divisor - isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() - segments_info.append( - { - "id": int(panoptic_label), - "category_id": int(pred_class), - "isthing": bool(isthing), - } - ) - del metadata - - self._seg = panoptic_seg - - self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info - segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) - areas = areas.numpy() - sorted_idxs = np.argsort(-areas) - self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] - self._seg_ids = self._seg_ids.tolist() - for sid, area in zip(self._seg_ids, self._seg_areas): - if sid in self._sinfo: - self._sinfo[sid]["area"] = float(area) - - def non_empty_mask(self): - """ - Returns: - (H, W) array, a mask for all pixels that have a prediction - """ - empty_ids = [] - for id in self._seg_ids: - if id not in self._sinfo: - empty_ids.append(id) - if len(empty_ids) == 0: - return np.zeros(self._seg.shape, dtype=np.uint8) - assert ( - len(empty_ids) == 1 - ), ">1 ids corresponds to no labels. This is currently not supported" - return (self._seg != empty_ids[0]).numpy().astype(np.bool) - - def semantic_masks(self): - for sid in self._seg_ids: - sinfo = self._sinfo.get(sid) - if sinfo is None or sinfo["isthing"]: - # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. - continue - yield (self._seg == sid).numpy().astype(np.bool), sinfo - - def instance_masks(self): - for sid in self._seg_ids: - sinfo = self._sinfo.get(sid) - if sinfo is None or not sinfo["isthing"]: - continue - mask = (self._seg == sid).numpy().astype(np.bool) - if mask.sum() > 0: - yield mask, sinfo - - -def _create_text_labels(classes, scores, class_names, is_crowd=None): - """ - Args: - classes (list[int] or None): - scores (list[float] or None): - class_names (list[str] or None): - is_crowd (list[bool] or None): - - Returns: - list[str] or None - """ - labels = None - if classes is not None: - if class_names is not None and len(class_names) > 0: - labels = [class_names[i] for i in classes] - else: - labels = [str(i) for i in classes] - if scores is not None: - if labels is None: - labels = ["{:.0f}%".format(s * 100) for s in scores] - else: - labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] - if labels is not None and is_crowd is not None: - labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] - return labels - - -class VisImage: - def __init__(self, img, scale=1.0): - """ - Args: - img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. - scale (float): scale the input image - """ - self.img = img - self.scale = scale - self.width, self.height = img.shape[1], img.shape[0] - self._setup_figure(img) - - def _setup_figure(self, img): - """ - Args: - Same as in :meth:`__init__()`. - - Returns: - fig (matplotlib.pyplot.figure): top level container for all the image plot elements. - ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. 
- """ - fig = mplfigure.Figure(frameon=False) - self.dpi = fig.get_dpi() - # add a small 1e-2 to avoid precision lost due to matplotlib's truncation - # (https://github.com/matplotlib/matplotlib/issues/15363) - fig.set_size_inches( - (self.width * self.scale + 1e-2) / self.dpi, - (self.height * self.scale + 1e-2) / self.dpi, - ) - self.canvas = FigureCanvasAgg(fig) - # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) - ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) - ax.axis("off") - self.fig = fig - self.ax = ax - self.reset_image(img) - - def reset_image(self, img): - """ - Args: - img: same as in __init__ - """ - img = img.astype("uint8") - self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") - - def save(self, filepath): - """ - Args: - filepath (str): a string that contains the absolute path, including the file name, where - the visualized image will be saved. - """ - self.fig.savefig(filepath) - - def get_image(self): - """ - Returns: - ndarray: - the visualized image of shape (H, W, 3) (RGB) in uint8 type. - The shape is scaled w.r.t the input image using the given `scale` argument. - """ - canvas = self.canvas - s, (width, height) = canvas.print_to_buffer() - # buf = io.BytesIO() # works for cairo backend - # canvas.print_rgba(buf) - # width, height = self.width, self.height - # s = buf.getvalue() - - buffer = np.frombuffer(s, dtype="uint8") - - img_rgba = buffer.reshape(height, width, 4) - rgb, alpha = np.split(img_rgba, [3], axis=2) - return rgb.astype("uint8") - - -class Visualizer: - """ - Visualizer that draws data about detection/segmentation on images. - - It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` - that draw primitive objects to images, as well as high-level wrappers like - `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` - that draw composite data in some pre-defined style. - - Note that the exact visualization style for the high-level wrappers are subject to change. - Style such as color, opacity, label contents, visibility of labels, or even the visibility - of objects themselves (e.g. when the object is too small) may change according - to different heuristics, as long as the results still look visually reasonable. - - To obtain a consistent style, you can implement custom drawing functions with the - abovementioned primitive methods instead. If you need more customized visualization - styles, you can process the data yourself following their format documented in - tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not - intend to satisfy everyone's preference on drawing styles. - - This visualizer focuses on high rendering quality rather than performance. It is not - designed to be used for real-time applications. - """ - - # TODO implement a fast, rasterized version using OpenCV - - def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): - """ - Args: - img_rgb: a numpy array of shape (H, W, C), where H and W correspond to - the height and width of the image respectively. C is the number of - color channels. The image is required to be in RGB format since that - is a requirement of the Matplotlib library. The image is also expected - to be in the range [0, 255]. - metadata (Metadata): dataset metadata (e.g. class names and colors) - instance_mode (ColorMode): defines one of the pre-defined style for drawing - instances on an image. 
- """ - self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) - if metadata is None: - metadata = MetadataCatalog.get("__nonexist__") - self.metadata = metadata - self.output = VisImage(self.img, scale=scale) - self.cpu_device = torch.device("cpu") - - # too small texts are useless, therefore clamp to 9 - self._default_font_size = max( - np.sqrt(self.output.height * self.output.width) // 90, 10 // scale - ) - self._instance_mode = instance_mode - self.keypoint_threshold = _KEYPOINT_THRESHOLD - - def draw_instance_predictions(self, predictions): - """ - Draw instance-level prediction results on an image. - - Args: - predictions (Instances): the output of an instance detection/segmentation - model. Following fields will be used to draw: - "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). - - Returns: - output (VisImage): image object with visualizations. - """ - boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None - scores = predictions.scores if predictions.has("scores") else None - classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None - labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) - keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None - - if predictions.has("pred_masks"): - masks = np.asarray(predictions.pred_masks) - masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] - else: - masks = None - - if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes - ] - alpha = 0.8 - else: - colors = None - alpha = 0.5 - - if self._instance_mode == ColorMode.IMAGE_BW: - self.output.reset_image( - self._create_grayscale_image( - (predictions.pred_masks.any(dim=0) > 0).numpy() - if predictions.has("pred_masks") - else None - ) - ) - alpha = 0.3 - - self.overlay_instances( - masks=masks, - boxes=boxes, - labels=labels, - keypoints=keypoints, - assigned_colors=colors, - alpha=alpha, - ) - return self.output - - def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): - """ - Draw semantic segmentation predictions/labels. - - Args: - sem_seg (Tensor or ndarray): the segmentation of shape (H, W). - Each value is the integer label of the pixel. - area_threshold (int): segments with less than `area_threshold` are not drawn. - alpha (float): the larger it is, the more opaque the segmentations are. - - Returns: - output (VisImage): image object with visualizations. - """ - if isinstance(sem_seg, torch.Tensor): - sem_seg = sem_seg.numpy() - labels, areas = np.unique(sem_seg, return_counts=True) - sorted_idxs = np.argsort(-areas).tolist() - labels = labels[sorted_idxs] - for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): - try: - mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] - except (AttributeError, IndexError): - mask_color = None - - binary_mask = (sem_seg == label).astype(np.uint8) - text = self.metadata.stuff_classes[label] - self.draw_binary_mask( - binary_mask, - color=mask_color, - edge_color=_OFF_WHITE, - text=text, - alpha=alpha, - area_threshold=area_threshold, - ) - return self.output - - def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): - """ - Draw panoptic prediction annotations or results. - - Args: - panoptic_seg (Tensor): of shape (height, width) where the values are ids for each - segment. 
- segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. - If it is a ``list[dict]``, each dict contains keys "id", "category_id". - If None, category id of each pixel is computed by - ``pixel // metadata.label_divisor``. - area_threshold (int): stuff segments with less than `area_threshold` are not drawn. - - Returns: - output (VisImage): image object with visualizations. - """ - pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) - - if self._instance_mode == ColorMode.IMAGE_BW: - self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) - - # draw mask for all semantic segments first i.e. "stuff" - for mask, sinfo in pred.semantic_masks(): - category_idx = sinfo["category_id"] - try: - mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] - except AttributeError: - mask_color = None - - text = self.metadata.stuff_classes[category_idx] - self.draw_binary_mask( - mask, - color=mask_color, - edge_color=_OFF_WHITE, - text=text, - alpha=alpha, - area_threshold=area_threshold, - ) - - # draw mask for all instances second - all_instances = list(pred.instance_masks()) - if len(all_instances) == 0: - return self.output - masks, sinfo = list(zip(*all_instances)) - category_ids = [x["category_id"] for x in sinfo] - - try: - scores = [x["score"] for x in sinfo] - except KeyError: - scores = None - labels = _create_text_labels( - category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] - ) - - try: - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids - ] - except AttributeError: - colors = None - self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) - - return self.output - - draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility - - def draw_dataset_dict(self, dic): - """ - Draw annotations/segmentaions in Detectron2 Dataset format. - - Args: - dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. - - Returns: - output (VisImage): image object with visualizations. 
- """ - annos = dic.get("annotations", None) - if annos: - if "segmentation" in annos[0]: - masks = [x["segmentation"] for x in annos] - else: - masks = None - if "keypoints" in annos[0]: - keypts = [x["keypoints"] for x in annos] - keypts = np.array(keypts).reshape(len(annos), -1, 3) - else: - keypts = None - - boxes = [ - BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) - if len(x["bbox"]) == 4 - else x["bbox"] - for x in annos - ] - - colors = None - category_ids = [x["category_id"] for x in annos] - if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) - for c in category_ids - ] - names = self.metadata.get("thing_classes", None) - labels = _create_text_labels( - category_ids, - scores=None, - class_names=names, - is_crowd=[x.get("iscrowd", 0) for x in annos], - ) - self.overlay_instances( - labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors - ) - - sem_seg = dic.get("sem_seg", None) - if sem_seg is None and "sem_seg_file_name" in dic: - with PathManager.open(dic["sem_seg_file_name"], "rb") as f: - sem_seg = Image.open(f) - sem_seg = np.asarray(sem_seg, dtype="uint8") - if sem_seg is not None: - self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) - - pan_seg = dic.get("pan_seg", None) - if pan_seg is None and "pan_seg_file_name" in dic: - with PathManager.open(dic["pan_seg_file_name"], "rb") as f: - pan_seg = Image.open(f) - pan_seg = np.asarray(pan_seg) - from panopticapi.utils import rgb2id - - pan_seg = rgb2id(pan_seg) - if pan_seg is not None: - segments_info = dic["segments_info"] - pan_seg = torch.tensor(pan_seg) - self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) - return self.output - - def overlay_instances( - self, - *, - boxes=None, - labels=None, - masks=None, - keypoints=None, - assigned_colors=None, - alpha=0.5, - ): - """ - Args: - boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, - or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, - or a :class:`RotatedBoxes`, - or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format - for the N objects in a single image, - labels (list[str]): the text to be displayed for each instance. - masks (masks-like object): Supported types are: - - * :class:`detectron2.structures.PolygonMasks`, - :class:`detectron2.structures.BitMasks`. - * list[list[ndarray]]: contains the segmentation masks for all objects in one image. - The first level of the list corresponds to individual instances. The second - level to all the polygon that compose the instance, and the third level - to the polygon coordinates. The third level should have the format of - [x0, y0, x1, y1, ..., xn, yn] (n >= 3). - * list[ndarray]: each ndarray is a binary mask of shape (H, W). - * list[dict]: each dict is a COCO-style RLE. - keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), - where the N is the number of instances and K is the number of keypoints. - The last dimension corresponds to (x, y, visibility or score). - assigned_colors (list[matplotlib.colors]): a list of colors, where each color - corresponds to each mask or box in the image. Refer to 'matplotlib.colors' - for full list of formats that the colors are accepted in. - Returns: - output (VisImage): image object with visualizations. 
- """ - num_instances = 0 - if boxes is not None: - boxes = self._convert_boxes(boxes) - num_instances = len(boxes) - if masks is not None: - masks = self._convert_masks(masks) - if num_instances: - assert len(masks) == num_instances - else: - num_instances = len(masks) - if keypoints is not None: - if num_instances: - assert len(keypoints) == num_instances - else: - num_instances = len(keypoints) - keypoints = self._convert_keypoints(keypoints) - if labels is not None: - assert len(labels) == num_instances - if assigned_colors is None: - assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] - if num_instances == 0: - return self.output - if boxes is not None and boxes.shape[1] == 5: - return self.overlay_rotated_instances( - boxes=boxes, labels=labels, assigned_colors=assigned_colors - ) - - # Display in largest to smallest order to reduce occlusion. - areas = None - if boxes is not None: - areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) - elif masks is not None: - areas = np.asarray([x.area() for x in masks]) - - if areas is not None: - sorted_idxs = np.argsort(-areas).tolist() - # Re-order overlapped instances in descending order. - boxes = boxes[sorted_idxs] if boxes is not None else None - labels = [labels[k] for k in sorted_idxs] if labels is not None else None - masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None - assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] - keypoints = keypoints[sorted_idxs] if keypoints is not None else None - - for i in range(num_instances): - color = assigned_colors[i] - if boxes is not None: - self.draw_box(boxes[i], edge_color=color) - - if masks is not None: - for segment in masks[i].polygons: - self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) - - if labels is not None: - # first get a box - if boxes is not None: - x0, y0, x1, y1 = boxes[i] - text_pos = (x0, y0) # if drawing boxes, put text on the box corner. - horiz_align = "left" - elif masks is not None: - # skip small mask without polygon - if len(masks[i].polygons) == 0: - continue - - x0, y0, x1, y1 = masks[i].bbox() - - # draw text in the center (defined by median) when box is not drawn - # median is less sensitive to outliers. - text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] - horiz_align = "center" - else: - continue # drawing the box confidence for keypoints isn't very useful. - # for small objects, draw text at the side to avoid occlusion - instance_area = (y1 - y0) * (x1 - x0) - if ( - instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale - or y1 - y0 < 40 * self.output.scale - ): - if y1 >= self.output.height - 5: - text_pos = (x1, y0) - else: - text_pos = (x0, y1) - - height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - font_size = ( - np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) - * 0.5 - * self._default_font_size - ) - self.draw_text( - labels[i], - text_pos, - color=lighter_color, - horizontal_alignment=horiz_align, - font_size=font_size, - ) - - # draw keypoints - if keypoints is not None: - for keypoints_per_instance in keypoints: - self.draw_and_connect_keypoints(keypoints_per_instance) - - return self.output - - def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): - """ - Args: - boxes (ndarray): an Nx5 numpy array of - (x_center, y_center, width, height, angle_degrees) format - for the N objects in a single image. 
- labels (list[str]): the text to be displayed for each instance. - assigned_colors (list[matplotlib.colors]): a list of colors, where each color - corresponds to each mask or box in the image. Refer to 'matplotlib.colors' - for full list of formats that the colors are accepted in. - - Returns: - output (VisImage): image object with visualizations. - """ - num_instances = len(boxes) - - if assigned_colors is None: - assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] - if num_instances == 0: - return self.output - - # Display in largest to smallest order to reduce occlusion. - if boxes is not None: - areas = boxes[:, 2] * boxes[:, 3] - - sorted_idxs = np.argsort(-areas).tolist() - # Re-order overlapped instances in descending order. - boxes = boxes[sorted_idxs] - labels = [labels[k] for k in sorted_idxs] if labels is not None else None - colors = [assigned_colors[idx] for idx in sorted_idxs] - - for i in range(num_instances): - self.draw_rotated_box_with_label( - boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None - ) - - return self.output - - def draw_and_connect_keypoints(self, keypoints): - """ - Draws keypoints of an instance and follows the rules for keypoint connections - to draw lines between appropriate keypoints. This follows color heuristics for - line color. - - Args: - keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints - and the last dimension corresponds to (x, y, probability). - - Returns: - output (VisImage): image object with visualizations. - """ - visible = {} - keypoint_names = self.metadata.get("keypoint_names") - for idx, keypoint in enumerate(keypoints): - - # draw keypoint - x, y, prob = keypoint - if prob > self.keypoint_threshold: - self.draw_circle((x, y), color=_RED) - if keypoint_names: - keypoint_name = keypoint_names[idx] - visible[keypoint_name] = (x, y) - - if self.metadata.get("keypoint_connection_rules"): - for kp0, kp1, color in self.metadata.keypoint_connection_rules: - if kp0 in visible and kp1 in visible: - x0, y0 = visible[kp0] - x1, y1 = visible[kp1] - color = tuple(x / 255.0 for x in color) - self.draw_line([x0, x1], [y0, y1], color=color) - - # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip - # Note that this strategy is specific to person keypoints. - # For other keypoints, it should just do nothing - try: - ls_x, ls_y = visible["left_shoulder"] - rs_x, rs_y = visible["right_shoulder"] - mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 - except KeyError: - pass - else: - # draw line from nose to mid-shoulder - nose_x, nose_y = visible.get("nose", (None, None)) - if nose_x is not None: - self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) - - try: - # draw line from mid-shoulder to mid-hip - lh_x, lh_y = visible["left_hip"] - rh_x, rh_y = visible["right_hip"] - except KeyError: - pass - else: - mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 - self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) - return self.output - - """ - Primitive drawing functions: - """ - - def draw_text( - self, - text, - position, - *, - font_size=None, - color="g", - horizontal_alignment="center", - rotation=0, - ): - """ - Args: - text (str): class label - position (tuple): a tuple of the x and y coordinates to place text on image. - font_size (int, optional): font of the text. If not provided, a font size - proportional to the image width is calculated and used. 
- color: color of the text. Refer to `matplotlib.colors` for full list - of formats that are accepted. - horizontal_alignment (str): see `matplotlib.text.Text` - rotation: rotation angle in degrees CCW - - Returns: - output (VisImage): image object with text drawn. - """ - if not font_size: - font_size = self._default_font_size - - # since the text background is dark, we don't want the text to be dark - color = np.maximum(list(mplc.to_rgb(color)), 0.2) - color[np.argmax(color)] = max(0.8, np.max(color)) - - x, y = position - self.output.ax.text( - x, - y, - text, - size=font_size * self.output.scale, - family="sans-serif", - bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, - verticalalignment="top", - horizontalalignment=horizontal_alignment, - color=color, - zorder=10, - rotation=rotation, - ) - return self.output - - def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): - """ - Args: - box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 - are the coordinates of the image's top left corner. x1 and y1 are the - coordinates of the image's bottom right corner. - alpha (float): blending efficient. Smaller values lead to more transparent masks. - edge_color: color of the outline of the box. Refer to `matplotlib.colors` - for full list of formats that are accepted. - line_style (string): the string to use to create the outline of the boxes. - - Returns: - output (VisImage): image object with box drawn. - """ - x0, y0, x1, y1 = box_coord - width = x1 - x0 - height = y1 - y0 - - linewidth = max(self._default_font_size / 4, 1) - - self.output.ax.add_patch( - mpl.patches.Rectangle( - (x0, y0), - width, - height, - fill=False, - edgecolor=edge_color, - linewidth=linewidth * self.output.scale, - alpha=alpha, - linestyle=line_style, - ) - ) - return self.output - - def draw_rotated_box_with_label( - self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None - ): - """ - Draw a rotated box with label on its top-left corner. - - Args: - rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), - where cnt_x and cnt_y are the center coordinates of the box. - w and h are the width and height of the box. angle represents how - many degrees the box is rotated CCW with regard to the 0-degree box. - alpha (float): blending efficient. Smaller values lead to more transparent masks. - edge_color: color of the outline of the box. Refer to `matplotlib.colors` - for full list of formats that are accepted. - line_style (string): the string to use to create the outline of the boxes. - label (string): label for rotated box. It will not be rendered when set to None. - - Returns: - output (VisImage): image object with box drawn. 
- """ - cnt_x, cnt_y, w, h, angle = rotated_box - area = w * h - # use thinner lines when the box is small - linewidth = self._default_font_size / ( - 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 - ) - - theta = angle * math.pi / 180.0 - c = math.cos(theta) - s = math.sin(theta) - rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] - # x: left->right ; y: top->down - rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] - for k in range(4): - j = (k + 1) % 4 - self.draw_line( - [rotated_rect[k][0], rotated_rect[j][0]], - [rotated_rect[k][1], rotated_rect[j][1]], - color=edge_color, - linestyle="--" if k == 1 else line_style, - linewidth=linewidth, - ) - - if label is not None: - text_pos = rotated_rect[1] # topleft corner - - height_ratio = h / np.sqrt(self.output.height * self.output.width) - label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) - font_size = ( - np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size - ) - self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) - - return self.output - - def draw_circle(self, circle_coord, color, radius=3): - """ - Args: - circle_coord (list(int) or tuple(int)): contains the x and y coordinates - of the center of the circle. - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - radius (int): radius of the circle. - - Returns: - output (VisImage): image object with box drawn. - """ - x, y = circle_coord - self.output.ax.add_patch( - mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) - ) - return self.output - - def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): - """ - Args: - x_data (list[int]): a list containing x values of all the points being drawn. - Length of list should match the length of y_data. - y_data (list[int]): a list containing y values of all the points being drawn. - Length of list should match the length of x_data. - color: color of the line. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - linestyle: style of the line. Refer to `matplotlib.lines.Line2D` - for a full list of formats that are accepted. - linewidth (float or None): width of the line. When it's None, - a default value will be computed and used. - - Returns: - output (VisImage): image object with line drawn. - """ - if linewidth is None: - linewidth = self._default_font_size / 3 - linewidth = max(linewidth, 1) - self.output.ax.add_line( - mpl.lines.Line2D( - x_data, - y_data, - linewidth=linewidth * self.output.scale, - color=color, - linestyle=linestyle, - ) - ) - return self.output - - def draw_binary_mask( - self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10 - ): - """ - Args: - binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and - W is the image width. Each value in the array is either a 0 or 1 value of uint8 - type. - color: color of the mask. Refer to `matplotlib.colors` for a full list of - formats that are accepted. If None, will pick a random color. - edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a - full list of formats that are accepted. - text (str): if None, will be drawn on the object - alpha (float): blending efficient. Smaller values lead to more transparent masks. - area_threshold (float): a connected component smaller than this area will not be shown. 
- - Returns: - output (VisImage): image object with mask drawn. - """ - if color is None: - color = random_color(rgb=True, maximum=1) - color = mplc.to_rgb(color) - - has_valid_segment = False - binary_mask = binary_mask.astype("uint8") # opencv needs uint8 - mask = GenericMask(binary_mask, self.output.height, self.output.width) - shape2d = (binary_mask.shape[0], binary_mask.shape[1]) - - if not mask.has_holes: - # draw polygons for regular masks - for segment in mask.polygons: - area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) - if area < (area_threshold or 0): - continue - has_valid_segment = True - segment = segment.reshape(-1, 2) - self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) - else: - # TODO: Use Path/PathPatch to draw vector graphics: - # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon - rgba = np.zeros(shape2d + (4,), dtype="float32") - rgba[:, :, :3] = color - rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha - has_valid_segment = True - self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) - - if text is not None and has_valid_segment: - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - self._draw_text_in_mask(binary_mask, text, lighter_color) - return self.output - - def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): - """ - Args: - soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. - color: color of the mask. Refer to `matplotlib.colors` for a full list of - formats that are accepted. If None, will pick a random color. - text (str): if None, will be drawn on the object - alpha (float): blending efficient. Smaller values lead to more transparent masks. - - Returns: - output (VisImage): image object with mask drawn. - """ - if color is None: - color = random_color(rgb=True, maximum=1) - color = mplc.to_rgb(color) - - shape2d = (soft_mask.shape[0], soft_mask.shape[1]) - rgba = np.zeros(shape2d + (4,), dtype="float32") - rgba[:, :, :3] = color - rgba[:, :, 3] = soft_mask * alpha - self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) - - if text is not None: - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - binary_mask = (soft_mask > 0.5).astype("uint8") - self._draw_text_in_mask(binary_mask, text, lighter_color) - return self.output - - def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): - """ - Args: - segment: numpy array of shape Nx2, containing all the points in the polygon. - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a - full list of formats that are accepted. If not provided, a darker shade - of the polygon color will be used instead. - alpha (float): blending efficient. Smaller values lead to more transparent masks. - - Returns: - output (VisImage): image object with polygon drawn. 
- """ - if edge_color is None: - # make edge color darker than the polygon color - if alpha > 0.8: - edge_color = self._change_color_brightness(color, brightness_factor=-0.7) - else: - edge_color = color - edge_color = mplc.to_rgb(edge_color) + (1,) - - polygon = mpl.patches.Polygon( - segment, - fill=True, - facecolor=mplc.to_rgb(color) + (alpha,), - edgecolor=edge_color, - linewidth=max(self._default_font_size // 15 * self.output.scale, 1), - ) - self.output.ax.add_patch(polygon) - return self.output - - """ - Internal methods: - """ - - def _jitter(self, color): - """ - Randomly modifies given color to produce a slightly different color than the color given. - - Args: - color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color - picked. The values in the list are in the [0.0, 1.0] range. - - Returns: - jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the - color after being jittered. The values in the list are in the [0.0, 1.0] range. - """ - color = mplc.to_rgb(color) - vec = np.random.rand(3) - # better to do it in another color space - vec = vec / np.linalg.norm(vec) * 0.5 - res = np.clip(vec + color, 0, 1) - return tuple(res) - - def _create_grayscale_image(self, mask=None): - """ - Create a grayscale version of the original image. - The colors in masked area, if given, will be kept. - """ - img_bw = self.img.astype("f4").mean(axis=2) - img_bw = np.stack([img_bw] * 3, axis=2) - if mask is not None: - img_bw[mask] = self.img[mask] - return img_bw - - def _change_color_brightness(self, color, brightness_factor): - """ - Depending on the brightness_factor, gives a lighter or darker color i.e. a color with - less or more saturation than the original color. - - Args: - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of - 0 will correspond to no change, a factor in [-1.0, 0) range will result in - a darker color and a factor in (0, 1.0] range will result in a lighter color. - - Returns: - modified_color (tuple[double]): a tuple containing the RGB values of the - modified color. Each value in the tuple is in the [0.0, 1.0] range. - """ - assert brightness_factor >= -1.0 and brightness_factor <= 1.0 - color = mplc.to_rgb(color) - polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) - modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) - modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness - modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness - modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) - return modified_color - - def _convert_boxes(self, boxes): - """ - Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. - """ - if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): - return boxes.tensor.detach().numpy() - else: - return np.asarray(boxes) - - def _convert_masks(self, masks_or_polygons): - """ - Convert different format of masks or polygons to a tuple of masks and polygons. 
- - Returns: - list[GenericMask]: - """ - - m = masks_or_polygons - if isinstance(m, PolygonMasks): - m = m.polygons - if isinstance(m, BitMasks): - m = m.tensor.numpy() - if isinstance(m, torch.Tensor): - m = m.numpy() - ret = [] - for x in m: - if isinstance(x, GenericMask): - ret.append(x) - else: - ret.append(GenericMask(x, self.output.height, self.output.width)) - return ret - - def _draw_text_in_mask(self, binary_mask, text, color): - """ - Find proper places to draw text given a binary mask. - """ - # TODO sometimes drawn on wrong objects. the heuristics here can improve. - _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) - if stats[1:, -1].size == 0: - return - largest_component_id = np.argmax(stats[1:, -1]) + 1 - - # draw text on the largest component, as well as other very large components. - for cid in range(1, _num_cc): - if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: - # median is more stable than centroid - # center = centroids[largest_component_id] - center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] - self.draw_text(text, center, color=color) - - def _convert_keypoints(self, keypoints): - if isinstance(keypoints, Keypoints): - keypoints = keypoints.tensor - keypoints = np.asarray(keypoints) - return keypoints - - def get_output(self): - """ - Returns: - output (VisImage): the image output containing the visualizations added - to the image. - """ - return self.output diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/tests/unit/test_meshes.py b/spaces/OpenMotionLab/MotionGPT/pyrender/tests/unit/test_meshes.py deleted file mode 100644 index 7070b01171c97069fa013c6eba8eee217017f08e..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/pyrender/tests/unit/test_meshes.py +++ /dev/null @@ -1,133 +0,0 @@ -import numpy as np -import pytest -import trimesh - -from pyrender import (Mesh, Primitive) - - -def test_meshes(): - - with pytest.raises(TypeError): - x = Mesh() - with pytest.raises(TypeError): - x = Primitive() - with pytest.raises(ValueError): - x = Primitive([], mode=10) - - # Basics - x = Mesh([]) - assert x.name is None - assert x.is_visible - assert x.weights is None - - x.name = 'str' - - # From Trimesh - x = Mesh.from_trimesh(trimesh.creation.box()) - assert isinstance(x, Mesh) - assert len(x.primitives) == 1 - assert x.is_visible - assert np.allclose(x.bounds, np.array([ - [-0.5, -0.5, -0.5], - [0.5, 0.5, 0.5] - ])) - assert np.allclose(x.centroid, np.zeros(3)) - assert np.allclose(x.extents, np.ones(3)) - assert np.allclose(x.scale, np.sqrt(3)) - assert not x.is_transparent - - # Test some primitive functions - x = x.primitives[0] - with pytest.raises(ValueError): - x.normals = np.zeros(10) - with pytest.raises(ValueError): - x.tangents = np.zeros(10) - with pytest.raises(ValueError): - x.texcoord_0 = np.zeros(10) - with pytest.raises(ValueError): - x.texcoord_1 = np.zeros(10) - with pytest.raises(TypeError): - x.material = np.zeros(10) - assert x.targets is None - assert np.allclose(x.bounds, np.array([ - [-0.5, -0.5, -0.5], - [0.5, 0.5, 0.5] - ])) - assert np.allclose(x.centroid, np.zeros(3)) - assert np.allclose(x.extents, np.ones(3)) - assert np.allclose(x.scale, np.sqrt(3)) - x.material.baseColorFactor = np.array([0.0, 0.0, 0.0, 0.0]) - assert x.is_transparent - - # From two trimeshes - x = Mesh.from_trimesh([trimesh.creation.box(), - trimesh.creation.cylinder(radius=0.1, height=2.0)], - smooth=False) - assert isinstance(x, Mesh) - assert len(x.primitives) 
== 2 - assert x.is_visible - assert np.allclose(x.bounds, np.array([ - [-0.5, -0.5, -1.0], - [0.5, 0.5, 1.0] - ])) - assert np.allclose(x.centroid, np.zeros(3)) - assert np.allclose(x.extents, [1.0, 1.0, 2.0]) - assert np.allclose(x.scale, np.sqrt(6)) - assert not x.is_transparent - - # From bad data - with pytest.raises(TypeError): - x = Mesh.from_trimesh(None) - - # With instancing - poses = np.tile(np.eye(4), (5,1,1)) - poses[:,0,3] = np.array([0,1,2,3,4]) - x = Mesh.from_trimesh(trimesh.creation.box(), poses=poses) - assert np.allclose(x.bounds, np.array([ - [-0.5, -0.5, -0.5], - [4.5, 0.5, 0.5] - ])) - poses = np.eye(4) - x = Mesh.from_trimesh(trimesh.creation.box(), poses=poses) - poses = np.eye(3) - with pytest.raises(ValueError): - x = Mesh.from_trimesh(trimesh.creation.box(), poses=poses) - - # From textured meshes - fm = trimesh.load('tests/data/fuze.obj') - x = Mesh.from_trimesh(fm) - assert isinstance(x, Mesh) - assert len(x.primitives) == 1 - assert x.is_visible - assert not x.is_transparent - assert x.primitives[0].material.baseColorTexture is not None - - x = Mesh.from_trimesh(fm, smooth=False) - fm.visual = fm.visual.to_color() - fm.visual.face_colors = np.array([1.0, 0.0, 0.0, 1.0]) - x = Mesh.from_trimesh(fm, smooth=False) - with pytest.raises(ValueError): - x = Mesh.from_trimesh(fm, smooth=True) - - fm.visual.vertex_colors = np.array([1.0, 0.0, 0.0, 0.5]) - x = Mesh.from_trimesh(fm, smooth=False) - x = Mesh.from_trimesh(fm, smooth=True) - assert x.primitives[0].color_0 is not None - assert x.is_transparent - - bm = trimesh.load('tests/data/WaterBottle.glb').dump()[0] - x = Mesh.from_trimesh(bm) - assert x.primitives[0].material.baseColorTexture is not None - assert x.primitives[0].material.emissiveTexture is not None - assert x.primitives[0].material.metallicRoughnessTexture is not None - - # From point cloud - x = Mesh.from_points(fm.vertices) - -# def test_duck(): -# bm = trimesh.load('tests/data/Duck.glb').dump()[0] -# x = Mesh.from_trimesh(bm) -# assert x.primitives[0].material.baseColorTexture is not None -# pixel = x.primitives[0].material.baseColorTexture.source[100, 100] -# yellowish = np.array([1.0, 0.7411765, 0.0, 1.0]) -# assert np.allclose(pixel, yellowish) diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/losses/cross_entropy_loss.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/losses/cross_entropy_loss.py deleted file mode 100644 index 42c0790c98616bb69621deed55547fc04c7392ef..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/losses/cross_entropy_loss.py +++ /dev/null @@ -1,198 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES -from .utils import get_class_weight, weight_reduce_loss - - -def cross_entropy(pred, - label, - weight=None, - class_weight=None, - reduction='mean', - avg_factor=None, - ignore_index=-100): - """The wrapper function for :func:`F.cross_entropy`""" - # class_weight is a manual rescaling weight given to each class. 
- # If given, has to be a Tensor of size C element-wise losses - loss = F.cross_entropy( - pred, - label, - weight=class_weight, - reduction='none', - ignore_index=ignore_index) - - # apply weights and do the reduction - if weight is not None: - weight = weight.float() - loss = weight_reduce_loss( - loss, weight=weight, reduction=reduction, avg_factor=avg_factor) - - return loss - - -def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index): - """Expand onehot labels to match the size of prediction.""" - bin_labels = labels.new_zeros(target_shape) - valid_mask = (labels >= 0) & (labels != ignore_index) - inds = torch.nonzero(valid_mask, as_tuple=True) - - if inds[0].numel() > 0: - if labels.dim() == 3: - bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1 - else: - bin_labels[inds[0], labels[valid_mask]] = 1 - - valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float() - if label_weights is None: - bin_label_weights = valid_mask - else: - bin_label_weights = label_weights.unsqueeze(1).expand(target_shape) - bin_label_weights *= valid_mask - - return bin_labels, bin_label_weights - - -def binary_cross_entropy(pred, - label, - weight=None, - reduction='mean', - avg_factor=None, - class_weight=None, - ignore_index=255): - """Calculate the binary CrossEntropy loss. - - Args: - pred (torch.Tensor): The prediction with shape (N, 1). - label (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): Sample-wise loss weight. - reduction (str, optional): The method used to reduce the loss. - Options are "none", "mean" and "sum". - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - class_weight (list[float], optional): The weight for each class. - ignore_index (int | None): The label index to be ignored. Default: 255 - - Returns: - torch.Tensor: The calculated loss - """ - if pred.dim() != label.dim(): - assert (pred.dim() == 2 and label.dim() == 1) or ( - pred.dim() == 4 and label.dim() == 3), \ - 'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \ - 'H, W], label shape [N, H, W] are supported' - label, weight = _expand_onehot_labels(label, weight, pred.shape, - ignore_index) - - # weighted element-wise losses - if weight is not None: - weight = weight.float() - loss = F.binary_cross_entropy_with_logits( - pred, label.float(), pos_weight=class_weight, reduction='none') - # do the reduction for the weighted loss - loss = weight_reduce_loss( - loss, weight, reduction=reduction, avg_factor=avg_factor) - - return loss - - -def mask_cross_entropy(pred, - target, - label, - reduction='mean', - avg_factor=None, - class_weight=None, - ignore_index=None): - """Calculate the CrossEntropy loss for masks. - - Args: - pred (torch.Tensor): The prediction with shape (N, C), C is the number - of classes. - target (torch.Tensor): The learning label of the prediction. - label (torch.Tensor): ``label`` indicates the class label of the mask' - corresponding object. This will be used to select the mask in the - of the class which the object belongs to when the mask prediction - if not class-agnostic. - reduction (str, optional): The method used to reduce the loss. - Options are "none", "mean" and "sum". - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - class_weight (list[float], optional): The weight for each class. - ignore_index (None): Placeholder, to be consistent with other loss. - Default: None. 
- - Returns: - torch.Tensor: The calculated loss - """ - assert ignore_index is None, 'BCE loss does not support ignore_index' - # TODO: handle these two reserved arguments - assert reduction == 'mean' and avg_factor is None - num_rois = pred.size()[0] - inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) - pred_slice = pred[inds, label].squeeze(1) - return F.binary_cross_entropy_with_logits( - pred_slice, target, weight=class_weight, reduction='mean')[None] - - -@LOSSES.register_module() -class CrossEntropyLoss(nn.Module): - """CrossEntropyLoss. - - Args: - use_sigmoid (bool, optional): Whether the prediction uses sigmoid - of softmax. Defaults to False. - use_mask (bool, optional): Whether to use mask cross entropy loss. - Defaults to False. - reduction (str, optional): . Defaults to 'mean'. - Options are "none", "mean" and "sum". - class_weight (list[float] | str, optional): Weight of each class. If in - str format, read them from a file. Defaults to None. - loss_weight (float, optional): Weight of the loss. Defaults to 1.0. - """ - - def __init__(self, - use_sigmoid=False, - use_mask=False, - reduction='mean', - class_weight=None, - loss_weight=1.0): - super(CrossEntropyLoss, self).__init__() - assert (use_sigmoid is False) or (use_mask is False) - self.use_sigmoid = use_sigmoid - self.use_mask = use_mask - self.reduction = reduction - self.loss_weight = loss_weight - self.class_weight = get_class_weight(class_weight) - - if self.use_sigmoid: - self.cls_criterion = binary_cross_entropy - elif self.use_mask: - self.cls_criterion = mask_cross_entropy - else: - self.cls_criterion = cross_entropy - - def forward(self, - cls_score, - label, - weight=None, - avg_factor=None, - reduction_override=None, - **kwargs): - """Forward function.""" - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if self.class_weight is not None: - class_weight = cls_score.new_tensor(self.class_weight) - else: - class_weight = None - loss_cls = self.loss_weight * self.cls_criterion( - cls_score, - label, - weight, - class_weight=class_weight, - reduction=reduction, - avg_factor=avg_factor, - **kwargs) - return loss_cls diff --git a/spaces/PSLD/PSLD/diffusion-posterior-sampling/guided_diffusion/unet.py b/spaces/PSLD/PSLD/diffusion-posterior-sampling/guided_diffusion/unet.py deleted file mode 100644 index cfda79f6a3edb47be715742b572641da561f9d9e..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/diffusion-posterior-sampling/guided_diffusion/unet.py +++ /dev/null @@ -1,1117 +0,0 @@ -from abc import abstractmethod - -import math - -import numpy as np -import torch as th -import torch.nn as nn -import torch.nn.functional as F -import functools - -from .fp16_util import convert_module_to_f16, convert_module_to_f32 -from .nn import ( - checkpoint, - conv_nd, - linear, - avg_pool_nd, - zero_module, - normalization, - timestep_embedding, -) - - -NUM_CLASSES = 1000 - -def create_model( - image_size, - num_channels, - num_res_blocks, - channel_mult="", - learn_sigma=False, - class_cond=False, - use_checkpoint=False, - attention_resolutions="16", - num_heads=1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - dropout=0, - resblock_updown=False, - use_fp16=False, - use_new_attention_order=False, - model_path='', -): - if channel_mult == "": - if image_size == 512: - channel_mult = (0.5, 1, 1, 2, 2, 4, 4) - elif image_size == 256: - channel_mult = (1, 1, 2, 2, 4, 4) - elif 
image_size == 128: - channel_mult = (1, 1, 2, 3, 4) - elif image_size == 64: - channel_mult = (1, 2, 3, 4) - else: - raise ValueError(f"unsupported image size: {image_size}") - else: - channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(",")) - - attention_ds = [] - if isinstance(attention_resolutions, int): - attention_ds.append(image_size // attention_resolutions) - elif isinstance(attention_resolutions, str): - for res in attention_resolutions.split(","): - attention_ds.append(image_size // int(res)) - else: - raise NotImplementedError - - model= UNetModel( - image_size=image_size, - in_channels=3, - model_channels=num_channels, - out_channels=(3 if not learn_sigma else 6), - num_res_blocks=num_res_blocks, - attention_resolutions=tuple(attention_ds), - dropout=dropout, - channel_mult=channel_mult, - num_classes=(NUM_CLASSES if class_cond else None), - use_checkpoint=use_checkpoint, - use_fp16=use_fp16, - num_heads=num_heads, - num_head_channels=num_head_channels, - num_heads_upsample=num_heads_upsample, - use_scale_shift_norm=use_scale_shift_norm, - resblock_updown=resblock_updown, - use_new_attention_order=use_new_attention_order, - ) - - try: - model.load_state_dict(th.load(model_path, map_location='cpu')) - except Exception as e: - print(f"Got exception: {e} / Randomly initialize") - return model - -class AttentionPool2d(nn.Module): - """ - Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py - """ - - def __init__( - self, - spacial_dim: int, - embed_dim: int, - num_heads_channels: int, - output_dim: int = None, - ): - super().__init__() - self.positional_embedding = nn.Parameter( - th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5 - ) - self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) - self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) - self.num_heads = embed_dim // num_heads_channels - self.attention = QKVAttention(self.num_heads) - - def forward(self, x): - b, c, *_spatial = x.shape - x = x.reshape(b, c, -1) # NC(HW) - x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) - x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) - x = self.qkv_proj(x) - x = self.attention(x) - x = self.c_proj(x) - return x[:, :, 0] - - -class TimestepBlock(nn.Module): - """ - Any module where forward() takes timestep embeddings as a second argument. - """ - - @abstractmethod - def forward(self, x, emb): - """ - Apply the module to `x` given `emb` timestep embeddings. - """ - - -class TimestepEmbedSequential(nn.Sequential, TimestepBlock): - """ - A sequential module that passes timestep embeddings to the children that - support it as an extra input. - """ - - def forward(self, x, emb): - for layer in self: - if isinstance(layer, TimestepBlock): - x = layer(x, emb) - else: - x = layer(x) - return x - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - upsampling occurs in the inner-two dimensions. 
- """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - if use_conv: - self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1) - - def forward(self, x): - assert x.shape[1] == self.channels - if self.dims == 3: - x = F.interpolate( - x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" - ) - else: - x = F.interpolate(x, scale_factor=2, mode="nearest") - if self.use_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - downsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - stride = 2 if dims != 3 else (1, 2, 2) - if use_conv: - self.op = conv_nd( - dims, self.channels, self.out_channels, 3, stride=stride, padding=1 - ) - else: - assert self.channels == self.out_channels - self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class ResBlock(TimestepBlock): - """ - A residual block that can optionally change the number of channels. - - :param channels: the number of input channels. - :param emb_channels: the number of timestep embedding channels. - :param dropout: the rate of dropout. - :param out_channels: if specified, the number of out channels. - :param use_conv: if True and out_channels is specified, use a spatial - convolution instead of a smaller 1x1 convolution to change the - channels in the skip connection. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param use_checkpoint: if True, use gradient checkpointing on this module. - :param up: if True, use this block for upsampling. - :param down: if True, use this block for downsampling. 
- """ - - def __init__( - self, - channels, - emb_channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - use_checkpoint=False, - up=False, - down=False, - ): - super().__init__() - self.channels = channels - self.emb_channels = emb_channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_checkpoint = use_checkpoint - self.use_scale_shift_norm = use_scale_shift_norm - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - conv_nd(dims, channels, self.out_channels, 3, padding=1), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.emb_layers = nn.Sequential( - nn.SiLU(), - linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, - ), - ) - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module( - conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) - ), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = conv_nd( - dims, channels, self.out_channels, 3, padding=1 - ) - else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) - - def forward(self, x, emb): - """ - Apply the block to a Tensor, conditioned on a timestep embedding. - - :param x: an [N x C x ...] Tensor of features. - :param emb: an [N x emb_channels] Tensor of timestep embeddings. - :return: an [N x C x ...] Tensor of outputs. - """ - return checkpoint( - self._forward, (x, emb), self.parameters(), self.use_checkpoint - ) - - def _forward(self, x, emb): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] - if self.use_scale_shift_norm: - out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = th.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift - h = out_rest(h) - else: - h = h + emb_out - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AttentionBlock(nn.Module): - """ - An attention block that allows spatial positions to attend to each other. - - Originally ported from here, but adapted to the N-d case. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
- """ - - def __init__( - self, - channels, - num_heads=1, - num_head_channels=-1, - use_checkpoint=False, - use_new_attention_order=False, - ): - super().__init__() - self.channels = channels - if num_head_channels == -1: - self.num_heads = num_heads - else: - assert ( - channels % num_head_channels == 0 - ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" - self.num_heads = channels // num_head_channels - self.use_checkpoint = use_checkpoint - self.norm = normalization(channels) - self.qkv = conv_nd(1, channels, channels * 3, 1) - if use_new_attention_order: - # split qkv before split heads - self.attention = QKVAttention(self.num_heads) - else: - # split heads before split qkv - self.attention = QKVAttentionLegacy(self.num_heads) - - self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) - - def forward(self, x): - return checkpoint(self._forward, (x,), self.parameters(), True) - - def _forward(self, x): - b, c, *spatial = x.shape - x = x.reshape(b, c, -1) - qkv = self.qkv(self.norm(x)) - h = self.attention(qkv) - h = self.proj_out(h) - return (x + h).reshape(b, c, *spatial) - - -def count_flops_attn(model, _x, y): - """ - A counter for the `thop` package to count the operations in an - attention operation. - Meant to be used like: - macs, params = thop.profile( - model, - inputs=(inputs, timestamps), - custom_ops={QKVAttention: QKVAttention.count_flops}, - ) - """ - b, c, *spatial = y[0].shape - num_spatial = int(np.prod(spatial)) - # We perform two matmuls with the same number of ops. - # The first computes the weight matrix, the second computes - # the combination of the value vectors. - matmul_ops = 2 * b * (num_spatial ** 2) * c - model.total_ops += th.DoubleTensor([matmul_ops]) - - -class QKVAttentionLegacy(nn.Module): - """ - A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - - :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. - """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class QKVAttention(nn.Module): - """ - A module which performs QKV attention and splits in a different order. - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - - :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. 
- """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.chunk(3, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", - (q * scale).view(bs * self.n_heads, ch, length), - (k * scale).view(bs * self.n_heads, ch, length), - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class UNetModel(nn.Module): - """ - The full UNet model with attention and timestep embedding. - - :param in_channels: channels in the input Tensor. - :param model_channels: base channel count for the model. - :param out_channels: channels in the output Tensor. - :param num_res_blocks: number of residual blocks per downsample. - :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. - :param dropout: the dropout probability. - :param channel_mult: channel multiplier for each level of the UNet. - :param conv_resample: if True, use learned convolutions for upsampling and - downsampling. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param num_classes: if specified (as an int), then this model will be - class-conditional with `num_classes` classes. - :param use_checkpoint: use gradient checkpointing to reduce memory usage. - :param num_heads: the number of attention heads in each attention layer. - :param num_heads_channels: if specified, ignore num_heads and instead use - a fixed channel width per attention head. - :param num_heads_upsample: works with num_heads to set a different number - of heads for upsampling. Deprecated. - :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. - :param resblock_updown: use residual blocks for up/downsampling. - :param use_new_attention_order: use a different attention pattern for potentially - increased efficiency. 
- """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - num_classes=None, - use_checkpoint=False, - use_fp16=False, - num_heads=1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - ): - super().__init__() - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - self.image_size = image_size - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.num_classes = num_classes - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - if self.num_classes is not None: - self.label_emb = nn.Embedding(num_classes, time_embed_dim) - - ch = input_ch = int(channel_mult[0] * model_channels) - self.input_blocks = nn.ModuleList( - [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))] - ) - self._feature_size = ch - input_block_chans = [ch] - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=int(mult * model_channels), - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = int(mult * model_channels) - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - - self.output_blocks = nn.ModuleList([]) - for level, mult in list(enumerate(channel_mult))[::-1]: - for i in range(num_res_blocks + 1): - ich = input_block_chans.pop() - layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, - out_channels=int(model_channels * 
mult), - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = int(model_channels * mult) - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads_upsample, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ) - ) - if level and i == num_res_blocks: - out_ch = ch - layers.append( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - up=True, - ) - if resblock_updown - else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ds //= 2 - self.output_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)), - ) - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - self.output_blocks.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - self.output_blocks.apply(convert_module_to_f32) - - def forward(self, x, timesteps, y=None): - """ - Apply the model to an input batch. - - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :param y: an [N] Tensor of labels, if class-conditional. - :return: an [N x C x ...] Tensor of outputs. - """ - assert (y is not None) == ( - self.num_classes is not None - ), "must specify y if and only if the model is class-conditional" - - hs = [] - emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) - - if self.num_classes is not None: - assert y.shape == (x.shape[0],) - emb = emb + self.label_emb(y) - - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb) - hs.append(h) - h = self.middle_block(h, emb) - for module in self.output_blocks: - h = th.cat([h, hs.pop()], dim=1) - h = module(h, emb) - h = h.type(x.dtype) - return self.out(h) - - -class SuperResModel(UNetModel): - """ - A UNetModel that performs super-resolution. - - Expects an extra kwarg `low_res` to condition on a low-resolution image. - """ - - def __init__(self, image_size, in_channels, *args, **kwargs): - super().__init__(image_size, in_channels * 2, *args, **kwargs) - - def forward(self, x, timesteps, low_res=None, **kwargs): - _, _, new_height, new_width = x.shape - upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear") - x = th.cat([x, upsampled], dim=1) - return super().forward(x, timesteps, **kwargs) - - -class EncoderUNetModel(nn.Module): - """ - The half UNet model with attention and timestep embedding. - - For usage, see UNet. 
- """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - use_checkpoint=False, - use_fp16=False, - num_heads=1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - pool="adaptive", - ): - super().__init__() - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - ch = int(channel_mult[0] * model_channels) - self.input_blocks = nn.ModuleList( - [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))] - ) - self._feature_size = ch - input_block_chans = [ch] - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=int(mult * model_channels), - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = int(mult * model_channels) - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - self.pool = pool - if pool == "adaptive": - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - nn.AdaptiveAvgPool2d((1, 1)), - zero_module(conv_nd(dims, ch, out_channels, 1)), - nn.Flatten(), - ) - elif pool == "attention": - assert num_head_channels != -1 - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - AttentionPool2d( - (image_size // ds), ch, num_head_channels, out_channels - ), - ) - elif pool == "spatial": - 
self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - nn.ReLU(), - nn.Linear(2048, self.out_channels), - ) - elif pool == "spatial_v2": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - normalization(2048), - nn.SiLU(), - nn.Linear(2048, self.out_channels), - ) - else: - raise NotImplementedError(f"Unexpected {pool} pooling") - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - - def forward(self, x, timesteps): - """ - Apply the model to an input batch. - - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :return: an [N x K] Tensor of outputs. - """ - emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) - - results = [] - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = self.middle_block(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = th.cat(results, axis=-1) - return self.out(h) - else: - h = h.type(x.dtype) - return self.out(h) - - -class NLayerDiscriminator(nn.Module): - def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False): - super(NLayerDiscriminator, self).__init__() - if type(norm_layer) == functools.partial: - use_bias = norm_layer.func == nn.InstanceNorm2d - else: - use_bias = norm_layer == nn.InstanceNorm2d - - kw = 4 - padw = 1 - sequence = [ - nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), - nn.LeakyReLU(0.2, True) - ] - - nf_mult = 1 - nf_mult_prev = 1 - for n in range(1, n_layers): - nf_mult_prev = nf_mult - nf_mult = min(2**n, 8) - sequence += [ - nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, - kernel_size=kw, stride=2, padding=padw, bias=use_bias), - norm_layer(ndf * nf_mult), - nn.LeakyReLU(0.2, True) - ] - - nf_mult_prev = nf_mult - nf_mult = min(2**n_layers, 8) - sequence += [ - nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, - kernel_size=kw, stride=2, padding=padw, bias=use_bias), - norm_layer(ndf * nf_mult), - nn.LeakyReLU(0.2, True) - ] - - sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=2, padding=padw)] + [nn.Dropout(0.5)] - if use_sigmoid: - sequence += [nn.Sigmoid()] - - self.model = nn.Sequential(*sequence) - - def forward(self, input): - return self.model(input) - - -class GANLoss(nn.Module): - """Define different GAN objectives. - - The GANLoss class abstracts away the need to create the target label tensor - that has the same size as the input. - """ - - def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0): - """ Initialize the GANLoss class. - - Parameters: - gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp. - target_real_label (bool) - - label for a real image - target_fake_label (bool) - - label of a fake image - - Note: Do not use sigmoid as the last layer of Discriminator. - LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss. 
- """ - super(GANLoss, self).__init__() - self.register_buffer('real_label', th.tensor(target_real_label)) - self.register_buffer('fake_label', th.tensor(target_fake_label)) - self.gan_mode = gan_mode - if gan_mode == 'lsgan': - self.loss = nn.MSELoss() - elif gan_mode == 'vanilla': - self.loss = nn.BCEWithLogitsLoss() - elif gan_mode in ['wgangp']: - self.loss = None - else: - raise NotImplementedError('gan mode %s not implemented' % gan_mode) - - def get_target_tensor(self, prediction, target_is_real): - """Create label tensors with the same size as the input. - - Parameters: - prediction (tensor) - - tpyically the prediction from a discriminator - target_is_real (bool) - - if the ground truth label is for real images or fake images - - Returns: - A label tensor filled with ground truth label, and with the size of the input - """ - - if target_is_real: - target_tensor = self.real_label - else: - target_tensor = self.fake_label - return target_tensor.expand_as(prediction) - - def __call__(self, prediction, target_is_real): - """Calculate loss given Discriminator's output and grount truth labels. - - Parameters: - prediction (tensor) - - tpyically the prediction output from a discriminator - target_is_real (bool) - - if the ground truth label is for real images or fake images - - Returns: - the calculated loss. - """ - if self.gan_mode in ['lsgan', 'vanilla']: - target_tensor = self.get_target_tensor(prediction, target_is_real) - loss = self.loss(prediction, target_tensor) - elif self.gan_mode == 'wgangp': - if target_is_real: - loss = -prediction.mean() - else: - loss = prediction.mean() - return loss - - -def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0): - """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028 - - Arguments: - netD (network) -- discriminator network - real_data (tensor array) -- real images - fake_data (tensor array) -- generated images from the generator - device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') - type (str) -- if we mix real and fake data or not [real | fake | mixed]. - constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2 - lambda_gp (float) -- weight for this loss - - Returns the gradient penalty loss - """ - if lambda_gp > 0.0: - if type == 'real': # either use real images, fake images, or a linear interpolation of two. 
-            interpolatesv = real_data
-        elif type == 'fake':
-            interpolatesv = fake_data
-        elif type == 'mixed':
-            alpha = th.rand(real_data.shape[0], 1, device=device)
-            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
-            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
-        else:
-            raise NotImplementedError('{} not implemented'.format(type))
-        interpolatesv.requires_grad_(True)
-        disc_interpolates = netD(interpolatesv)
-        gradients = th.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
-                                     grad_outputs=th.ones(disc_interpolates.size()).to(device),
-                                     create_graph=True, retain_graph=True, only_inputs=True)
-        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
-        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp  # added eps
-        return gradient_penalty, gradients
-    else:
-        return 0.0, None
diff --git a/spaces/PaddlePaddle/LSeg/README.md b/spaces/PaddlePaddle/LSeg/README.md
deleted file mode 100644
index e034bef55ed96e04711922874ca176baf01cf78a..0000000000000000000000000000000000000000
--- a/spaces/PaddlePaddle/LSeg/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: LSeg
-emoji: 🌍
-colorFrom: indigo
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/PeepDaSlan9/SDXL-artists-browser/README.md b/spaces/PeepDaSlan9/SDXL-artists-browser/README.md
deleted file mode 100644
index c334c5b5ce4bc08141154ee89e892ddb3dbe9f99..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/SDXL-artists-browser/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: SD Artists Browser
-emoji: 🤘
-colorFrom: indigo
-colorTo: purple
-sdk: static
-pinned: false
-license: cc-by-sa-4.0
-duplicated_from: mattthew/SDXL-artists-browser
---- 
-# Another SDXL artist browser?!
-
-Yes, because it's better:
-### Most complete database of tags
-* Each artist has at least 6 tags
-* Mediums, styles, themes, periods, subject matter, and more
-### Largest database of artists: ~~3000k~~ (temporarily 500 due to beta)
-* Sort artists by random to discover new ones
-* Browse image thumbnails and view at full resolution
-* Three images each: 🎨 artwork, 🧑 portrait, and 🏞️ landscape
-### Easiest to use filters
-* Sort filters by name or count of matches
-* Choose strict or permissive when combining filters
-### Save your favorites
-* Export or import your favorites too
-### Community first
-* Transparent: Open source Creative Commons database
-* Private: No cookies, nothing sent to server, use offline
-* Note: I can't control what data Huggingface collects
-
-# How to support this project
-* Please tell a friend or share on your socials
-* Suggest artists I should add or remove
-* Suggest features and report bugs
\ No newline at end of file
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/deprecated_wrappers.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/deprecated_wrappers.py
deleted file mode 100644
index a2e593df9ee57637038683d7a1efaa347b2b69e7..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/deprecated_wrappers.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# This file is for backward compatibility.
-# Module wrappers for empty tensor have been moved to mmcv.cnn.bricks.
-import warnings - -from ..cnn.bricks.wrappers import Conv2d, ConvTranspose2d, Linear, MaxPool2d - - -class Conv2d_deprecated(Conv2d): - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - warnings.warn( - 'Importing Conv2d wrapper from "mmcv.ops" will be deprecated in' - ' the future. Please import them from "mmcv.cnn" instead') - - -class ConvTranspose2d_deprecated(ConvTranspose2d): - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - warnings.warn( - 'Importing ConvTranspose2d wrapper from "mmcv.ops" will be ' - 'deprecated in the future. Please import them from "mmcv.cnn" ' - 'instead') - - -class MaxPool2d_deprecated(MaxPool2d): - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - warnings.warn( - 'Importing MaxPool2d wrapper from "mmcv.ops" will be deprecated in' - ' the future. Please import them from "mmcv.cnn" instead') - - -class Linear_deprecated(Linear): - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - warnings.warn( - 'Importing Linear wrapper from "mmcv.ops" will be deprecated in' - ' the future. Please import them from "mmcv.cnn" instead') diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/base_module.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/base_module.py deleted file mode 100644 index 617fad9bb89f10a9a0911d962dfb3bc8f3a3628c..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/runner/base_module.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import warnings -from abc import ABCMeta -from collections import defaultdict -from logging import FileHandler - -import torch.nn as nn - -from annotator.uniformer.mmcv.runner.dist_utils import master_only -from annotator.uniformer.mmcv.utils.logging import get_logger, logger_initialized, print_log - - -class BaseModule(nn.Module, metaclass=ABCMeta): - """Base module for all modules in openmmlab. - - ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional - functionality of parameter initialization. Compared with - ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes. - - - ``init_cfg``: the config to control the initialization. - - ``init_weights``: The function of parameter - initialization and recording initialization - information. - - ``_params_init_info``: Used to track the parameter - initialization information. This attribute only - exists during executing the ``init_weights``. - - Args: - init_cfg (dict, optional): Initialization config dict. - """ - - def __init__(self, init_cfg=None): - """Initialize BaseModule, inherited from `torch.nn.Module`""" - - # NOTE init_cfg can be defined in different levels, but init_cfg - # in low levels has a higher priority. 
- - super(BaseModule, self).__init__() - # define default value of init_cfg instead of hard code - # in init_weights() function - self._is_init = False - - self.init_cfg = copy.deepcopy(init_cfg) - - # Backward compatibility in derived classes - # if pretrained is not None: - # warnings.warn('DeprecationWarning: pretrained is a deprecated \ - # key, please consider using init_cfg') - # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - - @property - def is_init(self): - return self._is_init - - def init_weights(self): - """Initialize the weights.""" - - is_top_level_module = False - # check if it is top-level module - if not hasattr(self, '_params_init_info'): - # The `_params_init_info` is used to record the initialization - # information of the parameters - # the key should be the obj:`nn.Parameter` of model and the value - # should be a dict containing - # - init_info (str): The string that describes the initialization. - # - tmp_mean_value (FloatTensor): The mean of the parameter, - # which indicates whether the parameter has been modified. - # this attribute would be deleted after all parameters - # is initialized. - self._params_init_info = defaultdict(dict) - is_top_level_module = True - - # Initialize the `_params_init_info`, - # When detecting the `tmp_mean_value` of - # the corresponding parameter is changed, update related - # initialization information - for name, param in self.named_parameters(): - self._params_init_info[param][ - 'init_info'] = f'The value is the same before and ' \ - f'after calling `init_weights` ' \ - f'of {self.__class__.__name__} ' - self._params_init_info[param][ - 'tmp_mean_value'] = param.data.mean() - - # pass `params_init_info` to all submodules - # All submodules share the same `params_init_info`, - # so it will be updated when parameters are - # modified at any level of the model. - for sub_module in self.modules(): - sub_module._params_init_info = self._params_init_info - - # Get the initialized logger, if not exist, - # create a logger named `mmcv` - logger_names = list(logger_initialized.keys()) - logger_name = logger_names[0] if logger_names else 'mmcv' - - from ..cnn import initialize - from ..cnn.utils.weight_init import update_init_info - module_name = self.__class__.__name__ - if not self._is_init: - if self.init_cfg: - print_log( - f'initialize {module_name} with init_cfg {self.init_cfg}', - logger=logger_name) - initialize(self, self.init_cfg) - if isinstance(self.init_cfg, dict): - # prevent the parameters of - # the pre-trained model - # from being overwritten by - # the `init_weights` - if self.init_cfg['type'] == 'Pretrained': - return - - for m in self.children(): - if hasattr(m, 'init_weights'): - m.init_weights() - # users may overload the `init_weights` - update_init_info( - m, - init_info=f'Initialized by ' - f'user-defined `init_weights`' - f' in {m.__class__.__name__} ') - - self._is_init = True - else: - warnings.warn(f'init_weights of {self.__class__.__name__} has ' - f'been called more than once.') - - if is_top_level_module: - self._dump_init_info(logger_name) - - for sub_module in self.modules(): - del sub_module._params_init_info - - @master_only - def _dump_init_info(self, logger_name): - """Dump the initialization information to a file named - `initialization.log.json` in workdir. - - Args: - logger_name (str): The name of logger. 
- """ - - logger = get_logger(logger_name) - - with_file_handler = False - # dump the information to the logger file if there is a `FileHandler` - for handler in logger.handlers: - if isinstance(handler, FileHandler): - handler.stream.write( - 'Name of parameter - Initialization information\n') - for name, param in self.named_parameters(): - handler.stream.write( - f'\n{name} - {param.shape}: ' - f"\n{self._params_init_info[param]['init_info']} \n") - handler.stream.flush() - with_file_handler = True - if not with_file_handler: - for name, param in self.named_parameters(): - print_log( - f'\n{name} - {param.shape}: ' - f"\n{self._params_init_info[param]['init_info']} \n ", - logger=logger_name) - - def __repr__(self): - s = super().__repr__() - if self.init_cfg: - s += f'\ninit_cfg={self.init_cfg}' - return s - - -class Sequential(BaseModule, nn.Sequential): - """Sequential module in openmmlab. - - Args: - init_cfg (dict, optional): Initialization config dict. - """ - - def __init__(self, *args, init_cfg=None): - BaseModule.__init__(self, init_cfg) - nn.Sequential.__init__(self, *args) - - -class ModuleList(BaseModule, nn.ModuleList): - """ModuleList in openmmlab. - - Args: - modules (iterable, optional): an iterable of modules to add. - init_cfg (dict, optional): Initialization config dict. - """ - - def __init__(self, modules=None, init_cfg=None): - BaseModule.__init__(self, init_cfg) - nn.ModuleList.__init__(self, modules) diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/mixup.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/mixup.py deleted file mode 100644 index 85969e097ad154d1948d81eb1b5b3e941f5bf53f..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/mixup.py +++ /dev/null @@ -1,124 +0,0 @@ -"""Mixup detection dataset wrapper.""" -from __future__ import absolute_import -import numpy as np -import torch -import torch.utils.data as data - - -class MixupDetection(data.Dataset): - """Detection dataset wrapper that performs mixup for normal dataset. - Parameters - ---------- - dataset : mx.gluon.data.Dataset - Gluon dataset object. - mixup : callable random generator, e.g. np.random.uniform - A random mixup ratio sampler, preferably a random generator from numpy.random - A random float will be sampled each time with mixup(*args). - Use None to disable. - *args : list - Additional arguments for mixup random sampler. - """ - def __init__(self, dataset, mixup=None, preproc=None, *args): - super().__init__(dataset.input_dim) - self._dataset = dataset - self.preproc = preproc - self._mixup = mixup - self._mixup_args = args - - def set_mixup(self, mixup=None, *args): - """Set mixup random sampler, use None to disable. - Parameters - ---------- - mixup : callable random generator, e.g. np.random.uniform - A random mixup ratio sampler, preferably a random generator from numpy.random - A random float will be sampled each time with mixup(*args) - *args : list - Additional arguments for mixup random sampler. 
- """ - self._mixup = mixup - self._mixup_args = args - - def __len__(self): - return len(self._dataset) - - @Dataset.resize_getitem - def __getitem__(self, idx): - self._dataset._input_dim = self.input_dim - # first image - img1, label1, _, _= self._dataset.pull_item(idx) - lambd = 1 - - # draw a random lambda ratio from distribution - if self._mixup is not None: - lambd = max(0, min(1, self._mixup(*self._mixup_args))) - - if lambd >= 1: - weights1 = np.ones((label1.shape[0], 1)) - label1 = np.hstack((label1, weights1)) - height, width, _ = img1.shape - img_info = (width, height) - if self.preproc is not None: - img_o, target_o = self.preproc(img1, label1, self.input_dim) - return img_o, target_o, img_info, idx - - # second image - idx2 = int(np.random.choice(np.delete(np.arange(len(self)), idx))) - img2, label2, _, _ = self._dataset.pull_item(idx2) - - # mixup two images - height = max(img1.shape[0], img2.shape[0]) - width = max(img1.shape[1], img2.shape[1]) - mix_img = np.zeros((height, width, 3),dtype=np.float32) - mix_img[:img1.shape[0], :img1.shape[1], :] = img1.astype(np.float32) * lambd - mix_img[:img2.shape[0], :img2.shape[1], :] += img2.astype(np.float32) * (1. - lambd) - mix_img = mix_img.astype(np.uint8) - - y1 = np.hstack((label1, np.full((label1.shape[0], 1), lambd))) - y2 = np.hstack((label2, np.full((label2.shape[0], 1), 1. - lambd))) - mix_label = np.vstack((y1, y2)) - if self.preproc is not None: - mix_img, padded_labels = self.preproc(mix_img, mix_label, self.input_dim) - - img_info = (width, height) - - return mix_img, padded_labels, img_info , idx - - def pull_item(self, idx): - self._dataset._input_dim = self.input_dim - # first image - img1, label1, _, _= self._dataset.pull_item(idx) - lambd = 1 - - # draw a random lambda ratio from distribution - if self._mixup is not None: - lambd = max(0, min(1, self._mixup(*self._mixup_args))) - - if lambd >= 1: - weights1 = np.ones((label1.shape[0], 1)) - label1 = np.hstack((label1, weights1)) - height, width, _ = img1.shape - img_info = (width, height) - if self.preproc is not None: - img_o, target_o = self.preproc(img1, label1, self.input_dim) - return img_o, target_o, img_info, idx - - # second image - idx2 = int(np.random.choice(np.delete(np.arange(len(self)), idx))) - img2, label2 = self._dataset.pull_item(idx2) - - # mixup two images - height = max(img1.shape[0], img2.shape[0]) - width = max(img1.shape[1], img2.shape[1]) - mix_img = np.zeros((height, width, 3),dtype=np.float32) - mix_img[:img1.shape[0], :img1.shape[1], :] = img1.astype(np.float32) * lambd - mix_img[:img2.shape[0], :img2.shape[1], :] += img2.astype(np.float32) * (1. - lambd) - mix_img = mix_img.astype(np.uint8) - - y1 = np.hstack((label1, np.full((label1.shape[0], 1), lambd))) - y2 = np.hstack((label2, np.full((label2.shape[0], 1), 1. 
- lambd))) - mix_label = np.vstack((y1, y2)) - if self.preproc is not None: - mix_img, padded_labels = self.preproc(mix_img, mix_label, self.input_dim) - - img_info = (width, height) - return mix_img, padded_labels, img_info , idx diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/engine/evolution.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/engine/evolution.py deleted file mode 100644 index 722fcf6ba903a7e17a0abd7bebafcc033bdb2970..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/engine/evolution.py +++ /dev/null @@ -1,357 +0,0 @@ - -import time -import pickle -import logging -import os -import numpy as np -import torch -import torch.nn as nn - - -from collections import OrderedDict -from yaml import safe_dump -from yacs.config import load_cfg, CfgNode#, _to_dict -from maskrcnn_benchmark.config import cfg -from maskrcnn_benchmark.engine.inference import _accumulate_predictions_from_multiple_gpus -from maskrcnn_benchmark.modeling.backbone.nas import get_layer_name -from maskrcnn_benchmark.utils.comm import synchronize, get_rank, is_main_process, get_world_size, all_gather -from maskrcnn_benchmark.data.datasets.evaluation import evaluate -from maskrcnn_benchmark.utils.flops import profile - - -choice = lambda x:x[np.random.randint(len(x))] if isinstance(x,tuple) else choice(tuple(x)) - - -def gather_candidates(all_candidates): - all_candidates = all_gather(all_candidates) - all_candidates = [cand for candidates in all_candidates for cand in candidates] - return list(set(all_candidates)) - - -def gather_stats(all_candidates): - all_candidates = all_gather(all_candidates) - reduced_statcs = {} - for candidates in all_candidates: - reduced_statcs.update(candidates) # will replace the existing key with last value if more than one exists - return reduced_statcs - - -def compute_on_dataset(model, rngs, data_loader, device=cfg.MODEL.DEVICE): - model.eval() - results_dict = {} - cpu_device = torch.device("cpu") - for _, batch in enumerate(data_loader): - images, targets, image_ids = batch - with torch.no_grad(): - output = model(images.to(device), rngs=rngs) - output = [o.to(cpu_device) for o in output] - results_dict.update( - {img_id: result for img_id, result in zip(image_ids, output)} - ) - return results_dict - - -def bn_statistic(model, rngs, data_loader, device=cfg.MODEL.DEVICE, max_iter=500): - for name, param in model.named_buffers(): - if 'running_mean' in name: - nn.init.constant_(param, 0) - if 'running_var' in name: - nn.init.constant_(param, 1) - - model.train() - for iteration, (images, targets, _) in enumerate(data_loader, 1): - images = images.to(device) - targets = [target.to(device) for target in targets] - with torch.no_grad(): - loss_dict = model(images, targets, rngs) - if iteration >= max_iter: - break - - return model - - -def inference( - model, - rngs, - data_loader, - iou_types=("bbox",), - box_only=False, - device="cuda", - expected_results=(), - expected_results_sigma_tol=4, - output_folder=None, -): - - # convert to a torch.device for efficiency - device = torch.device(device) - dataset = data_loader.dataset - predictions = compute_on_dataset(model, rngs, data_loader, device) - # wait for all processes to complete before measuring the time - synchronize() - - predictions = _accumulate_predictions_from_multiple_gpus(predictions) - if not is_main_process(): - return - - extra_args = dict( - box_only=box_only, - iou_types=iou_types, - 
expected_results=expected_results, - expected_results_sigma_tol=expected_results_sigma_tol, - ) - - return evaluate(dataset=dataset, - predictions=predictions, - output_folder=output_folder, - **extra_args) - - -def fitness(cfg, model, rngs, val_loaders): - iou_types = ("bbox",) - if cfg.MODEL.MASK_ON: - iou_types = iou_types + ("segm",) - for data_loader_val in val_loaders: - results = inference( - model, - rngs, - data_loader_val, - iou_types=iou_types, - box_only=False, - device=cfg.MODEL.DEVICE, - expected_results=cfg.TEST.EXPECTED_RESULTS, - expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, - ) - synchronize() - - return results - - -class EvolutionTrainer(object): - def __init__(self, cfg, model, flops_limit=None, is_distributed=True): - - self.log_dir = cfg.OUTPUT_DIR - self.checkpoint_name = os.path.join(self.log_dir,'evolution.pth') - self.is_distributed = is_distributed - - self.states = model.module.mix_nums if is_distributed else model.mix_nums - self.supernet_state_dict = pickle.loads(pickle.dumps(model.state_dict())) - self.flops_limit = flops_limit - self.model = model - - self.candidates = [] - self.vis_dict = {} - - self.max_epochs = cfg.SEARCH.MAX_EPOCH - self.select_num = cfg.SEARCH.SELECT_NUM - self.population_num = cfg.SEARCH.POPULATION_NUM/get_world_size() - self.mutation_num = cfg.SEARCH.MUTATION_NUM/get_world_size() - self.crossover_num = cfg.SEARCH.CROSSOVER_NUM/get_world_size() - self.mutation_prob = cfg.SEARCH.MUTATION_PROB/get_world_size() - - self.keep_top_k = {self.select_num:[], 50:[]} - self.epoch=0 - self.cfg = cfg - - def save_checkpoint(self): - if not is_main_process(): - return - if not os.path.exists(self.log_dir): - os.makedirs(self.log_dir) - info = {} - info['candidates'] = self.candidates - info['vis_dict'] = self.vis_dict - info['keep_top_k'] = self.keep_top_k - info['epoch'] = self.epoch - torch.save(info, self.checkpoint_name) - print('Save checkpoint to', self.checkpoint_name) - - def load_checkpoint(self): - if not os.path.exists(self.checkpoint_name): - return False - info = torch.load(self.checkpoint_name) - self.candidates = info['candidates'] - self.vis_dict = info['vis_dict'] - self.keep_top_k = info['keep_top_k'] - self.epoch = info['epoch'] - print('Load checkpoint from', self.checkpoint_name) - return True - - def legal(self, cand): - assert isinstance(cand,tuple) and len(cand)==len(self.states) - if cand in self.vis_dict: - return False - - if self.flops_limit is not None: - net = self.model.module.backbone if self.is_distributed else self.model.backbone - inp = (1, 3, 224, 224) - flops, params = profile(net, inp, extra_args={'paths': list(cand)}) - flops = flops/1e6 - print('flops:',flops) - if flops>self.flops_limit: - return False - - return True - - def update_top_k(self, candidates, *, k, key, reverse=False): - assert k in self.keep_top_k - # print('select ......') - t = self.keep_top_k[k] - t += candidates - t.sort(key=key,reverse=reverse) - self.keep_top_k[k]=t[:k] - - def eval_candidates(self, train_loader, val_loader): - for cand in self.candidates: - t0 = time.time() - - # load back supernet state dict - self.model.load_state_dict(self.supernet_state_dict) - # bn_statistic - model = bn_statistic(self.model, list(cand), train_loader) - # fitness - evals = fitness(cfg, model, list(cand), val_loader) - - if is_main_process(): - acc = evals[0].results['bbox']['AP'] - self.vis_dict[cand] = acc - print('candiate ', cand) - print('time: {}s'.format(time.time() - t0)) - print('acc ', acc) - - def 
stack_random_cand(self, random_func, *, batchsize=10):
-        while True:
-            cands = [random_func() for _ in range(batchsize)]
-            for cand in cands:
-                yield cand
-
-    def random_can(self, num):
-        # print('random select ........')
-        candidates = []
-        cand_iter = self.stack_random_cand(lambda:tuple(np.random.randint(i) for i in self.states))
-        while len(candidates) < num:
-            cand = next(cand_iter)
-            if not self.legal(cand):
-                continue
-            candidates.append(cand)
-        return candidates
-
-    def get_mutation(self, k, mutation_num, m_prob):
-        assert k in self.keep_top_k
-        res = []
-        max_iters = 10 * mutation_num
-
-        def random_func():
-            cand = list(choice(self.keep_top_k[k]))
-            for i in range(len(self.states)):
-                if np.random.random_sample() < m_prob:
-                    cand[i] = np.random.randint(self.states[i])
-            return tuple(cand)
-
-        cand_iter = self.stack_random_cand(random_func)
-        while len(res) < mutation_num and max_iters > 0:
-            cand = next(cand_iter)
-            if not self.legal(cand):
-                continue
-            res.append(cand)
-            #print('mutation {}/{}'.format(len(res),mutation_num))
-            max_iters-=1
-
-        # print('mutation_num = {}'.format(len(res)))
-        return res
-
-    def get_crossover(self, k, crossover_num):
-        assert k in self.keep_top_k
-        # print('crossover ......')
-        res = []
-        iter = 0
-        max_iters = 10 * crossover_num
-
-        def random_func():
-            p1=choice(self.keep_top_k[k])
-            p2=choice(self.keep_top_k[k])
-            return tuple(choice([i,j]) for i,j in zip(p1,p2))
-
-        cand_iter = self.stack_random_cand(random_func)
-        while len(res) < crossover_num and max_iters > 0:
-            cand = next(cand_iter)
-            if not self.legal(cand):
-                continue
-            res.append(cand)
-            #print('crossover {}/{}'.format(len(res),crossover_num))
-            max_iters-=1
-
-        # print('crossover_num = {}'.format(len(res)))
-        return res
-
-    def train(self, train_loader, val_loader):
-        logger = logging.getLogger("maskrcnn_benchmark.evolution")
-
-        if not self.load_checkpoint():
-            self.candidates = gather_candidates(self.random_can(self.population_num))
-
-        while self.epoch